commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
bf0408024d5ebaeca25d263d0d2f4f12b8a01aa5 | Create configparser.py | configparser.py | configparser.py | #!/usr/bin/env python
# encoding=utf-8
class IrregualrConfigParser(object):
    """INI-style parser that preserves the original file layout.

    Unlike the stdlib ConfigParser, blank lines, comments, bare options and
    the original ordering are all kept, stored as a list of per-line dicts in
    self.__content so that write() can reproduce the file as read.
    NOTE: Python 2 only -- read() relies on the `file` builtin and
    .xreadlines().
    """
    # Characters that mark a whole-line comment.
    COMMENT_FLAGS = ("#", ";")
    def __init__(self):
        super(IrregualrConfigParser, self).__init__()
        # Parsed lines: None for a blank line, otherwise a dict carrying a
        # subset of the keys "comment", "section", "option", "value".
        self.__content = []
    def read(self, fn_or_fp):
        """Parse *fn_or_fp*, which is either an open file object or a filename."""
        content = []
        if isinstance(fn_or_fp, file):
            content = [line.strip() for line in fn_or_fp.xreadlines()]
        else:
            with open(fn_or_fp, "r") as fp:
                content = [line.strip() for line in fp.xreadlines()]
        self.__content = self.parse_content(content)
    def write(self, fp):
        """Serialize the stored layout back out to the open file object *fp*."""
        for line in self.__content:
            if line is None:
                fp.write("\n")
                continue
            comment = line.get("comment", None)
            section = line.get("section", None)
            option = line.get("option", None)
            value = line.get("value", None)
            if comment:
                fp.write(comment + "\n")
                continue
            elif section and option is None:
                # Section header line (option lines also carry a "section"
                # key, but they additionally have an option name).
                fp.write("[{0}]\n".format(section))
                continue
            elif option and value is not None:
                fp.write("{0} = {1}\n".format(option, value))
            else:
                # Bare option line that had no "=" in the original file.
                fp.write(option + "\n")
    def parse_content(self, content):
        """Turn a list of stripped lines into the internal line-dict list."""
        def _parse_content():
            # Section the current option belongs to (None before the first
            # section header).
            section = None
            for line in content:
                if not line:
                    yield None
                    continue
                if line[0] in self.COMMENT_FLAGS:
                    yield {"comment": line}
                    continue
                if line[0] == '[' and line[-1] == ']':
                    section = line[1:-1]
                    yield {"section": section}
                    continue
                if "=" in line:
                    option, value = [i.strip() for i in line.split("=", 1)]
                    yield {"option": option, "value": value, "section": section}
                else:
                    yield {"option": line, "section": section}
        if not content:
            return []
        else:
            return list(_parse_content())
    def __get_option(self, section, option):
        """Return (index, line-dict) of *option* in *section*, or (-1, None)."""
        for ln, line in enumerate(self.__content):
            if not line:
                continue
            section_name = line.get("section", None)
            option_name = line.get("option", None)
            if not section_name or not option_name:
                continue
            if section_name == section and option_name == option:
                return ln, line
        return -1, None
    def __get_section(self, section):
        """Return (index, line-dict) of the first line in *section*, or (-1, None)."""
        # The section header precedes its options, so the first match is the
        # header line itself.
        for ln, line in enumerate(self.__content):
            if not line:
                continue
            section_name = line.get("section", None)
            if section_name and section_name == section:
                return ln, line
        return -1, None
    def has_section(self, section):
        """Return True if *section* exists."""
        ln, _ = self.__get_section(section)
        return ln != -1
    def has_option(self, section, option):
        """Return True if *option* exists inside *section*."""
        ln, _ = self.__get_option(section, option)
        return ln != -1
    def __add_section(self, section):
        # Appends a new header line and returns its index and dict.
        line = {"section": section}
        self.__content.append(line)
        return len(self.__content) - 1, line
    def add_section(self, section):
        """Append a new section header; always returns True (append cannot fail)."""
        ln, _ = self.__add_section(section)
        return ln != -1
    def get(self, section, option):
        """Return the value of *option* in *section*, or None if absent."""
        _, option_line = self.__get_option(section, option)
        if not option_line:
            return None
        else:
            return option_line.get("value", None)
    def set(self, section, option, value):
        """Set *option* = *value*, creating section and/or option as needed."""
        _, option_line = self.__get_option(section, option)
        if option_line:
            option_line['value'] = value
            return True
        ln, _ = self.__get_section(section)
        if ln == -1:
            ln, _ = self.__add_section(section)
        # Insert the new option right after the section header.
        line = {"option": option, "value": value, "section": section}
        self.__content.insert(ln + 1, line)
        return True
| Python | 0.000002 | |
29ec4f28abf535a179e77f938092b7c20974847d | Create LSBSteg.py | LSBSteg.py | LSBSteg.py | #!/usr/bin/python
# Stego Dropper For Pentesters v0.1
# Pulled from https://github.com/RobinDavid/LSB-Steganography
import cv2.cv as cv
import sys
class SteganographyException(Exception):
    """Raised when hiding or extracting data in a carrier image fails."""
class LSBSteg():
    """Least-significant-bit steganography over an OpenCV (cv, legacy API) image.

    A "cursor" (curwidth, curheight, curchan) walks every channel of every
    pixel; once all pixels are used at bit position 0, the masks advance to
    the next bit position.  Python 2 only (old-style raise, cv module).
    """
    def __init__(self, im):
        self.image = im
        self.width = im.width
        self.height = im.height
        self.size = self.width * self.height
        self.nbchannels = im.channels
        self.maskONEValues = [1,2,4,8,16,32,64,128]
        # Mask used to put a one, ex: 1->00000001, 2->00000010 ... used with bitwise OR.
        self.maskONE = self.maskONEValues.pop(0) # Will be used to do bitwise operations
        self.maskZEROValues = [254,253,251,247,239,223,191,127]
        # Mask used to put a zero, ex: 254->11111110, 253->11111101 ... used with bitwise AND.
        self.maskZERO = self.maskZEROValues.pop(0)
        self.curwidth = 0  # Current width position
        self.curheight = 0 # Current height position
        self.curchan = 0   # Current channel position
    def saveImage(self,filename):
        # Save the image using the given filename
        cv.SaveImage(filename, self.image)
    def putBinaryValue(self, bits): # Put the bits in the image
        """Write the bit string *bits* at the cursor, advancing it per bit."""
        for c in bits:
            val = list(self.image[self.curwidth,self.curheight]) # Get the pixel value as a list
            if int(c) == 1:
                val[self.curchan] = int(val[self.curchan]) | self.maskONE # OR with maskONE
            else:
                val[self.curchan] = int(val[self.curchan]) & self.maskZERO # AND with maskZERO
            self.image[self.curwidth,self.curheight] = tuple(val)
            self.nextSpace() # Move "cursor" to the next space
    def nextSpace(self): # Move to the next slot where information can be taken or put
        """Advance the cursor: channel, then pixel, then row, then bit plane."""
        if self.curchan == self.nbchannels-1: # Next space is the following channel
            self.curchan = 0
            if self.curwidth == self.width-1: # Or the first channel of the next pixel of the same line
                self.curwidth = 0
                if self.curheight == self.height-1: # Or the first channel of the first pixel of the next line
                    self.curheight = 0
                    if self.maskONE == 128: # Mask 10000000, so the last mask
                        raise SteganographyException, "Image filled"
                    else: # Instead of using the first bit, start using the second and so on...
                        self.maskONE = self.maskONEValues.pop(0)
                        self.maskZERO = self.maskZEROValues.pop(0)
                else:
                    self.curheight +=1
            else:
                self.curwidth +=1
        else:
            self.curchan +=1
    def readBit(self): # Read a single bit in the image
        val = self.image[self.curwidth,self.curheight][self.curchan]
        val = int(val) & self.maskONE
        self.nextSpace()
        if val > 0:
            return "1"
        else:
            return "0"
    def readByte(self):
        return self.readBits(8)
    def readBits(self, nb): # Read the given number of bits
        bits = ""
        for i in range(nb):
            bits += self.readBit()
        return bits
    def byteValue(self, val):
        return self.binValue(val, 8)
    def binValue(self, val, bitsize): # Return the binary value of an int, zero-padded to bitsize
        binval = bin(val)[2:]
        if len(binval) > bitsize:
            raise SteganographyException, "binary value larger than the expected size"
        while len(binval) < bitsize:
            binval = "0"+binval
        return binval
    def hideText(self, txt):
        """Hide *txt*: a 16-bit length prefix, then one byte per character."""
        l = len(txt)
        binl = self.binValue(l, 16) # Length coded on 2 bytes so the text size can be up to 65536 bytes long
        self.putBinaryValue(binl) # Put the text length
        for char in txt: # And put all the chars
            c = ord(char)
            self.putBinaryValue(self.byteValue(c))
    def unhideText(self):
        """Inverse of hideText: read the 16-bit length, then that many bytes."""
        ls = self.readBits(16) # Read the text size in bytes
        l = int(ls,2)
        i = 0
        unhideTxt = ""
        while i < l: # Read all bytes of the text
            tmp = self.readByte() # So one byte
            i += 1
            unhideTxt += chr(int(tmp,2)) # Every char concatenated to str
        return unhideTxt
    def hideImage(self, imtohide):
        """Hide another cv image: 16-bit width and height, then raw channel bytes."""
        w = imtohide.width
        h = imtohide.height
        if self.width*self.height*self.nbchannels < w*h*imtohide.channels:
            raise SteganographyException, "Carrier image not big enough to hold all the datas to steganography"
        binw = self.binValue(w, 16) # Width coded on two bytes so width up to 65536
        binh = self.binValue(h, 16)
        self.putBinaryValue(binw) # Put width
        self.putBinaryValue(binh) # Put height
        for h in range(imtohide.height): # Iterate the whole image to put every pixel value
            for w in range(imtohide.width):
                for chan in range(imtohide.channels):
                    val = imtohide[h,w][chan]
                    self.putBinaryValue(self.byteValue(int(val)))
    def unhideImage(self):
        """Inverse of hideImage; returns a new 8-bit 3-channel cv image."""
        width = int(self.readBits(16),2) # Read 16 bits and convert to int
        height = int(self.readBits(16),2)
        unhideimg = cv.CreateImage((width,height), 8, 3) # Create an image in which we will put all the pixels read
        for h in range(height):
            for w in range(width):
                for chan in range(unhideimg.channels):
                    val = list(unhideimg[h,w])
                    val[chan] = int(self.readByte(),2) # Read the value
                    unhideimg[h,w] = tuple(val)
        return unhideimg
    def hideBin(self, filename):
        """Hide an arbitrary binary file: 64-bit length prefix, then its bytes."""
        f = open(filename,'rb')
        bin = f.read()
        l = len(bin)
        if self.width*self.height*self.nbchannels < l+64:
            raise SteganographyException, "Carrier image not big enough to hold all the datas to steganography"
        self.putBinaryValue(self.binValue(l, 64))
        for byte in bin:
            self.putBinaryValue(self.byteValue(ord(byte)))
    def unhideBin(self):
        """Inverse of hideBin; returns the hidden payload as a byte string."""
        l = int(self.readBits(64),2)
        output = ""
        for i in range(l):
            output += chr(int(self.readByte(),2))
        return output
# Library module: no command-line behaviour when executed directly.
if __name__=="__main__":
    pass
| Python | 0 | |
8a7fda2acf57c135e7f401ebdd8f71c3609c0eca | Create tries.py | Python/tries.py | Python/tries.py | def make_trie(*args):
    trie={}
    for word in args:
        if type(word)!= str:
            raise TypeError("Trie work only on strings")
        # temp_trie walks down the nested dicts; it starts as an alias of the
        # root dict, so every insertion below is visible through `trie`.
        temp_trie=trie
        for letter in word:
            # setdefault inserts {} for an unseen letter and returns the
            # (new or existing) child dict, which is how the nesting of one
            # dict level per character is built up.
            temp_trie=temp_trie.setdefault(letter,{})
        # The sentinel key '__end__' marks that a complete word ends here.
        temp_trie=temp_trie.setdefault('__end__','__end__')
    return trie
def in_trie(trie,word):
    """Return True when *word* is stored in *trie* as a complete word.

    Prefixes of stored words (without the '__end__' sentinel) return False.
    """
    if type(word) is not str:
        raise TypeError("Trie work only on strings")
    node = trie
    for character in word:
        if character not in node:
            return False
        node = node[character]
    return "__end__" in node
def remove(trie,word,depth):
    """Delete *word* from *trie*; call with depth=0 at the root.

    Returns True when the subtree it was called on became empty and can be
    deleted by the caller, False otherwise.  NOTE(review): the branch logic
    around len(trie) > 1 looks fragile for tries with shared prefixes --
    verify against the intended deletion semantics before reuse.
    """
    if word and word[depth] not in trie:
        # Word is not present at this level; nothing to delete.
        return False
    if len(word) == depth + 1:
        # Reached the last character of the word: drop its end marker.
        if '__end__' in trie[word[depth]]:
            del trie[word[depth]]['__end__']
        if len(trie[word[depth]]) > 0 and len(trie) > 1:
            # The node still has children (it is a prefix of other words).
            return False
        elif len(trie) > 1 :
            # Leaf node, but siblings exist at this level: prune just it.
            del trie[word[depth]]
            return False
        elif len(trie[word[depth]]) > 0:
            return False
        else:
            # Node and level are both empty: tell the caller to prune us.
            return True
    else:
        temp_trie = trie
        # Recursively descend; prune on the way back up while child levels
        # report themselves empty.
        if remove(temp_trie[word[depth]], word, depth + 1):
            if temp_trie:
                del temp_trie[word[depth]]
            return not temp_trie
        else:
            return False
# Smoke test: build a trie of two words and show that a bare prefix is not a
# match (Python 2 print statements).
trie=make_trie('hack','hackerrank')
print trie
print in_trie(trie,'hac')
print trie
| Python | 0.000001 | |
23cf747a3ff24f75d3300547f4bfdecf10c4a325 | Add next traversal util function | scrapple/utils/config.py | scrapple/utils/config.py | """
scrapple.utils.config
~~~~~~~~~~~~~~~~~~~~~
Functions related to traversing the configuration file
"""
from __future__ import print_function
def traverse_next(page, next, results):
    """Generator: follow the links described by *next* from *page*, scraping
    the configured attributes and yielding one result dict per leaf page.

    :param page: project page object exposing extract_links()/extract_content()
    :param next: the 'next' section of the scraper configuration (note: the
                 parameter name shadows the builtin ``next``)
    :param results: dict of attribute values collected so far
    """
    for link in page.extract_links(next['follow_link']):
        print("Loading page", link.url)
        # NOTE(review): r aliases the caller's dict, so fields scraped while
        # processing one link remain visible for the following links --
        # confirm this accumulation across siblings is intended.
        r = results
        for attribute in next['scraping'].get('data'):
            if attribute['field'] != "":
                print("\nExtracting", attribute['field'], "attribute", sep=' ')
                r[attribute['field']] = link.extract_content(attribute['selector'], attribute['attr'])
        if not next['scraping'].get('next'):
            # Leaf level: this dict is a complete result.
            yield r
        else:
            # Recurse into each nested 'next' specification.
            for next2 in next['scraping'].get('next'):
                for result in traverse_next(link, next2, r):
                    yield result
| Python | 0.000935 | |
56b3cf07fff4d3794dcdbf99f6d7faa629fa243e | fix string manipulation in render_templatefile() | scrapy/utils/template.py | scrapy/utils/template.py | """Helper functions for working with templates"""
import os
import re
import string
def render_templatefile(path, **kwargs):
    """Substitute *kwargs* into the template file at *path*.

    The rendered output is written next to the template with the ``.tmpl``
    suffix removed, and the template file itself is then deleted.  A path
    without the suffix is rendered onto itself.
    """
    with open(path, 'rb') as template_file:
        raw = template_file.read()
    content = string.Template(raw).substitute(**kwargs)
    if path.endswith('.tmpl'):
        render_path = path[:-len('.tmpl')]
    else:
        render_path = path
    with open(render_path, 'wb') as rendered_file:
        rendered_file.write(content)
    if path.endswith('.tmpl'):
        os.remove(path)
CAMELCASE_INVALID_CHARS = re.compile('[^a-zA-Z\d]')
def string_camelcase(string):
    """ Convert a word to its CamelCase version and remove invalid chars

    >>> string_camelcase('lost-pound')
    'LostPound'
    >>> string_camelcase('missing_images')
    'MissingImages'
    """
    titled = string.title()
    return CAMELCASE_INVALID_CHARS.sub('', titled)
| """Helper functions for working with templates"""
import os
import re
import string
def render_templatefile(path, **kwargs):
    """Substitute *kwargs* into the template file at *path* and write the
    result with the ``.tmpl`` suffix removed (deleting the template after).

    Bug fix: the original computed the output name with
    ``path.rstrip('.tmpl')``, but str.rstrip strips a *set of characters*
    (any trailing '.', 't', 'm', 'p' or 'l'), not a suffix -- e.g.
    ``'crawl.tmpl'.rstrip('.tmpl')`` yields ``'craw'``.  Slicing off the
    literal suffix is the correct operation.
    """
    with open(path, 'rb') as file:
        raw = file.read()
    content = string.Template(raw).substitute(**kwargs)
    # Only strip the suffix when it is actually present.
    render_path = path[:-len('.tmpl')] if path.endswith('.tmpl') else path
    with open(render_path, 'wb') as file:
        file.write(content)
    if path.endswith('.tmpl'):
        os.remove(path)
CAMELCASE_INVALID_CHARS = re.compile('[^a-zA-Z\d]')
def string_camelcase(string):
    """ Convert a word to its CamelCase version and remove invalid chars

    >>> string_camelcase('lost-pound')
    'LostPound'
    >>> string_camelcase('missing_images')
    'MissingImages'
    """
    # Splitting on the invalid characters and re-joining is equivalent to
    # substituting them with the empty string.
    return ''.join(CAMELCASE_INVALID_CHARS.split(string.title()))
| Python | 0.000007 |
cefdd80e7cd9e4ce007e60c08114e89a46b15de7 | Truncate a protein sequence to remove signal peptide. | RemoveSignal.py | RemoveSignal.py | #!/usr/bin/python
# Copyright (c) 2014 Khoa Tran. All rights reserved.
from CalFord import *
import argparse
import sys,os
import re
signalFile = None
configPath = "calford.conf"
noSignalOutputFile = None
removedSignalOutputFile = None
def argsSanityCheck():
    """Return True when the module-global signalFile points at an existing file.

    Prints an error (Python 2 print statement) and returns False otherwise.
    """
    isOk = True
    if not os.path.isfile(signalFile):
        print "Error: cannot find %s"%signalFile
        isOk = False
    return isOk
def parseArgs():
    """Parse command-line arguments into the module globals.

    Sets signalFile, noSignalOutputFile, removedSignalOutputFile and
    (optionally) configPath, then exits with status 1 when the sanity
    check fails.
    """
    global configPath
    global signalFile
    global noSignalOutputFile
    global removedSignalOutputFile
    parser = argparse.ArgumentParser(
        description="Read the protein signal file and generate two FASTA file: "\
                    "one file contains proteins without signal, the other one "\
                    "contains processed proteins, whose signal sequence has been "\
                    "truncated.")
    parser.add_argument("signalFile",help="input protein signal analysis result")
    parser.add_argument("--config",help="path to config file",
                        nargs=1)
    parser.add_argument("--outputNoSignal",help="write protein without signal to "\
                        "this file",
                        nargs=1,required=True)
    parser.add_argument("--outputTruncatedSignal",help="write protein with signal "\
                        "sequence truncated to this file",
                        nargs=1,required=True)
    args = parser.parse_args()
    signalFile = args.signalFile
    noSignalOutputFile = args.outputNoSignal[0]
    removedSignalOutputFile = args.outputTruncatedSignal[0]
    if args.config!=None:
        configPath = args.config[0]
    if not argsSanityCheck():
        # Bare Python 2 print emits the blank line the error formatting expects.
        print
        exit(1)
def loadSignalAnalysis(path):
    """Parse a signal-peptide analysis file into {protein_id: cleave_pos}.

    Lines that do not match ``<id> Signal ...`` count as "no signal" and are
    skipped.  Matching lines without an ``after AA <n>`` cleave position are
    recorded with position 0.  Returns None on I/O error.
    """
    TRACE5("Load signal result from %s"%path)
    signalData = {}
    noSignalCount = 0
    noCleaveCount = 0
    truncatedCount = 0
    try:
        f = open(path,'r')
        signalRe = re.compile('(\S+)\s+(Signal .*)')
        cleaveRe = re.compile('after AA (\d+)')
        for line in f:
            m = signalRe.match(line)
            if m==None:
                # no signal found
                noSignalCount += 1
                continue
            pid = m.group(1)
            m2 = cleaveRe.search(m.group(2))
            if m2==None:
                # Signal present but no cleave location: record position 0.
                signalData[pid] = 0
                noCleaveCount += 1
            else:
                signalData[pid] = int(m2.group(1))
                truncatedCount += 1
        f.close()
        TRACE9("Found %d proteins with no signal, %d proteins with no cleave location "\
               "and %d proteins has been truncated"\
               %(noSignalCount,noCleaveCount,truncatedCount))
    except IOError,e:
        print "Error reading signal file: %s"%str(e)
        return None
    return signalData
def writeNoSignalProtein(fastaDb,data):
    """Write every FASTA entry whose id is absent from *data* (i.e. proteins
    without a detected signal) to the module-global noSignalOutputFile."""
    TRACE5("Writing no signal proteins to output file at %s"%noSignalOutputFile)
    try:
        f = open(noSignalOutputFile,'w')
    except IOError,e:
        print "Error writing no signal output file: %s"%str(e)
        return
    for p in fastaDb:
        if p not in data:
            f.write("%s\n"%fastaDb[p])
    f.close()
def renameProtein(proteinDesc,suffix='.nosignal'):
    """Return the protein id from a FASTA description line, with *suffix*
    appended; None (after logging) when the line cannot be parsed."""
    match = re.match(r'>(\S+)\s+(.*)', proteinDesc)
    if match is None:
        TRACE0("Cannot parse protein desc: %s"%proteinDesc)
        return None
    return match.group(1) + suffix
def truncateSignalProtein(fastaDb,data):
    """Return {renamed_id: fasta_entry} with the signal peptide cut off.

    *data* maps protein id -> cleave position; the first *loc* residues are
    removed from the sequence and the id gains the '.nosignal' suffix.
    """
    TRACE5("Truncate signal proteins")
    result = {}
    for pid in data:
        loc = data[pid]
        if pid not in fastaDb:
            TRACE0("Error: cannot find %s in FASTA database"%pid)
            continue
        p = fastaDb[pid]
        # Entry layout assumed: line 0 is the '>' description, line 1 the
        # sequence (single-line FASTA) -- TODO confirm for multi-line FASTA.
        s = p.split('\n')
        newPid = renameProtein(s[0])
        if newPid==None:
            continue
        seq = s[1]
        if loc>=len(seq):
            # NOTE(review): only logged; the slice below still runs and
            # produces an empty sequence -- confirm that is intended.
            TRACE0("Error: cleaved location %d is larger than sequence len (%d)"\
                   %(loc,len(seq)))
        seq = seq[loc:]
        result[newPid] = ">"+newPid+"\n"+seq
    return result
def writeTruncatedProtein(data):
    """Write every truncated FASTA entry in *data* to the module-global
    removedSignalOutputFile (best-effort: returns silently on I/O error)."""
    TRACE5("Writing truncated signal proteins to output file at %s"%removedSignalOutputFile)
    try:
        f = open(removedSignalOutputFile,'w')
    except IOError,e:
        print "Error writing truncated signal output file: %s"%str(e)
        return
    for p in data:
        f.write("%s\n"%data[p])
    f.close()
# Script body: parse arguments and config, load the FASTA database and the
# signal analysis, then emit the two output files.
parseArgs()
parseConfigFile(configPath)
print "Write no signal proteins to: %s"%noSignalOutputFile
print "Write truncated signal proteins to: %s"%removedSignalOutputFile
fastaDb = loadFasta(config['database'])
if fastaDb==None:
    # error
    print "Error: load FASTA file error"
    exit(1)
signalData = loadSignalAnalysis(signalFile)
if signalData==None:
    # error
    exit(1)
truncatedDb = truncateSignalProtein(fastaDb,signalData)
writeNoSignalProtein(fastaDb,signalData)
writeTruncatedProtein(truncatedDb)
| Python | 0 | |
fd8b325bb6423c2f56d84006763ec8f6696a2745 | Test basic paths | tests/test_draw/svg/test_paths.py | tests/test_draw/svg/test_paths.py | """
weasyprint.tests.test_draw.svg.test_paths
------------------------------------------
Test how SVG simple paths are drawn.
"""
from ...testing_utils import assert_no_logs
from .. import assert_pixels
@assert_no_logs
def test_path_Hh():
    """Horizontal lines: absolute H (repeated / implicit) and relative h."""
    assert_pixels('path_Hh', 10, 10, '''
        BBBBBBBB__
        BBBBBBBB__
        __________
        RRRRRRRR__
        RRRRRRRR__
        __________
        GGGGGGGG__
        GGGGGGGG__
        BBBBBBBB__
        BBBBBBBB__
    ''', '''
      <style>
        @page { size: 10px }
        svg { display: block }
      </style>
      <svg width="10px" height="10px" xmlns="http://www.w3.org/2000/svg">
        <path d="M 0 1 H 8 H 1"
          stroke="blue" stroke-width="2" fill="none"/>
        <path d="M 0 4 H 8 4"
          stroke="red" stroke-width="2" fill="none"/>
        <path d="M 0 7 h 8 h 0"
          stroke="lime" stroke-width="2" fill="none"/>
        <path d="M 0 9 h 8 0"
          stroke="blue" stroke-width="2" fill="none"/>
      </svg>
    ''')
@assert_no_logs
def test_path_Vv():
    """Vertical lines: absolute V (repeated / implicit) and relative v."""
    assert_pixels('path_Vv', 10, 10, '''
        BB____GG__
        BB____GG__
        BB____GG__
        BB____GG__
        ___RR_____
        ___RR_____
        ___RR___BB
        ___RR___BB
        ___RR___BB
        ___RR___BB
    ''', '''
      <style>
        @page { size: 10px }
        svg { display: block }
      </style>
      <svg width="10px" height="10px" xmlns="http://www.w3.org/2000/svg">
        <path d="M 1 0 V 1 V 4"
          stroke="blue" stroke-width="2" fill="none"/>
        <path d="M 4 6 V 4 10"
          stroke="red" stroke-width="2" fill="none"/>
        <path d="M 7 0 v 0 v 4"
          stroke="lime" stroke-width="2" fill="none"/>
        <path d="M 9 6 v 0 4"
          stroke="blue" stroke-width="2" fill="none"/>
      </svg>
    ''')
@assert_no_logs
def test_path_Ll():
    """Line-to commands: absolute L and relative l."""
    assert_pixels('path_Ll', 10, 10, '''
        ______RR__
        ______RR__
        ______RR__
        ___BB_RR__
        ___BB_RR__
        ___BB_RR__
        ___BB_____
        ___BB_____
        ___BB_____
        ___BB_____
    ''', '''
      <style>
        @page { size: 10px }
        svg { display: block }
      </style>
      <svg width="10px" height="10px" xmlns="http://www.w3.org/2000/svg">
        <path d="M 4 3 L 4 10"
          stroke="blue" stroke-width="2" fill="none"/>
        <path d="M 7 0 l 0 6"
          stroke="red" stroke-width="2" fill="none"/>
      </svg>
    ''')
@assert_no_logs
def test_path_Zz():
    """Close-path commands: absolute Z and relative z."""
    assert_pixels('path_Zz', 10, 10, '''
        BBBBBBB___
        BBBBBBB___
        BB___BB___
        BB___BB___
        BBBBBBB___
        BBBBBBB___
        ____RRRRRR
        ____RRRRRR
        ____RR__RR
        ____RRRRRR
    ''', '''
      <style>
        @page { size: 10px }
        svg { display: block }
      </style>
      <svg width="10px" height="10px" xmlns="http://www.w3.org/2000/svg">
        <path d="M 1 1 H 6 V 5 H 1 Z"
          stroke="blue" stroke-width="2" fill="none"/>
        <path d="M 9 10 V 7 H 5 V 10 z"
          stroke="red" stroke-width="2" fill="none"/>
      </svg>
    ''')
| Python | 0.000001 | |
cc0ef22d0fb122b2c28e6004843978a0ee9e255f | Create Pinject.py | Pinject.py | Pinject.py | import socket
import struct
import sys
from optparse import OptionParser
def checksum(data):
    """Compute the 16-bit ones'-complement Internet checksum (RFC 1071) of
    *data*, a byte string whose bytes are paired little-endian.

    Bug fix: for odd-length input the original added ``ord(data[i+1])`` --
    the *second-to-last* byte, reusing the loop variable -- and raised a
    NameError for a 1-byte input (the loop never ran, so ``i`` was unbound).
    The trailing odd byte is ``data[-1]``, zero-padded in the high position.
    """
    s = 0
    n = len(data) % 2
    for i in range(0, len(data)-n, 2):
        s+= ord(data[i]) + (ord(data[i+1]) << 8)
    if n:
        # Odd trailing byte: add it unshifted (implicit zero pad byte).
        s+= ord(data[-1])
    # Fold the carries back into the low 16 bits.
    while (s >> 16):
        s = (s & 0xFFFF) + (s >> 16)
    s = ~s & 0xffff
    return s
class ip(object):
    """Minimal IPv4 header builder for raw-socket injection.

    Total length and checksum are left at zero: the kernel fills them in
    when the packet is sent over a SOCK_RAW/IPPROTO_RAW socket.
    """
    def __init__(self, source, destination):
        # Fixed fields for a plain IPv4/TCP datagram.
        self.version = 4
        self.ihl = 5                        # Internet Header Length (in 32-bit words)
        self.tos = 0                        # Type of Service
        self.tl = 0                         # total length will be filled by kernel
        self.id = 54321
        self.flags = 0
        self.offset = 0
        self.ttl = 255
        self.protocol = socket.IPPROTO_TCP
        self.checksum = 0                   # will be filled by kernel
        # Addresses stored as packed 4-byte strings.
        self.source = socket.inet_aton(source)
        self.destination = socket.inet_aton(destination)
    def pack(self):
        """Serialize the header fields into a 20-byte network-order struct."""
        version_and_ihl = (self.version << 4) + self.ihl
        flags_and_offset = (self.flags << 13) + self.offset
        return struct.pack("!BBHHHBBH4s4s",
                           version_and_ihl,
                           self.tos,
                           self.tl,
                           self.id,
                           flags_and_offset,
                           self.ttl,
                           self.protocol,
                           self.checksum,
                           self.source,
                           self.destination)
class tcp(object):
    """TCP header builder (SYN by default) with pseudo-header checksumming.

    Depends on the module-level checksum() function.
    """
    def __init__(self, srcp, dstp):
        self.srcp = srcp
        self.dstp = dstp
        self.seqn = 0
        self.ackn = 0
        self.offset = 5 # Data offset: 5x4 = 20 bytes
        self.reserved = 0
        # Flag bits; only SYN is set by default.
        self.urg = 0
        self.ack = 0
        self.psh = 0
        self.rst = 0
        self.syn = 1
        self.fin = 0
        self.window = socket.htons(5840)
        self.checksum = 0
        self.urgp = 0
        self.payload = ""
    def pack(self, source, destination):
        """Return the packed TCP header; *source*/*destination* are the packed
        4-byte IP addresses used for the checksum pseudo header."""
        data_offset = (self.offset << 4) + 0
        flags = self.fin + (self.syn << 1) + (self.rst << 2) + (self.psh << 3) + (self.ack << 4) + (self.urg << 5)
        # First pass: header with a zero checksum field, used for checksumming.
        tcp_header = struct.pack('!HHLLBBHHH',
                                 self.srcp,
                                 self.dstp,
                                 self.seqn,
                                 self.ackn,
                                 data_offset,
                                 flags,
                                 self.window,
                                 self.checksum,
                                 self.urgp)
        # pseudo header fields
        source_ip = source
        destination_ip = destination
        reserved = 0
        protocol = socket.IPPROTO_TCP
        total_length = len(tcp_header) + len(self.payload)
        # Pseudo header
        psh = struct.pack("!4s4sBBH",
                          source_ip,
                          destination_ip,
                          reserved,
                          protocol,
                          total_length)
        psh = psh + tcp_header + self.payload
        tcp_checksum = checksum(psh)
        # Second pass: repack without the checksum/urgent-pointer fields...
        tcp_header = struct.pack("!HHLLBBH",
                                 self.srcp,
                                 self.dstp,
                                 self.seqn,
                                 self.ackn,
                                 data_offset,
                                 flags,
                                 self.window)
        # ...then append the checksum in *native* byte order ('H', no '!'):
        # checksum() pairs bytes little-endian, so the result is already in
        # wire order on little-endian hosts -- NOTE(review): not portable to
        # big-endian machines.
        tcp_header+= struct.pack('H', tcp_checksum) + struct.pack('!H', self.urgp)
        return tcp_header
def main():
    """Build one TCP/IP packet with a test payload and inject it via a raw
    socket (requires root privileges for SOCK_RAW)."""
    parser = OptionParser()
    parser.add_option("-s", "--src", dest="src", type="string",
                      help="Source IP address", metavar="IP")
    parser.add_option("-d", "--dst", dest="dst", type="string",
                      help="Destination IP address", metavar="IP")
    options, args = parser.parse_args()
    if options.dst == None:
        # Destination is mandatory; show usage and stop.
        parser.print_help()
        sys.exit()
    else:
        dst_host = socket.gethostbyname(options.dst)
    if options.src == None:
        # get the current Network Interface
        src_host = socket.gethostbyname(socket.gethostname())
    else:
        src_host = options.src
    print("[+] Local Machine: %s"%src_host)
    print("[+] Remote Machine: %s"%dst_host)
    # IPPROTO_RAW implies IP_HDRINCL: we supply the IP header ourselves.
    s = socket.socket(socket.AF_INET,
                      socket.SOCK_RAW,
                      socket.IPPROTO_RAW)
    print("[+] Raw scoket created")
    data = "TEST!!"
    print("[+] Data to inject: %s"%data)
    # IP Header
    print("[+] Constructing IP Header")
    ipobj = ip(src_host, dst_host)
    iph = ipobj.pack()
    # TCP Header
    print("[+] Constructing TCP Header")
    tcpobj = tcp(1234, 80)
    tcpobj.payload = data
    tcph = tcpobj.pack(ipobj.source,
                       ipobj.destination) # tcp header
    # Packet Injection
    packet = iph + tcph + data
    s.sendto(packet, (dst_host, 0))
    print("[+] Packet Injected!")
# Script entry point (skipped on import).
if __name__=="__main__":
    main()
| Python | 0 | |
b0330c5243a326cda18a5b2d9167b0ace302bc13 | Add the SolveHR.py script | SolveHR.py | SolveHR.py | #!/usr/bin/env python3
"""
|-------------+----------------------------------------------------------|
| TITLE | SolveHR.py |
| DESCRIPTION | Fetches problem statement and initializes an answer file |
| | with a template code for selected programming language. |
| | The default language is C++14 (corresponding to -l cc). |
| AUTHOR | simba (szczerbiakadam@gmail.com) |
| DATE | 23-11-2016 |
| VERSION | 0.3 |
| EXAMPLES | SolveHR.py SolveMeFirst \ |
| | https://www.hackerrank.com/challenges/solve-me-first |
|-------------+----------------------------------------------------------|
"""
import os
import sys
import argparse
import datetime
LANGS = {
"c" : {
"NAME" : "ANSI C",
"SHEBANG" : None,
"OPENING_MULTILINE_COMMENT" : "/*",
"CLOSING_MULTILINE_COMMENT" : "*/",
"SKELETON_CODE" : [
"#include <stdio.h>", "", "",
"int main()", "{", " int T, t;", " scanf(\"%d\", &T);",
" for(t = 0; t < T; ++t)",
" {", " }", "", " return 0;", "}",
],
},
"cc" : {
"NAME" : "C++14",
"SHEBANG" : None,
"OPENING_MULTILINE_COMMENT" : "/*",
"CLOSING_MULTILINE_COMMENT" : "*/",
"SKELETON_CODE" : [
"#include <iostream>", "", "",
"int main(int argc, char **argv)", "{",
" int T;",
" std::cin >> T;", "",
" for (int t = 0; t < T; ++t)",
" {", " }", "", " return 0;", "}",
],
},
"py" : {
"NAME" : "Python 3",
"SHEBANG" : "#!/usr/bin/env python3",
"OPENING_MULTILINE_COMMENT" : "\"\"\"",
"CLOSING_MULTILINE_COMMENT" : "\"\"\"",
"SKELETON_CODE" : [
"def main():",
" for t in range(int(input())):",
" pass", "", "",
"if (__name__ == \"__main__\"):",
" main()",
],
}
}
HR_DIRECTORY = os.path.expanduser("~/Source/Learning/HackerRank/")
# Prepares the argument parser:
def init_argparse():
parser = argparse.ArgumentParser()
parser.add_argument("title", help="title of the problem statement")
parser.add_argument("link", help="link to the problem statement site")
parser.add_argument("-l", "--lang",
choices=list(LANGS.keys()),
help="language to be used for solving the challenge")
return parser
def create_solution_file(full_file_name, args):
    """Create the initial solution file for *args.title* in *args.lang*.

    The file gets a language-appropriate header comment (challenge title,
    link, date, language, author signature, license) followed by the
    skeleton code from LANGS.  An existing file is left untouched.

    Fixes: the file handle is now managed by a ``with`` statement so it is
    closed even if a write fails (the original used bare open()/close()).
    """
    selected_lang = LANGS[args.lang]
    # If the solution file exists already, it will not be overwritten:
    if os.path.isfile(full_file_name):
        print("File %s exists already - not overwriting." % (full_file_name,))
        return
    # Holds the consecutive lines of the initial answer file:
    lines = []
    # Shebang (plus a separating blank line) only for languages that use one:
    if (selected_lang["SHEBANG"] is not None):
        lines.append(selected_lang["SHEBANG"])
        lines.append("")
    # Header comment: opening delimiter, challenge title and hyperlink,
    # date/language, author signature, license, closing delimiter.
    lines.append(selected_lang["OPENING_MULTILINE_COMMENT"])
    line = "Solution to the \"" + args.title + "\" challenge by HackerRank:"
    lines.append(line)
    lines.append(args.link)
    line = "Written on " + datetime.datetime.today().strftime("%d-%m-%Y") \
        + " in " + selected_lang["NAME"]
    lines.append(line)
    lines.append("by simba (szczerbiakadam@gmail.com)")
    lines.append("License: MIT")
    lines.append(selected_lang["CLOSING_MULTILINE_COMMENT"])
    # Two blank lines, then the skeleton code:
    lines.append("")
    lines.append("")
    lines.extend(selected_lang["SKELETON_CODE"])
    # Write out the prepared solution; 'with' guarantees the handle closes.
    with open(full_file_name, "w") as solution_file:
        for line in lines:
            solution_file.write("%s\n" % (line,))
def main():
    """Parse arguments, scaffold the challenge directory and solution file,
    then open the file in vim after user confirmation."""
    parser = init_argparse()
    args = parser.parse_args()
    if args.lang is None:
        # C++14 is the default language.
        args.lang = "cc"
    print("Chosen language: %s" % (LANGS[args.lang]["NAME"],))
    # Makes sure that the directory for a solution exists:
    challenge_directory = os.path.join(HR_DIRECTORY, args.title)
    os.makedirs(challenge_directory, exist_ok=True)
    solution_file_name = os.path.join(challenge_directory,
                                      args.title + "." + args.lang)
    # TODO: Add problem statement fetching (the PDF file)
    # Prepares the initial solution file:
    create_solution_file(solution_file_name, args)
    print("Press any key to start the editor (Ctrl-C to bail out):")
    input()
    # Finally opens the created solution file in the editor:
    # NOTE(review): os.system with an interpolated path is shell-injectable
    # if the title contains shell metacharacters -- consider subprocess.run.
    os.system("vim " + solution_file_name)
main()
| Python | 0.000202 | |
ec484a404752c60a7c88ae84f79b4792c777dfd4 | Define ESCO ua and eu tender models | openprocurement/tender/esco/models.py | openprocurement/tender/esco/models.py | from zope.interface import implementer
from schematics.types import StringType
from openprocurement.api.models import ITender
from openprocurement.tender.openua.models import (
Tender as BaseTenderUA,
)
from openprocurement.tender.openeu.models import (
Tender as BaseTenderEU,
)
@implementer(ITender)
class Tender(BaseTenderUA):
    """ESCO tender based on the open-UA procedure (procurementMethodType 'esco.UA')."""
    procurementMethodType = StringType(default="esco.UA")
# Public alias; the class name is reused below for the EU variant.
TenderESCOUA = Tender
@implementer(ITender)
class Tender(BaseTenderEU):
    """ESCO tender based on the open-EU procedure (procurementMethodType 'esco.EU')."""
    procurementMethodType = StringType(default="esco.EU")
# Public alias for the EU variant defined above.
TenderESCOEU = Tender
| Python | 0 | |
82b9a66ea826b4463d82c69ba1703eab213efe83 | Add test for stack outputs | heat_integrationtests/functional/test_stack_outputs.py | heat_integrationtests/functional/test_stack_outputs.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat_integrationtests.functional import functional_base
class StackOutputsTest(functional_base.FunctionalTestsBase):
    """Functional test for the stack output_list/output_show API."""

    # Stack with two TestResources whose 'output' attributes are exposed
    # as stack outputs.
    template = '''
heat_template_version: 2015-10-15
resources:
  test_resource_a:
    type: OS::Heat::TestResource
    properties:
      value: 'a'
  test_resource_b:
    type: OS::Heat::TestResource
    properties:
      value: 'b'
outputs:
  resource_output_a:
    description: 'Output of resource a'
    value: { get_attr: [test_resource_a, output] }
  resource_output_b:
    description: 'Output of resource b'
    value: { get_attr: [test_resource_b, output] }
'''

    def test_outputs(self):
        """output_list returns key+description; output_show adds the value."""
        stack_identifier = self.stack_create(
            template=self.template
        )
        expected_list = [{u'output_key': u'resource_output_a',
                          u'description': u'Output of resource a'},
                         {u'output_key': u'resource_output_b',
                          u'description': u'Output of resource b'}]
        actual_list = self.client.stacks.output_list(
            stack_identifier)['outputs']
        self.assertEqual(expected_list, actual_list)
        expected_output_a = {
            u'output_value': u'a', u'output_key': u'resource_output_a',
            u'description': u'Output of resource a'}
        expected_output_b = {
            u'output_value': u'b', u'output_key': u'resource_output_b',
            u'description': u'Output of resource b'}
        actual_output_a = self.client.stacks.output_show(
            stack_identifier, 'resource_output_a')['output']
        actual_output_b = self.client.stacks.output_show(
            stack_identifier, 'resource_output_b')['output']
        self.assertEqual(expected_output_a, actual_output_a)
        self.assertEqual(expected_output_b, actual_output_b)
| Python | 0.00006 | |
78df4f45ea4b8c04ba8f34d8fc356345998c616b | Add TelnetServer.py under version control. | TelnetServer.py | TelnetServer.py | #!/usr/bin/env python
# coding: utf-8
import socket
import threading
# Canned protocol text (CRLF line endings for telnet clients).
welcome_slogan = '''Welcome novice!\r\n\
Type something and hit enter to see what happens.\r\n\
Be bold!\r\n\r\n'''
help_message = '''Command Description\r\n\
=============================================================\r\n\
HELP Print this help message\r\n\
TALK 'MESSAGE' Talk to other users in the same telnet system\r\n\
EXIT Quit the telnet service\r\n\r\n\
At your service. 20140819\r\n\r\n'''
goodbye_farewell = '''Have a lot of fun!\r\n'''
# Prompt shown before each command.
PS1 = 'TELNET# '
# Listen on all interfaces; module import has the side effect of binding
# the listening socket immediately.
HOST = ''
PORT = 56789
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(5)
clients = [] # list of clients connected
# Guards mutation of the shared `clients` list across handler threads.
lock = threading.Lock()
class telnetServer(threading.Thread):
    """One thread per connected client: a tiny command loop supporting
    HELP, TALK (broadcast to all clients), comments ('#') and EXIT."""
    def __init__(self, bind):
        threading.Thread.__init__(self)
        # *bind* is the (socket, address) pair returned by accept().
        (self.socket, self.address) = bind
    def run(self):
        # Register this client under the lock.
        lock.acquire()
        clients.append(self)
        lock.release()
        print ('+ %s:%s connected.' % self.address)
        self.socket.send(welcome_slogan.encode())
        while True:
            self.socket.send(PS1.encode())
            data = self.socket.recv(1024)
            # NOTE(review): decode() may raise UnicodeDecodeError on binary
            # input, killing this handler thread -- consider errors='replace'.
            temp = data.decode().strip()
            if not data:
                # Empty recv means the peer disconnected.
                break
            elif temp.upper() in ['BY', 'BYE', 'QUIT', 'EXIT']:
                break
            elif temp.lower() in ['?', 'help']:
                self.socket.send(help_message.encode())
            elif temp.startswith('#') or temp == '':
                # Comments and blank lines are ignored.
                pass
            elif temp[:5].upper() == 'TALK ':
                print ('%s %s' % (self.address, temp[5:]))
                # NOTE(review): broadcast iterates `clients` without holding
                # the lock, racing with connect/disconnect -- confirm intended.
                for c in clients:
                    c.socket.send(('%s %s\r\n' % (self.address, temp[5:])).encode())
            else:
                # Unknown input is simply echoed back.
                self.socket.send(data)
        self.socket.send(goodbye_farewell.encode())
        self.socket.close()
        print ('- %s:%s disconnected.' % self.address)
        # Deregister under the lock.
        lock.acquire()
        clients.remove(self)
        lock.release()
# Main accept loop: hand each new connection its own handler thread.
while True: # wait for socket to connect
    # send socket to telnetserver and start monitoring
    telnetServer(s.accept()).start()
| Python | 0 | |
8b2eb3bece67a1eb81a6165238205b05361f2ec3 | fix key case | corehq/apps/ota/tasks.py | corehq/apps/ota/tasks.py | from celery.task import task
from couchdbkit.exceptions import ResourceNotFound
from casexml.apps.case.xml import V1
from casexml.apps.phone.restore import RestoreConfig
from corehq.apps.users.models import CommCareUser
from soil import DownloadBase
@task
def prime_restore(domain, usernames_or_ids, version=V1, cache_timeout_hours=None,
                  overwrite_cache=False, check_cache_only=False):
    """
    Task to generate and cache a restore payload for each user passed in.
    :param domain: The domain name for the users
    :param usernames_or_ids: List of usernames or user IDs
    :param version: Restore format version
    :param cache_timeout_hours: Hours to cache the payload
    :param overwrite_cache: If True overwrite any existing cache
    :param check_cache_only: Don't generate the payload, just check if it is already cached

    Returns a dict with a 'messages' list describing the outcome per user.
    """
    total = len(usernames_or_ids)
    DownloadBase.set_progress(prime_restore, 0, total)
    ret = {'messages': []}
    for i, username_or_id in enumerate(usernames_or_ids):
        couch_user = get_user(username_or_id)
        if not couch_user:
            ret['messages'].append('WARNING: User not found: {}'.format(username_or_id))
            continue
        elif couch_user.domain != domain:
            # Refuse to touch users that belong to a different domain.
            ret['messages'].append("WARNING: User '{}' not from domain '{}'".format(
                username_or_id,
                domain
            ))
            continue
        try:
            project = couch_user.project
            commtrack_settings = project.commtrack_settings
            stock_settings = commtrack_settings.get_ota_restore_settings() if commtrack_settings else None
            # NOTE(review): cache_timeout_hours defaults to None, and
            # ``None * 60 * 60`` raises TypeError -- callers apparently must
            # always pass a numeric value; confirm with the task's callers.
            restore_config = RestoreConfig(
                couch_user.to_casexml_user(), None, version, None,
                items=True,
                stock_settings=stock_settings,
                domain=project,
                force_cache=True,
                cache_timeout=cache_timeout_hours * 60 * 60,
                overwrite_cache=overwrite_cache
            )
            if check_cache_only:
                # Report-only mode: never generates a payload.
                cached_payload = _get_cached_payload(restore_config)
                ret['messages'].append('Restore cache {} for user: {}'.format(
                    'EXISTS' if cached_payload else 'does not exist',
                    couch_user.human_friendly_name,
                ))
            else:
                # Generate (and force-cache) the payload, then verify it landed.
                restore_config.get_payload()
                cached_payload = _get_cached_payload(restore_config)
                if cached_payload:
                    ret['messages'].append('SUCCESS: Restore cached successfully for user: {}'.format(
                        couch_user.human_friendly_name,
                    ))
                else:
                    ret['messages'].append('ERROR: Restore completed by cache still empty for user: {}'.format(
                        couch_user.human_friendly_name,
                    ))
        except Exception as e:
            # Best-effort batch job: record the failure and keep going.
            ret['messages'].append('ERROR: Error processing user: {}'.format(str(e)))
        DownloadBase.set_progress(prime_restore, i + 1, total)
    return ret
def _get_cached_payload(restore_config):
original = restore_config.overwrite_cache
try:
# must set this to False before attempting to check the cache
restore_config.overwrite_cache = False
payload = restore_config.get_cached_payload()
finally:
restore_config.overwrite_cache = original
return payload
def get_user(username_or_id):
    """Look up a CommCareUser by document id, falling back to username.

    Returns None when neither lookup finds the user.
    """
    for lookup in (CommCareUser.get, CommCareUser.get_by_username):
        try:
            return lookup(username_or_id)
        except ResourceNotFound:
            continue
    return None
| from celery.task import task
from couchdbkit.exceptions import ResourceNotFound
from casexml.apps.case.xml import V1
from casexml.apps.phone.restore import RestoreConfig
from corehq.apps.users.models import CommCareUser
from soil import DownloadBase
@task
def prime_restore(domain, usernames_or_ids, version=V1, cache_timeout_hours=None,
                  overwrite_cache=False, check_cache_only=False):
    """
    Task to generate and cache a restore payload for each user passed in.
    :param domain: The domain name for the users
    :param usernames_or_ids: List of usernames or user IDs
    :param version: Restore format version
    :param cache_timeout_hours: Hours to cache the payload
    :param overwrite_cache: If True overwrite any existing cache
    :param check_cache_only: Don't generate the payload, just check if it is already cached

    Returns a dict with a 'messages' list describing the outcome per user.
    """
    total = len(usernames_or_ids)
    DownloadBase.set_progress(prime_restore, 0, total)
    ret = {'messages': []}
    for i, username_or_id in enumerate(usernames_or_ids):
        couch_user = get_user(username_or_id)
        if not couch_user:
            ret['messages'].append('WARNING: User not found: {}'.format(username_or_id))
            continue
        elif couch_user.domain != domain:
            # Refuse to touch users that belong to a different domain.
            ret['messages'].append("WARNING: User '{}' not from domain '{}'".format(
                username_or_id,
                domain
            ))
            continue
        try:
            project = couch_user.project
            commtrack_settings = project.commtrack_settings
            stock_settings = commtrack_settings.get_ota_restore_settings() if commtrack_settings else None
            restore_config = RestoreConfig(
                couch_user.to_casexml_user(), None, version, None,
                items=True,
                stock_settings=stock_settings,
                domain=project,
                force_cache=True,
                cache_timeout=cache_timeout_hours * 60 * 60,
                overwrite_cache=overwrite_cache
            )
            if check_cache_only:
                cached_payload = _get_cached_payload(restore_config)
                # BUG FIX: was ret['MESSAGES'], which raised KeyError -- the
                # dict only has a lowercase 'messages' key.
                ret['messages'].append('Restore cache {} for user: {}'.format(
                    'EXISTS' if cached_payload else 'does not exist',
                    couch_user.human_friendly_name,
                ))
            else:
                restore_config.get_payload()
                cached_payload = _get_cached_payload(restore_config)
                if cached_payload:
                    ret['messages'].append('SUCCESS: Restore cached successfully for user: {}'.format(
                        couch_user.human_friendly_name,
                    ))
                else:
                    ret['messages'].append('ERROR: Restore completed by cache still empty for user: {}'.format(
                        couch_user.human_friendly_name,
                    ))
        except Exception as e:
            # Best-effort batch job: record the failure and keep going.
            ret['messages'].append('ERROR: Error processing user: {}'.format(str(e)))
        DownloadBase.set_progress(prime_restore, i + 1, total)
    return ret
def _get_cached_payload(restore_config):
    """Fetch the cached restore payload without disturbing the cache,
    restoring the caller's ``overwrite_cache`` flag afterwards."""
    original = restore_config.overwrite_cache
    try:
        # must set this to False before attempting to check the cache
        restore_config.overwrite_cache = False
        payload = restore_config.get_cached_payload()
    finally:
        restore_config.overwrite_cache = original
    return payload
def get_user(username_or_id):
    """Look up a CommCareUser by document id, falling back to username.
    Returns None when neither lookup finds the user."""
    try:
        couch_user = CommCareUser.get(username_or_id)
    except ResourceNotFound:
        try:
            couch_user = CommCareUser.get_by_username(username_or_id)
        except ResourceNotFound:
            return None
    return couch_user
| Python | 0.999689 |
eaeb02839913136909cccc9a99612a1eb7145b97 | support state hash in ota restore if specified | corehq/apps/ota/views.py | corehq/apps/ota/views.py | from corehq.apps.users.models import CouchUser
from django_digest.decorators import *
from casexml.apps.phone.restore import generate_restore_response
@httpdigest
def restore(request, domain):
    """
    We override restore because we have to supply our own
    user model (and have the domain in the url)

    Reads 'since' (restore id), 'version' and 'state' (state hash) from the
    query string and delegates to casexml's generate_restore_response.
    Responds 401 when the authenticated Django user has no linked
    CommCare user.
    """
    user = request.user
    restore_id = request.GET.get('since')
    api_version = request.GET.get('version', "1.0")
    state_hash = request.GET.get('state')
    username = user.username
    couch_user = CouchUser.from_django_user(user)
    if not couch_user.is_commcare_user():
        response = HttpResponse("No linked chw found for %s" % username)
        response.status_code = 401 # Authentication Failure
        return response
    return generate_restore_response(couch_user.to_casexml_user(), restore_id,
                                     api_version, state_hash)
| from corehq.apps.users.models import CouchUser
from django_digest.decorators import *
from casexml.apps.phone.restore import generate_restore_payload
@httpdigest
def restore(request, domain):
    """
    We override restore because we have to supply our own
    user model (and have the domain in the url)

    Older variant: builds the payload with generate_restore_payload and
    wraps it in a text/xml HttpResponse itself (no state-hash support).
    """
    user = request.user
    restore_id = request.GET.get('since')
    api_version = request.GET.get('version', "1.0")
    username = user.username
    couch_user = CouchUser.from_django_user(user)
    if not couch_user.is_commcare_user():
        response = HttpResponse("No linked chw found for %s" % username)
        response.status_code = 401 # Authentication Failure
        return response
    response = generate_restore_payload(couch_user.to_casexml_user(), restore_id,
                                        api_version)
return HttpResponse(response, mimetype="text/xml") | Python | 0 |
dca0404e6f14194be3a5926e522bbeea375e8456 | add net spider rokic's version | crawler/discount_info.py | crawler/discount_info.py | import json
import requests
from bs4 import BeautifulSoup
# Scrape SteamDB's merged sale listing and POST the parsed discounts to our API.
#
# BUG FIX below: requests.get takes the keyword ``headers=``; the original
# ``header=headers`` raised "TypeError: request() got an unexpected keyword
# argument 'header'".
# NOTE(review): the final line of this script (outside this block) calls
# ``request.post`` -- a NameError; it should be ``requests.post``.
DOMAIN = ""
API = "http://%s/api/" % (DOMAIN)
STEAMDB_SALE_URL = "https://steamdb.info/sales/?merged=true&cc=cn"

# Browser-like headers so SteamDB serves the normal HTML page.
headers = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Charset': 'UTF-8,*;q=0.5',
    'Accept-Encoding': 'gzip,deflate,sdch',
    'Accept-Language': 'en-US,en;q=0.8',
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0'
}

r = requests.get(STEAMDB_SALE_URL, headers=headers)
content = r.content.decode().replace('\n', '')
jar = BeautifulSoup(content, 'lxml').tbody

sweets = ['name', 'discount', 'price', 'rating']
box = []  # collected sale entries
for cookies in jar:  # one <tr> per discounted item
    try:
        # Rows carrying data-appid are games ...
        bottle = {'id': cookies['data-appid'], 'type': 'game'}
    except KeyError:
        # ... otherwise data-subid marks a package.
        bottle = {'id': cookies['data-subid'], 'type': 'package'}
    # Recursively drill into a cell until a text node is found (or None).
    cast = lambda magic: None if not magic else magic.string if magic.string else cast(magic.findChild())
    flour = cookies.findChildren('td')
    # Columns 2-4 are name/discount/price, column 6 is the rating.
    biscuits = [cast(i) for i in flour[2:5] + [flour[6]]]
    bottle.update(zip(sweets, biscuits))
    box.append(bottle)
request.post(API, json.dumps(box)) | Python | 0 | |
1602513f2ee508ed70ec08af90a94cf150d14189 | Add grep_token_logs.py | skolo/grep_token_logs.py | skolo/grep_token_logs.py | #!/usr/bin/env python
# Copyright 2018 Google LLC.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Search the syslog on a jumphost to determine when auth tokens changed."""
import sys
SYSLOG = '/var/log/syslog'

WHITELIST_LINES = [
  # (process-name, pattern) pairs worth keeping from the syslog.
  ('metadata-server', 'Updated token: '),
  ('metadata-server', 'Token requested by '),
  ('get-oauth2-token', 'Wrote new auth token: '),
]


def transform_line(line):
  """Return a trimmed form of *line* if it matches a whitelisted pattern.

  Keeps the leading timestamp (everything before 'jumphost'), the process
  name, the matched pattern, and the pattern's suffix.  Returns None for
  lines that match no whitelisted pattern.
  """
  for process_name, marker in WHITELIST_LINES:
    if marker not in line:
      continue
    # Log lines look like this:
    # pylint: disable=line-too-long
    # Mar 12 09:58:43 jumphost-win-02 metadata-server[5259]: I0312 09:58:43.756257 5259 server.go:87] Updated token: [redacted]
    prefix = line.partition('jumphost')[0]
    tail = line.split(marker, 1)[1].rstrip()
    return prefix + process_name + ': ' + marker + tail
  return None
def read_syslog():
  """Read the syslog, returning any relevant lines."""
  # NOTE(review): the file is opened in 'rb', so under Python 3 each line
  # would be bytes while transform_line matches str patterns -- this code
  # appears to target Python 2 (see the print statement in main()); confirm.
  lines = []
  with open(SYSLOG, 'rb') as f:
    for line in f:
      tf = transform_line(line)
      if tf:
        lines.append(tf)
  return lines
def filter_logs(ip, log_lines):
  """Keep only the log lines relevant to a particular IP address.

  First collects every token served to *ip* (the text after ', serving '),
  then keeps lines mentioning either the address itself or one of those
  tokens.  'Token requested by' lines are dropped in the token pass since
  we don't care about other bots that used the same token.
  """
  # Tokens the jumphost handed out to this address.
  served_tokens = [
      line.split(', serving ', 1)[1]
      for line in log_lines
      if ip and ip in line
  ]

  kept = []
  for line in log_lines:
    if ip in line:
      kept.append(line)
      continue
    for token in served_tokens:
      if token in line and 'Token requested by' not in line:
        kept.append(line)
  return kept
def main():
  """Read the syslog, filter to relevant lines, then print them."""
  lines = read_syslog()
  # Optional first CLI argument: an IP address to filter on.
  if len(sys.argv) > 1:
    lines = filter_logs(sys.argv[1], lines)
  for line in lines:
    # Python 2 print statement (this script predates a py3 port).
    print line


if __name__ == '__main__':
  main()
| Python | 0.000297 | |
04270ab58f88302f7b0fcd314ae29258c1c9a043 | create mi matrix | data/build_probe_data.py | data/build_probe_data.py | import tensorflow.compat.v1 as tf
from tqdm import tqdm
from collections import defaultdict
import sklearn
import numpy as np
import json
# Build movie co-occurrence / mutual-information matrices from the
# "@"-separated movie sequences stored on GCS, and write the vocabulary
# metadata plus both matrices back to gs://e2e_central/data/probes/.
with tf.io.gfile.GFile("gs://e2e_central/data/ml-sequences-train.tsv", 'r') as f:
    sequence_list = list(f)
data = []
# Each line is an "@"-separated list of movie titles; strip whitespace and
# drop empty fragments.
for sequence_str in tqdm(sequence_list):
    data.append([x.strip() for x in sequence_str.replace("\n", "").replace('\t', "").split("@") if x.strip() != ""])
# Test sequences are appended to the same ``data`` list as the train ones.
with tf.io.gfile.GFile("gs://e2e_central/data/ml-sequences-test.tsv", 'r') as f:
    sequence_list = list(f)
for sequence_str in tqdm(sequence_list):
    data.append([x.strip() for x in sequence_str.replace("\n", "").replace('\t', "").split("@") if x.strip() != ""])
# def write_json(filepath, dictionary):
#     with tf.io.gfile.GFile(filepath, 'w') as f:
#         json.dump(dictionary, filepath)
# def write_json(filepath, dictionary):
#     with tf.io.gfile.GFile(filepath, 'w') as f:
#         json.dump(dictionary, filepath)
# Vocabulary and occurrence counts (a movie is counted once per appearance).
movie_set = set()
popularity = defaultdict(int)
for seq in data:
    for movie in seq:
        movie_set.add(movie)
        popularity[movie] += 1
# for seq in data:
#     if len(set(seq)) != 10:
#         print(seq)
num_sequences = len(data)
popular_movies = list(sorted(movie_set, key=lambda x: popularity[x], reverse=True))
# Stable, sorted vocabulary defines the id assignment.
movie_set = sorted(movie_set)
vocab_size = len(movie_set)
embed = dict(zip(movie_set, list(range(vocab_size))))
unembed = dict(zip(list(range(vocab_size)), movie_set))
movie_ids = {
    "all_movies": movie_set,
    "movie_count": vocab_size,
    "movie_to_id": embed,
    "id_to_movie": unembed,
    "popularity": popularity
}
with tf.io.gfile.GFile("gs://e2e_central/data/probes/movie_id_info.json", 'w') as f:
    json.dump(movie_ids, f)
def create_cooccurrence(sequences):
    # Dense (vocab_size x vocab_size) count matrix; a movie co-occurs with
    # itself, so the diagonal holds per-sequence self counts.
    co_matrix = np.zeros((vocab_size, vocab_size))
    print("building cooccurrence matrix")
    for seq in tqdm(sequences):
        for movie1 in seq:
            for movie2 in seq:
                co_matrix[embed[movie1]][embed[movie2]] += 1
    return co_matrix
def get_mutual_info(co_matrix):
    # NOTE(review): ``total`` is unused, and since ``px``/``py`` are 1-D
    # arrays, ``px @ py`` is a scalar dot product rather than the outer
    # product P(x)P(y) that pointwise mutual information would need --
    # confirm whether px[:, None] * py[None, :] was intended.
    total = np.sum(co_matrix)
    popularities = np.array([popularity[unembed[x]] for x in range(vocab_size)])
    pxy = co_matrix / num_sequences
    px = popularities / num_sequences
    py = (popularities / num_sequences).T
    mutual_info = np.log(pxy / (px @ py))
    return mutual_info
co_matrix = create_cooccurrence(data)
mi = get_mutual_info(co_matrix)
with tf.io.gfile.GFile("gs://e2e_central/data/probes/co_matrix.npy", 'w') as f:
    np.save(f, co_matrix)
with tf.io.gfile.GFile("gs://e2e_central/data/probes/mi_matrix.npy", 'w') as f:
    np.save(f, mi)
def get_related_movies(movie="The Lord of the Rings: The Return of the King (2003)"):
    # Movies sorted by descending co-occurrence count with ``movie``.
    movie_number = embed[movie]
    row = co_matrix[movie_number, :]
    return [unembed[x] for x in np.argsort(row)][::-1]
print(get_related_movies()[:10])
print("popular: ", popular_movies[:10])
print("unpopular: ", popular_movies[-10:])
def display_10(matrix):
    # Print the sub-matrix restricted to the ten most popular movies.
    pop = popular_movies[:10]
    pop_ids = [embed[x] for x in pop]
    print(pop)
    print(matrix[pop_ids, :][:, pop_ids])
display_10(co_matrix)
display_10(mi)
| Python | 0.000001 | |
1f40c23eedc638c1e398a23b4d12c78aff93c6da | Implement a method for the Authenticator to extract the authenticated user from the token cookie. | authenticator_test.py | authenticator_test.py | """Unit test for the AncientAuth authenticator class."""
from datetime import datetime
from Crypto.PublicKey.RSA import importKey
import authenticator
import calendar
import token_cookie
import token_pb2
import unittest
_TEST_KEY = """-----BEGIN PRIVATE KEY-----
MIICdwIBADANBgkqhkiG9w0BAQEFAASCAmEwggJdAgEAAoGBAMgTwGPXIBLojKVq
sJRUecNhOfPPuKEwM/z6h2qnAvxZpTystm4JO/NWJbJ8DBPXlAMkk49FJqk8D7xb
liY6uwMgZtLPg8vtMAxpLt9oKddYHC/xpYFeE0nsM1CO1IeA+/2c7KvVwp5wmtpK
nOCDJWxjJXLy4XbjjF9LQWv1kBRjAgMBAAECgYBG9hpqTsZlbm1Tzf9K3JtDHJy9
SJMnOD14IDMfNJTug60DVA5wAH5u08MTBsQR1Yf9hV+AlPodU9wQ5jre3D2vQabn
SP35fV2xaJzZdoXjel/fWMKSJGEsFg4E99eGEevygjxXZWKs1cqWrMKnt/0vQURX
krwR1gnULdmEBwwqoQJBAOSCfqN9W35Vhn3DJYIENFTn2pTFx//5USRlP0dD3djG
WbeHXQMxR2+/KfM5im+xcEDpsYIY8mW8vto9fMNy/hcCQQDgJZtot9zm9HDKy7Kj
DzDopZQLko2Lh3EZ/LtaXvLFe8UiEj9XJgsBIPsyaWkUD1Q3KeeDgqQZajBqKxP5
lveVAkEAo9IKCBtu5HtcF/03fqaU/enagp3obFLJIVaUrvqwqSBKYZDh1dAWbr6V
zJGL9dc3qtHfOG26GcXe7Yb3Uwe1sQJBAJFttQRfbtLmPBxHx3JmU8xOSdysTGwA
B5Dd2k0LF6ar5D5z6mbHxxIHbRPLMqMSQwi7hntcEs5uiFUJ+B7TJXUCQD0cmQEn
onuBkmfkSL5GC74M1MvjnwHwoIOA9HsfVnaGjtUgmwWoYRA3KWQoB1u3BhBT6bFC
5/dZFrKYHgp1+pQ=
-----END PRIVATE KEY-----
"""
_TEST_CERT = """-----BEGIN CERTIFICATE-----
MIICIDCCAYmgAwIBAgIBADANBgkqhkiG9w0BAQsFADASMRAwDgYDVQQDDAdUZXN0
IENBMCAXDTE0MDEyMDA1MTUzNloYDzIxMTMxMjI3MDUxNTM2WjAbMRkwFwYDVQQD
DBBUZXN0IENlcnRpZmljYXRlMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDI
E8Bj1yAS6IylarCUVHnDYTnzz7ihMDP8+odqpwL8WaU8rLZuCTvzViWyfAwT15QD
JJOPRSapPA+8W5YmOrsDIGbSz4PL7TAMaS7faCnXWBwv8aWBXhNJ7DNQjtSHgPv9
nOyr1cKecJraSpzggyVsYyVy8uF244xfS0Fr9ZAUYwIDAQABo3sweTAJBgNVHRME
AjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0
ZTAdBgNVHQ4EFgQUt3K0lx1RbBOzBTsd4axqVwXryB8wHwYDVR0jBBgwFoAUkkmw
1vM/7BfNZfVyAYdtWNIOUSUwDQYJKoZIhvcNAQELBQADgYEAE9mEQC4o9ARUuDZD
rHUiL24AFhiZaWyRswsWEuDS9y4KGk0FxeswGLhPRr8UhHppWu/zG36IzlpAIihv
kZiJrldQGN58P4vW/2x5gaqEtv/GMgnK58KntHI/JNczRgTfpScJo2Yy/iImB7xR
kTOQLEMHLOKdUomfTE3bslbH9u8=
-----END CERTIFICATE-----
"""
_TEST_CA = """-----BEGIN CERTIFICATE-----
MIIB9DCCAV2gAwIBAgIJAKHxW+D7fp9HMA0GCSqGSIb3DQEBCwUAMBIxEDAOBgNV
BAMMB1Rlc3QgQ0EwIBcNMTQwMTIwMDUwOTE3WhgPMjExMzEyMjcwNTA5MTdaMBIx
EDAOBgNVBAMMB1Rlc3QgQ0EwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJ4S
4rS375IHRkXRxFITRE7DVZEXhnrQRMOzgr1gwhyhWBUGEugLYo7uVoO9E2npdL1N
MZkJV60AahuacVxmqjB4ippm2QVBPNJocAJLbfEr/luUEZkYRWFVyNbQL5K0WH71
NFocMP59dDs+Ib888o1NGpwwv95upbGjDJapiiILAgMBAAGjUDBOMB0GA1UdDgQW
BBSSSbDW8z/sF81l9XIBh21Y0g5RJTAfBgNVHSMEGDAWgBSSSbDW8z/sF81l9XIB
h21Y0g5RJTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4GBAG7Iaknm91s7
94Bv+9gt/2OekdxHNsdyyoVUoZl7r2fOMklwduiPCeKsjwluGo01gu4mZGPKF7F/
j/CO1MYpyCm2YnwMc6eQzCUtJpxPcQi3AQzL2G80QCIBgFmG+wCBrYKRtIEFlKn6
MAtouJXWLkCCY3IH5UoY7ObrIK639szY
-----END CERTIFICATE-----
"""
class AuthenticatorTest(unittest.TestCase):
    """Test the different methods of the AncientAuth authenticator."""

    def test_authenticated_user(self):
        """Test if we can extract the user from a TokenCookie."""
        # Sign a token cookie with the test private key ...
        key = importKey(_TEST_KEY)
        token = token_pb2.TokenCookie()
        token.basic_creds.user_name = 'testosteronius'
        token.basic_creds.scope.append('users')
        # Token expires 30 seconds from now (seconds since epoch, UTC).
        token.basic_creds.expires = calendar.timegm(
            datetime.utcnow().timetuple()) + 30
        codec = token_cookie.TokenCookieCodec(token, privkey=key)
        cookie = codec.encode()
        # ... then verify it with the matching certificate.
        auth = authenticator.Authenticator("Unit Test", cert=_TEST_CERT)
        self.assertEquals(auth.get_authenticated_user(cookie),
                          'testosteronius')
| Python | 0 | |
417ff63118c967205ee630c5183b19a949a6c157 | Add migrations for indicadores. | indicadores/migrations/0002_auto_20170224_1535.py | indicadores/migrations/0002_auto_20170224_1535.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-02-24 15:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Ingreso.fecha`` to a plain DateField (auto-generated)."""

    dependencies = [
        ('indicadores', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ingreso',
            name='fecha',
            field=models.DateField(),
        ),
    ]
| Python | 0 | |
b7bf4586fea207453225a87fb85df59ccfc94e80 | Add missing migration related to django-simple-history update | jarbas/core/migrations/0032_auto_20170613_0641.py | jarbas/core/migrations/0032_auto_20170613_0641.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-13 09:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``history_type`` on the historical reimbursement table
    (the +/~/- created/changed/deleted marker, presumably kept in sync
    with a django-simple-history upgrade per the commit message)."""

    dependencies = [
        ('core', '0031_add_index_together_for_reimbursement'),
    ]

    operations = [
        migrations.AlterField(
            model_name='historicalreimbursement',
            name='history_type',
            field=models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1),
        ),
    ]
| Python | 0.000001 | |
60f051590a61ec4435f9bc5d46e430c5feb36f16 | Add agent | agent/agent.py | agent/agent.py | #!/usr/bin/env python
#http://www.acmesystems.it/python_httpd
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import json, subprocess, os, time
# Canned JSON response bodies for the agent's HTTP endpoints.
HELLO_MESSAGE = {'message':'hello, please use JSON via POST!'}
ERROR_JSON_MESSAGE = {'message':'POST content type must be application/json!'}
ERROR_BADJSON_MESSAGE = {'message':'POST content must be valid json!'}
ERROR_BADPASS_MESSAGE = {'message':'Wrong secret key!'}
ERROR_CMDERROR_MESSAGE = {'message':'Bad command!'}
# Commands execute_POST is willing to dispatch.
ACTIONS = {'run-test', 'self-test'}
# Per-job output files live under ./tmp/<last-5-of-job-id>/<job-id>/.
TMP = os.path.abspath(r'./tmp')
#TEST_DRIVER = os.path.abspath(r'../chrome-webpage-profiler/test_driver.py')
# /bin/cat is a stand-in driver (echoes the config) until the real one is wired up.
TEST_DRIVER = os.path.abspath(r'/bin/cat')
# NOTE: the key is to avoid unintentional access, not to secure the agent
SECRET_KEY = '1a2b'
def _ensure_dir(path):
    """Create *path* if missing; return an error message string or None."""
    if not os.path.isdir(path):
        try:
            os.makedirs(path)
        except Exception:
            # BUG FIX: the original built the message as a tuple
            # ('...%s', path) -- comma instead of the % operator.
            return 'Error making output directory: %s' % path
    return None

def run_test(body):
    """Run the test driver against the 'tests-config' payload in *body*.

    Creates a per-job output directory under TMP, writes the config to
    tests.json, runs TEST_DRIVER on it, and returns a JSON string with the
    job id and produced files (or an error message on any failure).
    """
    if not body.get('tests-config'):
        # BUG FIX: the original wrapped ERROR_CMDERROR_MESSAGE in another
        # {'message': ...} dict, producing a double-nested payload that is
        # inconsistent with execute_POST's error responses.
        return json.dumps(ERROR_CMDERROR_MESSAGE)
    err = _ensure_dir(TMP)
    if err:
        return json.dumps({'message': err})
    if not os.path.isfile(TEST_DRIVER):
        return json.dumps({'message': 'No test driver found at %s' % TEST_DRIVER})
    # Millisecond timestamp doubles as the job id; its last five digits
    # shard jobs across index directories.
    jobId = "%d" % (time.time() * 1000)
    jobIdIndex = jobId[-5:]
    jobIdIndexPath = os.path.join(TMP, jobIdIndex)
    jobIdPath = os.path.join(jobIdIndexPath, jobId)
    testConfig = os.path.join(jobIdPath, 'tests.json')
    for directory in (jobIdIndexPath, jobIdPath):
        err = _ensure_dir(directory)
        if err:
            return json.dumps({'message': err})
    with open(testConfig, 'w') as outfile:
        json.dump(body['tests-config'], outfile)
    # Run the driver with the job directory as its working directory so all
    # its outputs land there.
    p = subprocess.Popen([TEST_DRIVER, testConfig], cwd=jobIdPath)
    rc = p.wait()
    if rc != 0:
        return json.dumps({'message': 'FAIL. return code%d' % rc})
    response = {
        'message': 'OK. Done',
        'job-id': jobId,
        # Report result files under the public /tmp/ URL prefix.
        'files': [os.path.join('/tmp/', jobIdIndex, jobId, f)
                  for f in os.listdir(jobIdPath)],
    }
    return json.dumps(response)
def self_test():
    """Run a basic health check (disk usage via ``df -h``) and return JSON."""
    response = {'message': 'self test done', 'results': {} }
    # ``exit 0`` keeps check_output from raising on a nonzero df status.
    rc = subprocess.check_output('df -h; exit 0', stderr=subprocess.STDOUT, shell=True)
    response['results']['df'] = rc
    return json.dumps(response)
def execute_POST(body):
    """Validate a JSON command payload and dispatch it.

    The payload must parse as JSON, carry the shared SECRET_KEY, and name
    one of the supported ACTIONS.  Always returns a JSON string.
    """
    try:
        request = json.loads(body)
    except ValueError:
        return json.dumps(ERROR_BADJSON_MESSAGE)
    if request.get('key') != SECRET_KEY:
        return json.dumps(ERROR_BADPASS_MESSAGE)
    action = request.get('action')
    if action not in ACTIONS:
        return json.dumps(ERROR_CMDERROR_MESSAGE)
    if action == 'run-test':
        return run_test(request)
    elif action == 'self-test':
        return self_test()
class S(BaseHTTPRequestHandler):
    """HTTP handler: GET returns a hello message, POST runs a JSON command."""
    def _set_headers(self):
        # Every response is 200 with a JSON content type, even errors --
        # failures are signalled inside the JSON body instead.
        self.send_response(200)
        self.send_header('Content-type', 'application/json')
        self.end_headers()
    def do_GET(self):
        self._set_headers()
        body = json.dumps(HELLO_MESSAGE)
        self.wfile.write(body)
    def do_HEAD(self):
        self._set_headers()
    def do_POST(self):
        self._set_headers()
        # Python 2 API (headers.getheader); defaults to 0 when absent.
        content_len = int(self.headers.getheader('content-length', 0))
        content_type = self.headers.getheader('content-type', 0)
        # NOTE(review): a missing content-type header yields the int 0 here,
        # and ``0.lower()`` raises AttributeError -- confirm whether a ''
        # default was intended.
        if content_type.lower() != 'application/json':
            response_body = json.dumps(ERROR_JSON_MESSAGE)
        else:
            post_body = self.rfile.read(content_len)
            response_body = execute_POST(post_body)
        self.wfile.write(response_body)
def run(server_class=HTTPServer, handler_class=S, port=80):
    """Start the agent's HTTP server on all interfaces and serve forever."""
    server_address = ('', port)
    httpd = server_class(server_address, handler_class)
    print 'Starting httpd...'
    httpd.serve_forever()
if __name__ == "__main__":
    from sys import argv
    # Optional single CLI argument: the port to listen on (default 80).
    if len(argv) == 2:
        run(port=int(argv[1]))
    else:
        run()
| Python | 0.000001 | |
7082f80d5be56073d9d2a66653188b2cee248a8e | add basic tests of search and matrix views | src/encoded/tests/test_search.py | src/encoded/tests/test_search.py | # Use workbook fixture from BDD tests (including elasticsearch)
from .features.conftest import app_settings, app, workbook
def test_search_view(workbook, testapp):
    """Smoke-test /search/: JSON-LD envelope fields plus a non-empty result set."""
    res = testapp.get('/search/').json
    assert res['@type'] == ['Search']
    assert res['@id'] == '/search/'
    assert res['@context'] == '/terms/'
    assert res['notification'] == 'Success'
    assert res['title'] == 'Search'
    assert res['total'] > 0
    assert 'facets' in res
    assert 'filters' in res
    assert 'columns' in res
    assert '@graph' in res
def test_matrix_view(workbook, testapp):
    """Smoke-test /experiments/matrix: envelope plus the matrix axis config.

    x groups by assay, y groups hierarchically by biosample type then term
    name; both axes must come back with non-empty buckets.
    """
    res = testapp.get('/experiments/matrix').json
    assert res['@type'] == ['Matrix']
    assert res['@id'] == '/experiments/matrix'
    assert res['@context'] == '/terms/'
    assert res['notification'] == 'Success'
    assert res['title'] == 'Experiment Matrix'
    assert res['total'] > 0
    assert 'facets' in res
    assert 'filters' in res
    assert 'matrix' in res
    assert res['matrix']['max_cell_doc_count'] > 0
    assert res['matrix']['search_base'] == '/search/?type=experiment'
    assert res['matrix']['x']['group_by'] == 'assay_term_name'
    assert res['matrix']['x']['label'] == 'Assay'
    assert res['matrix']['x']['limit'] == 20
    assert len(res['matrix']['x']['buckets']) > 0
    assert len(res['matrix']['x']['facets']) > 0
    assert res['matrix']['y']['group_by'] == ['replicates.library.biosample.biosample_type', 'biosample_term_name']
    assert res['matrix']['y']['label'] == 'Biosample'
    assert res['matrix']['y']['limit'] == 5
    assert len(res['matrix']['y']['replicates.library.biosample.biosample_type']['buckets']) > 0
    assert len(res['matrix']['y']['replicates.library.biosample.biosample_type']['buckets'][0]['biosample_term_name']['buckets']) > 0
| Python | 0 | |
2cd1da31b099cbf37552b2a049c3df6619e0e64f | Add helper enums for type encodings | rma/redis_types.py | rma/redis_types.py | REDIS_ENCODING_ID_RAW = 0
# Numeric ids for Redis object encodings.  REDIS_ENCODING_ID_RAW (= 0) is
# restated here so the module is self-contained.
REDIS_ENCODING_ID_RAW = 0
REDIS_ENCODING_ID_INT = 1
REDIS_ENCODING_ID_EMBSTR = 2
REDIS_ENCODING_ID_HASHTABLE = 3
REDIS_ENCODING_ID_ZIPLIST = 4
REDIS_ENCODING_ID_LINKEDLIST = 5
REDIS_ENCODING_ID_QUICKLIST = 6
REDIS_ENCODING_ID_INTSET = 7
REDIS_ENCODING_ID_SKIPLIST = 8

# Encoding names as Redis reports them (bytes) -> numeric id.
REDIS_ENCODING_STR_TO_ID_LIB = {
    b'raw': REDIS_ENCODING_ID_RAW,
    b'int': REDIS_ENCODING_ID_INT,
    b'embstr': REDIS_ENCODING_ID_EMBSTR,
    b'hashtable': REDIS_ENCODING_ID_HASHTABLE,
    b'ziplist': REDIS_ENCODING_ID_ZIPLIST,
    b'linkedlist': REDIS_ENCODING_ID_LINKEDLIST,
    b'quicklist': REDIS_ENCODING_ID_QUICKLIST,
    b'intset': REDIS_ENCODING_ID_INTSET,
    b'skiplist': REDIS_ENCODING_ID_SKIPLIST,
}

# Reverse map: numeric id -> encoding name (bytes).
REDIS_ENCODING_ID_TO_STR_LIB = dict((v, k) for k, v in REDIS_ENCODING_STR_TO_ID_LIB.items())


def redis_encoding_str_to_id(key_encoding):
    """Translate an encoding name (bytes, e.g. b'ziplist') to its numeric id.

    :raises ValueError: for unknown encodings.
    """
    # Single EAFP lookup instead of the original membership test + lookup.
    try:
        return REDIS_ENCODING_STR_TO_ID_LIB[key_encoding]
    except KeyError:
        raise ValueError("Invalid encoding `%s` given" % key_encoding)


def redis_encoding_id_to_str(key_encoding):
    """Translate a numeric encoding id back to its name as a str.

    :raises ValueError: for unknown ids.
    """
    try:
        return REDIS_ENCODING_ID_TO_STR_LIB[key_encoding].decode('utf8')
    except KeyError:
        raise ValueError("Invalid encoding `%s` given" % key_encoding)
| Python | 0 | |
6e577ecf55c107254816055ea810183b66e734b6 | Add management command to tag older icds sms with indicator metadata | custom/icds/management/commands/tag_icds_sms.py | custom/icds/management/commands/tag_icds_sms.py | from corehq.apps.sms.models import SMS
from corehq.messaging.smsbackends.icds_nic.models import SQLICDSBackend
from datetime import datetime
from django.core.management.base import BaseCommand
SUBSTRINGS = {
'hin': {
'aww_1': u'\u0906\u0901\u0917\u0928\u0935\u093e\u095c\u0940 \u0915\u0947\u0902\u0926\u094d\u0930 \u0926\u094d\u0935\u093e\u0930\u093e \u090f\u0915',
'aww_2': u'\u091f\u0940.\u090f\u091a . \u0930. \u0935\u093f\u0924\u0930\u0923 :',
'ls_1': u'\u091f\u0940.\u090f\u091a.\u0930.\u0935\u093f\u0924\u0930\u0923 :',
'ls_2': u'\u0928\u093f\u092e\u094d\u0932\u093f\u0916\u093f\u0924 \u0906\u0901\u0917\u0928\u0935\u093e\u095c\u0940 ',
'ls_6': u'\u0906\u0901\u0917\u0928\u0935\u093e\u095c\u0940 \u0915\u0947\u0902\u0926\u094d\u0930\u094b\u0902 \u0926\u094d\u0935\u093e\u0930\u093e',
},
'tel': {
'aww_1': u'\u0c05\u0c02\u0c17\u0c28\u0c4d \u0c35\u0c3e\u0c21\u0c40 \u0c15\u0c47\u0c02\u0c26\u0c4d\u0c30\u0c02 ICDS',
'aww_2': u'\u0c17\u0c43\u0c39 \u0c38\u0c02\u0c26\u0c30\u0c4d\u0c36\u0c28\u0c32\u0c41:',
'ls_1': u'\u0c17\u0c43\u0c39 \u0c38\u0c02\u0c26\u0c30\u0c4d\u0c36\u0c28\u0c32\u0c41 ',
'ls_2': u'\u0c17\u0c24 \u0c28\u0c46\u0c32 \u0c30\u0c4b\u0c1c\u0c41\u0c32\u0c4d\u0c32\u0c4b',
'ls_6': u'\u0c35\u0c3e\u0c30\u0c3f\u0c15\u0c3f \u0c24\u0c17\u0c3f\u0c28 \u0c38\u0c39\u0c3e\u0c2f\u0c02',
},
}
class Command(BaseCommand):
    """Tag pre-2017-06-26 outbound ICDS SMS with the indicator that sent them.

    A message is tagged only when exactly one known indicator substring
    (from SUBSTRINGS, across all languages) matches its text; ambiguous or
    unmatched messages are left untouched.
    """
    help = ""

    def add_arguments(self, parser):
        parser.add_argument('domain')

    def get_indicator_slug(self, sms):
        """Return (slug-of-last-match, number-of-matching-substrings)."""
        last_match = None
        num_matches = 0
        for lang_code, data in SUBSTRINGS.items():
            for slug, substring in data.items():
                if substring in sms.text:
                    last_match = slug
                    num_matches += 1
        return last_match, num_matches

    def handle(self, domain, **options):
        # Only processed, outbound messages sent through the ICDS backend
        # before the cutoff date (when metadata tagging went live).
        for sms in SMS.objects.filter(
            domain=domain,
            backend_api=SQLICDSBackend.get_api_id(),
            direction='O',
            processed=True,
            date__lt=datetime(2017, 6, 26),
        ):
            # Skip messages that already carry metadata.
            if sms.custom_metadata:
                continue
            slug, num_matches = self.get_indicator_slug(sms)
            # Tag only unambiguous matches.
            if num_matches == 1:
                sms.custom_metadata = {'icds_indicator': slug}
                sms.save()
| Python | 0 | |
ea522fc3cdcec3d7e774cdaa93a36ef22c221432 | Add file for parsing eyelink data | moss/eyelink.py | moss/eyelink.py | import os
import subprocess
import tempfile
import shutil
import numpy as np
import pandas as pd
class EyeData(object):
    """Parse EyeLink eye-tracking data from an EDF or ASCII (.asc) file.

    EDF input is converted with the external ``edf2asc`` tool into a temp
    directory (removed afterwards).  After parsing, exposes pandas objects:

    - ``eye_data``: per-sample timestamp/x/y/pupil, indexed by timestamp
    - ``fixations`` / ``saccades`` / ``blinks``: one row per parsed event
    - ``messages``: MSG lines, indexed by timestamp
    - ``settings``: recording settings reported in the file
    """
    def __init__(self, edf_file=None, asc_file=None):
        if edf_file is None and asc_file is None:
            raise ValueError("Must pass either EDF or ASCII file")
        self.settings = dict(PRESCALER=None,
                             VPRESCALER=None,
                             PUPIL=None,
                             EVENTS=None,
                             SAMPLES=None)
        # Messages keyed by integer timestamp.
        self.messages = pd.Series(index=pd.Int64Index([], name="timestamp"))
        # Raw field lists accumulated during parsing, converted to frames below.
        self.eye_data = []
        self.fixations = []
        self.saccades = []
        self.blinks = []
        # Obtain eye data in ASCII format
        if asc_file is None:
            temp_dir = tempfile.mkdtemp()
            asc_file = self.edf_to_asc(edf_file, temp_dir)
        else:
            temp_dir = None
        # Process the eye data file
        self.parse_asc_file(asc_file)
        # Convert to better representations of the data
        # ("." is EyeLink's missing-data marker; replace with NaN).
        eye_data = pd.DataFrame(self.eye_data,
                                columns=["timestamp", "x", "y", "pupil"])
        self.eye_data = (eye_data.replace({".": np.nan})
                         .apply(pd.to_numeric)
                         .set_index("timestamp"))
        fix_columns = ["start", "end", "duration", "x", "y", "pupil"]
        fixations = pd.DataFrame(self.fixations, columns=fix_columns)
        self.fixations = fixations.replace({".": np.nan}).apply(pd.to_numeric)
        sacc_columns = ["start", "end", "duration",
                        "start_x", "start_y", "end_x", "end_y",
                        "amplitude", "peak_velocity"]
        saccades = pd.DataFrame(self.saccades, columns=sacc_columns)
        self.saccades = saccades.replace({".": np.nan}).apply(pd.to_numeric)
        blink_columns = ["start", "end", "duration"]
        blinks = pd.DataFrame(self.blinks, columns=blink_columns)
        self.blinks = blinks.replace({".": np.nan}).apply(pd.to_numeric)
        # Clean up
        if temp_dir is not None:
            shutil.rmtree(temp_dir)
    def edf_to_asc(self, edf_file, temp_dir):
        """Convert an EDF file to ASCII with ``edf2asc``; return the .asc path."""
        subprocess.call(["edf2asc",
                         "-p", temp_dir,
                         edf_file])
        self._temp_dir = temp_dir
        edf_basename = os.path.basename(edf_file)
        # Replace the "edf" suffix with "asc" (assumes a 3-char extension).
        asc_basename = edf_basename[:-3] + "asc"
        asc_file = os.path.join(temp_dir, asc_basename)
        return asc_file
    def parse_asc_file(self, asc_file):
        """Feed every line of the ASCII file through parse_line."""
        with open(asc_file) as fid:
            for line in fid:
                self.parse_line(line)
    def parse_line(self, line):
        """Dispatch one ASCII line into settings/messages/events/samples."""
        # Skip blank lines and edf2asc preamble lines starting with "*".
        if not line[0].strip():
            return
        if line.startswith("*"):
            return
        fields = line.split()
        if fields[0] in self.settings:
            self.settings[fields[0]] = " ".join(fields[1:])
        if fields[0] == "MSG":
            timestamp = int(fields[1])
            self.messages.loc[timestamp] = " ".join(fields[2:])
        # Event-start markers carry no data we keep; only end events matter.
        if fields[0] in ["SFIX", "SSACC", "SBLINK"]:
            return
        # Note that we are not reading the eye field for events, assuming
        # that we are in monocular mode (as we always should be).
        # This makes it simpler to convert data to numeric after parsing.
        if fields[0] in ["EFIX"]:
            self.fixations.append(fields[2:])
        if fields[0] in ["ESACC"]:
            self.saccades.append(fields[2:])
        if fields[0] in ["EBLINK"]:
            self.blinks.append(fields[2:])
        # Lines whose first field is an integer are gaze samples.
        try:
            timestamp = int(fields[0])
        except ValueError:
            return
        self.eye_data.append(fields[:4])
| Python | 0 | |
901046879338b1bc19de59675c7eb513bbc2c517 | add problem 19 | euler019.py | euler019.py | #!/usr/bin/env python
# Project Euler 19: how many Sundays fell on the first of the month during
# the twentieth century (1 Jan 1901 to 31 Dec 2000)?

JANUARY = 31
# Month lengths from March through December (February handled per year).
MAR_TO_DEC = [31, 30, 31, 30, 31, 31, 30, 31, 30, 31]


def count_sundays():
    """Return the number of first-of-month Sundays in 1901-2000.

    Day 1 is Tuesday, 1 Jan 1901, so day number d is a Sunday exactly
    when d % 7 == 6.
    """
    # Day numbers of each month's first day, starting from 1 Jan 1901.
    firsts = [1]
    for year in range(1901, 2001):
        firsts.append(firsts[-1] + JANUARY)
        # Gregorian leap-year rule.
        if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
            february = 29
        else:
            february = 28
        firsts.append(firsts[-1] + february)
        for month_length in MAR_TO_DEC:
            firsts.append(firsts[-1] + month_length)
    return sum(1 for day in firsts if day % 7 == 6)


if __name__ == '__main__':
    # Single-argument print works identically on Python 2 and 3.
    print(count_sundays())
| Python | 0.001255 | |
dfa5bee0720f8d4b5f3ac2309915090239780045 | Test Flask file | flaskweb.py | flaskweb.py | from flask import Flask, request, jsonify
app = Flask(__name__)

# GET /hello/<name>: greet the caller by name.
@app.route("/hello/<name>")
def hello(name):
    return "Hello World! %s" % name

# GET /data/: return a fixed temperature/time series as JSON.
@app.route("/data/")
def temptime():
    arr = {"temp": [20, 21, 21],"time":[10,20,30],"unit":"s"}
    return jsonify(arr)

# POST /add: add the 'a' and 'b' fields of a JSON body, return the sum as text.
# NOTE(review): both the view and its local result variable shadow the
# builtin ``sum``; harmless here, but consider renaming.
@app.route("/add", methods = ['POST'])
def sum():
    r = request.get_json()
    a = r['a']
    b = r['b']
    sum = a + b
    return '{:d}'.format(sum)
| Python | 0.000001 | |
02f84b8cf3c3dd77b6d84d9ccea979c8de23eaa5 | Add Awesome renderers | src/common/renderers.py | src/common/renderers.py | import time
from rest_framework.renderers import JSONRenderer
from django.shortcuts import resolve_url
from django.template.loader import render_to_string
from django.utils.encoding import force_str
from django.utils.functional import Promise
from rest_framework.renderers import BaseRenderer, JSONRenderer, TemplateHTMLRenderer
from rest_framework.utils import encoders, json
# from drf_yasg.app_settings import redoc_settings, swagger_settings
# from drf_yasg.codecs import VALIDATORS, OpenAPICodecJson, OpenAPICodecYaml
# from drf_yasg.openapi import Swagger
# from drf_yasg.utils import filter_none
class AwesomeJSONRenderer(JSONRenderer):
    """JSON renderer that wraps every DRF response in a common envelope.

    The rendered body always has the shape::

        {"message_code": <int>, "message": <str|data>, "data": <payload|None>}

    * Exception responses (DRF's handler puts an ``ErrorDetail`` under
      ``data['detail']``) use the detail's text and its numeric ``code``.
    * Successful responses (HTTP 200/201/202) report code 100 / "success"
      and carry the original payload unchanged.
    * Anything else (e.g. validation errors) echoes the HTTP status code
      and passes the raw error data through as the message.
    """

    def render(self, data, accepted_media_type=None, renderer_context=None):
        status_code = renderer_context['response'].status_code

        if 'detail' in data:
            # Exception output, e.g.
            # {'detail': ErrorDetail('address value is not ...', code='00002')}
            response = {
                'message_code': int(data['detail'].code),
                'message': str(data['detail']),
                'data': None,
            }
        elif status_code in (200, 201, 202):
            # Plain success: wrap the payload.  (The original also re-checked
            # "'detail' not in data" here, which is redundant inside elif.)
            response = {
                'message_code': 100,
                'message': 'success',
                'data': data,
            }
        else:
            # Other failures (typically 400 validation errors): surface the
            # HTTP status code and hand the error body back as the message.
            response = {
                'message_code': status_code,
                'message': data,
                'data': None,
            }
        return super(AwesomeJSONRenderer, self).render(
            response, accepted_media_type, renderer_context)
| Python | 0 | |
80a435e3e382791b5615755d05c5353114650ecc | test only | hello.py | hello.py | #!/usr/bin/python
# Minimal CGI script: emit the HTTP header followed by a static HTML page.
# print(...) with a single string argument behaves identically on Python 2
# and Python 3, unlike the original Python-2-only print statements.
# ("Hello Word" is a typo in the original page text; preserved verbatim.)
print("Content-type:text/html\r\n\r\n")
print('<html>')
print('<head>')
print('<title>Hello Word - First CGI Program</title>')
print('</head>')
print('<body>')
print('<h2>Hello Word! This is my first CGI program</h2>')
print('</body>')
print('</html>')
| Python | 0 | |
101f378fb536cdaf8f2c681f5b1fba669bf70631 | Add hex xor | hexor.py | hexor.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# xor 2 hex strings
import string
def isHex(s):
    '''Check if it is a hex string'''
    # A valid hex string is non-empty, has an even number of characters
    # (whole bytes), and consists solely of hexadecimal digits.
    if not s:
        return False
    if len(s) % 2 != 0:
        return False
    return all(ch in string.hexdigits for ch in s)
def hexor(s1, s2):
    '''xor 2 hex strings, returning a hex string'''
    # XOR the inputs digit by digit; zip() stops at the shorter string.
    return "".join("{:x}".format(int(a, 16) ^ int(b, 16))
                   for a, b in zip(s1, s2))
if __name__ == "__main__":
    # Simple REPL: keep prompting for two hex strings and print their XOR.
    while True:
        first = input("First string: ")
        second = input("Second string: ")
        if isHex(first) and isHex(second):
            print("Result: ", hexor(first, second))
        else:
            print("Your hex string(s) are invalid!")
| Python | 0.000083 | |
6a9d60a6e48b3231675e465c1a837c909a9e652a | Add forward2 | forward2.py | forward2.py | from convert import print_prob, load_image, checkpoint_fn, meta_fn
import tensorflow as tf
import resnet
import os
# Depth of the ResNet to load; must match one of the pretrained checkpoints.
layers = 50

img = load_image("data/cat.jpg")

sess = tf.Session()

# Resolve the checkpoint path for the chosen depth to an absolute path.
filename = checkpoint_fn(layers)
filename = os.path.realpath(filename)

# Residual-block counts per stage for each supported depth.  Indexing the
# dict raises KeyError immediately for an unsupported value, instead of the
# original if/elif chain silently leaving num_blocks undefined.
num_blocks = {
    50: [3, 4, 6, 3],
    101: [3, 4, 23, 3],
    152: [3, 8, 36, 3],
}[layers]

with tf.device('/cpu:0'):
    images = tf.placeholder("float32", [None, 224, 224, 3], name="images")
    logits = resnet.inference(images,
                              is_training=False,
                              num_blocks=num_blocks,
                              preprocess=True,
                              bottleneck=True)
    # Named 'prob' so the tensor can be fetched back from the graph below.
    prob = tf.nn.softmax(logits, name='prob')

# Restore the pretrained weights into the freshly built graph.
saver = tf.train.Saver()
saver.restore(sess, filename)

graph = tf.get_default_graph()
prob_tensor = graph.get_tensor_by_name("prob:0")
# print() with one argument is valid on both Python 2 and 3 (the original
# used Python-2-only print statements).
for op in graph.get_operations():
    print(op.name)
print("graph restored")

# The network expects a single-image batch of shape (1, 224, 224, 3).
batch = img.reshape((1, 224, 224, 3))

feed_dict = {images: batch}

prob = sess.run(prob_tensor, feed_dict=feed_dict)

print_prob(prob[0])
| Python | 0.999904 | |
c794fbf00c5ba5b661f01fcbd0652105ed4c3904 | Add missing migration. | mc2/controllers/base/migrations/0005_field_defaults.py | mc2/controllers/base/migrations/0005_field_defaults.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow blank values on EnvVariable.key and MarathonLabel.name.

    Auto-generated AlterField operations; preserve_default=False means the
    '' default is only applied to existing rows during this migration and is
    not kept on the model afterwards.
    """
    dependencies = [
        ('base', '0004_marathonlabel'),
    ]
    operations = [
        # EnvVariable.key becomes an optional (blank) text field.
        migrations.AlterField(
            model_name='envvariable',
            name='key',
            field=models.TextField(default='', blank=True),
            preserve_default=False,
        ),
        # MarathonLabel.name becomes an optional (blank) text field.
        migrations.AlterField(
            model_name='marathonlabel',
            name='name',
            field=models.TextField(default='', blank=True),
            preserve_default=False,
        ),
    ]
| Python | 0.000002 | |
46a9c3789b86631258d881dacf6ae529ec277d70 | Add stats262.py | ielex/lexicon/management/commands/stats262.py | ielex/lexicon/management/commands/stats262.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.management import BaseCommand
from ielex.lexicon.models import Language, \
Meaning, \
Lexeme, \
CognateJudgementCitation
class Command(BaseCommand):
    """Management command for https://github.com/lingdb/CoBL/issues/262.

    Prints one line per CognateJudgementCitation whose reliability code
    matches the selected task, restricted to Swadesh terms (not_swadesh_term
    is False) of the 'Current' language list and the 'Jena200' meaning list.
    """
    help = "Computes statistics for https://github.com/lingdb/CoBL/issues/262"\
        "\nPossible parameters are: {1, 2, 3} for task number."
    def add_arguments(self, parser):
        # Single positional argument selecting which statistic to compute.
        parser.add_argument('task', type=int)
    missing_args_message = "Please provide a task number of {1,2,3}."
    def handle(self, *args, **options):
        """Run the selected task and write the matches to stdout."""
        # Data to work with:
        languageIds = Language.objects.filter(
            languagelist__name='Current').values_list('id', flat=True)
        meaningIds = Meaning.objects.filter(
            meaninglist__name='Jena200').values_list('id', flat=True)
        lexemeIds = Lexeme.objects.filter(
            language_id__in=languageIds,
            meaning_id__in=meaningIds,
            not_swadesh_term=False).values_list('id', flat=True)
        self.stdout.write("Task %s:" % options['task'])
        # Map each task number to the citation reliability code it inspects.
        taskFilter = {1: 'C', # Doubtful
                      2: 'L', # Loanword
                      3: 'X'} # Exclude
        cjcs = CognateJudgementCitation.objects.filter(
            cognate_judgement__lexeme_id__in=lexemeIds,
            reliability=taskFilter[options['task']]).all()
        for cjc in cjcs:
            cj = cjc.cognate_judgement
            self.stdout.write("CognateJudgementCitation %s "
                              "of CognateClass %s "
                              "and Lexeme %s." % (cjc.id,
                                                  cj.cognate_class.id,
                                                  cj.lexeme.id))
| Python | 0.000001 | |
6f27af536f9421c2b73def505648a039d4f0ad1f | Manage Rackes Code | ManageRacks.py | ManageRacks.py | import sqlite3
import gi
import json
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
# Module-level database setup: read all rack names (VOLSER column) from the
# local SQLite inventory so the windows below can populate their lists.
con = sqlite3.connect('SaedRobot.db')
cur = con.cursor()
cur.execute("SELECT VOLSER from inventory")
software_list = cur.fetchall()  # list of 1-tuples, one per rack
class ManageRack(Gtk.Window):
    """Main window: lists rack names (VOLSERs) loaded at import time.

    Widgets come from the Glade file "A.glade"; the object ids "window1",
    "grid3", "AddBtn", "DelBtn" and "backBtn" must exist there.
    """
    # Class-level placeholders; the real values are assigned in __init__.
    builder =None
    window= None
    box = None
    def __init__(self):
        self.builder = Gtk.Builder()
        self.builder.add_from_file("A.glade")
        self.window = self.builder.get_object("window1")
        grid=self.builder.get_object("grid3")
        AddBtn=self.builder.get_object("AddBtn")
        DelBtn=self.builder.get_object("DelBtn")
        backBtn=self.builder.get_object("backBtn")
        # Wire the toolbar buttons to their handlers.
        AddBtn.connect("clicked",self.Add)
        DelBtn.connect("clicked",self.Del)
        backBtn.connect("clicked",self.back)
        #Creating the ListStore model from the module-level query results
        self.software_liststore = Gtk.ListStore(str)
        for software_ref in software_list:
            self.software_liststore.append(list(software_ref))
        self.current_filter_language = None
        #Creating the filter, feeding it with the liststore model
        self.language_filter = self.software_liststore.filter_new()
        #creating the treeview, making it use the filter as a model, and adding the columns
        self.treeview = Gtk.TreeView.new_with_model(self.language_filter)
        for i, column_title in enumerate(["Rack Name"]):
            renderer = Gtk.CellRendererText()
            column = Gtk.TreeViewColumn(column_title, renderer, text=i)
            self.treeview.append_column(column)
        #setting up the layout, putting the treeview in a scrollwindow
        self.scrollable_treelist = Gtk.ScrolledWindow()
        self.scrollable_treelist.set_vexpand(True)
        self.scrollable_treelist.set_hexpand(True)
        grid.attach(self.scrollable_treelist, 0, 0, 1, 1)
        self.scrollable_treelist.add(self.treeview)
        self.window.show_all()
    def Add(self,button):
        # "Add" button: replace this window with the AddRack window.
        self.window.destroy()
        self.window=AddRack()
    def Del(self,button):
        # "Delete" button: currently only closes the window; follow-up
        # navigation is still commented out below.
        self.window.destroy()
        #self.window=login()
    def back(self,button):
        # "Back" button: currently only closes the window; follow-up
        # navigation is still commented out below.
        self.window.destroy()
        #self.window=login()
class AddRack():
    """Secondary window for adding a rack ("window2" in "A.glade").

    NOTE(review): the actual insert logic in Add() is not implemented yet;
    both buttons currently just destroy the window, and back() reopens the
    main ManageRack window.
    """
    # Class-level placeholders; the real values are assigned in __init__.
    builder =None
    window = None
    def __init__(self):
        self.builder = Gtk.Builder()
        self.builder.add_from_file("A.glade")
        self.window = self.builder.get_object("window2")
        AddBtn=self.builder.get_object("AddBtn")
        backBtn=self.builder.get_object("backBtn")
        # Wire the buttons to their handlers.
        AddBtn.connect("clicked",self.Add)
        backBtn.connect("clicked",self.back)
        self.window.show()
    def back(self,button):
        # Return to the main rack-list window.
        self.window.destroy()
        self.window=ManageRack()
    def Add(self,button):
        # Placeholder: closes the window; the return to ManageRack is
        # still commented out below.
        self.window.destroy()
        #self.window=ManageRack()
# Script entry point: show the main window and enter the GTK main loop.
window=ManageRack()
Gtk.main()
| Python | 0 | |
a7728b466f5cacb662566e9e71ebc661ae40271a | Create max_end3.py | Python/CodingBat/max_end3.py | Python/CodingBat/max_end3.py | # http://codingbat.com/prob/p135290
def max_end3(nums):
    """Set every element of the length-3 list *nums* to the larger of its
    first and last elements; mutates *nums* in place and returns it.

    (http://codingbat.com/prob/p135290)
    """
    # Use the builtin max() instead of shadowing it with a local variable,
    # as the original did.
    largest = max(nums[0], nums[-1])
    for i in range(3):
        nums[i] = largest
    return nums
| Python | 0.000011 | |
6ae82ecdd749b936289b496a10faa2caf1aa94c6 | Add first version of the code | bibsort.py | bibsort.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
from collections import OrderedDict
import codecs
class BibEntry:
    """A single BibTeX entry built from keyword fields.

    The 'type' and 'key' fields are mandatory and form the @TYPE{key, ...}
    header; every other field is emitted as a "\\tname = {value},\\n" line,
    sorted alphabetically by field name.
    """

    def __init__(self, **kwargs):
        # Copy the keyword arguments in one step; dict(kwargs) works on both
        # Python 2 and 3 (the original used the Python-2-only iteritems()).
        self.data = dict(kwargs)

    def entry(self):
        """Render this entry as a BibTeX string (fields sorted by name)."""
        result = u'@{0}{{{1},\n'.format(self.data['type'].upper(),
                                        self.data['key'])
        # sorted(items()) orders by field name directly; the intermediate
        # OrderedDict of the original was unnecessary.
        for key, value in sorted(self.data.items()):
            if key in ('type', 'key'):
                continue
            result += u'\t{0} = {{{1}}},\n'.format(key, value)
        # Replace the trailing ",\n" of the last line with the closing brace.
        result = result[:-2] + u'\n}\n'
        return result
def must_omit(i):
    """Return True for blocks that are BibTeX comments ('comment...' or
    '%%...') and should be skipped by the parser.

    str.startswith replaces the original anchored re.match calls — same
    truthiness for callers, without pulling in the regex machinery.
    """
    return i.startswith("comment") or i.startswith("%%")
def entries_from_file(file):
    """Parse a BibTeX file and return a list of BibEntry objects.

    The file is split into blocks on newlines followed by '@'; blocks that
    look like comments (see must_omit) are skipped.  Only the field names
    listed in `keywords` are recognised.

    NOTE(review): the one-regex-per-keyword approach assumes each field sits
    on a single line; multi-line values are truncated at the line break.
    """
    # Recognised BibTeX field names, in no particular order of importance.
    keywords = ['address', 'annote', 'author', 'booktitle', 'chapter', 'crossref',
                'doi', 'edition', 'editor', 'eprint', 'eprintclass', 'eprinttype',
                'howpublished', 'institution', 'journal', 'month', 'note', 'number',
                'organization', 'pages', 'publisher', 'school', 'series', 'title',
                'type', 'url', 'urldate', 'volume', 'year']
    # Read the whole file as UTF-8 text.
    with codecs.open(file, "r", "utf-8") as fp:
        text = fp.read()
    entries = []
    # Each entry starts with '@' at the beginning of a line; the leading
    # '@' of every block after the first is consumed by the split pattern.
    entry_blocks = [i for i in re.split("\n@", text) if not must_omit(i)]
    for entry in entry_blocks:
        entry_dict = {}
        # First line of a block: "TYPE{key," -> capture the entry type and
        # its citation key (the trailing ',' is sliced off with [:-1]).
        search = re.match("(?P<type>.*){(?P<key>.*)", entry)
        if search:
            key = search.group("key")[:-1]
            # The very first block still carries its leading '@'; strip it.
            if search.group("type").startswith('@'):
                # NOTE(review): 'type' shadows the builtin; harmless locally.
                type = search.group("type")[1:]
            else:
                type = search.group("type")
            entry_dict["key"] = key
            entry_dict["type"] = type
        for keyword in keywords:
            # Match "name = {value" (brace optional) up to the end of line.
            string = "\s*"+keyword+"\s*=\s*[{]?(?P<"+keyword+">\S.*),?\n"
            search = re.search(string, entry)
            if search:
                # Prohibits that a later keyword (e.g. 'eprinttype') overrides
                # an already captured field such as 'type'.
                if keyword in entry_dict.keys():
                    continue
                value = search.group(keyword)
                # Clean up trailing punctuation left over by the line-wise
                # regex: a field separator comma, then redundant braces.
                if value.endswith(','):
                    value = value[:-1]
                if value.endswith('}}'):
                    value = value[:-1]
                if value.endswith('}') and not value.startswith('{'):
                    value = value[:-1]
                entry_dict[keyword] = value
        # Blocks that yielded no fields at all (e.g. stray text) are dropped.
        if entry_dict != {}:
            entries.append(BibEntry(**entry_dict))
    return entries
# Script entry point: load the bibliography, sort entries by citation key
# (case-insensitively), and print them in BibTeX form.
BibEntries = entries_from_file('bibliography.bib')
BibEntries.sort(key=lambda entry: entry.data['key'].lower())
for bib_entry in BibEntries:
    # print() with a single argument is valid on both Python 2 and 3
    # (the original used the Python-2-only print statement).
    print(bib_entry.entry())
2428467d8c0d9c70a4931e1bd1b5971c9f45a0b7 | add function | function.py | function.py | def foo(x,y):
    # Print the sum of x and y; side effect only, returns None.
    print(x+y)
foo(3,4)
| Python | 0.000355 | |
cdc2347e1c06608db78a9cb0ac871c3aac455081 | Add beta-python3 | clients/python3/debug_client.py | clients/python3/debug_client.py | '''
Python client for visualizer plugin for Russian AI Cup
'''
import socket
import collections
Color = collections.namedtuple('Color', 'r g b')


class DebugClient(object):
    '''
    Main class for controlling the plugin.

    Draw commands are queued between start()/stop() — or inside a
    ``with client.pre(): ...`` / ``with client.post(): ...`` block —
    and sent to the visualizer over TCP as newline-terminated ASCII
    commands, each ending in an r g b color triple.
    '''
    DEFAULT_HOST = '127.0.0.1'
    DEFAULT_PORT = 13579

    MODE_PRE, MODE_POST, MODE_UNKNOWN = 'pre post unknown'.split()
    BEGINS = {MODE_PRE: 'begin pre\n', MODE_POST: 'begin post\n'}
    ENDS = {MODE_PRE: 'end pre\n', MODE_POST: 'end post\n'}

    def __init__(self, host=None, port=None):
        self.socket = socket.socket()
        # Disable Nagle's algorithm so small draw commands are not delayed.
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
        self.socket.connect((host or self.DEFAULT_HOST, port or self.DEFAULT_PORT))
        self.mode = self.MODE_UNKNOWN
        self.last_sync_tick = None
        self.reader = self.__buffered_reader()

    def pre(self):
        '''
        Method to create a pre-drawing context, that is, to draw things that should be drawn
        *before* the field is drawn by local runner (i.e. they will appear "beneath" the field)
        '''
        assert self.mode == self.MODE_UNKNOWN
        self.mode = self.MODE_PRE
        return self

    def post(self):
        '''
        Method to create a post-drawing context, that is, to draw things that should be drawn
        *after* the field is drawn by local runner (i.e. they will appear "above" the field)
        '''
        assert self.mode == self.MODE_UNKNOWN
        self.mode = self.MODE_POST
        return self

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()

    def start(self):
        '''
        Starts sending messages to specified queue (pre- or post-).
        Note: previous value in the queue will be cleaned up on the server
        '''
        assert self.mode in self.BEGINS
        self.socket.sendall((self.BEGINS[self.mode]).encode(encoding='UTF-8'))

    def stop(self):
        '''
        Stops sendings messages to the queue, so server can draw it
        '''
        assert self.mode in self.ENDS
        self.socket.sendall((self.ENDS[self.mode]).encode(encoding='UTF-8'))
        self.mode = self.MODE_UNKNOWN

    @staticmethod
    def __make_color(color):
        '''
        Normalizes 3+-element tuples/lists to a Color; Color instances and
        anything else are passed through unchanged.
        '''
        if isinstance(color, Color):
            return color
        if isinstance(color, (tuple, list)) and len(color) >= 3:
            return Color(r=color[0], g=color[1], b=color[2])
        return color

    def __send_command(self, cmd, color, args, pattern=None): #pylint: disable=missing-docstring
        assert self.mode != self.MODE_UNKNOWN
        color = self.__make_color(color)
        if not pattern:
            # Default wire format: command, one %f per argument, then r g b.
            pattern = '%s' + (' %f' * len(args)) + ' %f %f %f\n'
        self.socket.sendall((pattern % ((cmd, ) + args + color)).encode(encoding='UTF-8'))

    def circle(self, x0, y0, r0, color): #pylint: disable=invalid-name
        '''
        Draws a non-filled circle at (x0, y0) with radius "r0" and color "color"
        '''
        self.__send_command('circle', color, args=(x0, y0, r0))

    def fill_circle(self, x0, y0, r0, color): #pylint: disable=invalid-name
        '''
        Draws a filled circle at (x0, y0) with radius "r0" and color "color"
        '''
        self.__send_command('fill_circle', color, args=(x0, y0, r0))

    def rect(self, x0, y0, x1, y1, color): #pylint: disable=invalid-name, too-many-arguments
        '''
        Draws a non-filled rect with top-left at (x0, y0) and bottom-right at (x1, y1)
        with color "color"
        '''
        self.__send_command('rect', color, args=(x0, y0, x1, y1))

    def fill_rect(self, x0, y0, x1, y1, color): #pylint: disable=invalid-name, too-many-arguments
        '''
        Draws a filled rect with top-left at (x0, y0) and bottom-right at (x1, y1)
        with color "color"
        '''
        self.__send_command('fill_rect', color, args=(x0, y0, x1, y1))

    def line(self, x0, y0, x1, y1, color): #pylint: disable=invalid-name, too-many-arguments
        '''
        Draws a line from (x0, y0) to (x1, y1) with color "color"
        '''
        self.__send_command('line', color, args=(x0, y0, x1, y1))

    def text(self, x0, y0, msg, color): #pylint: disable=invalid-name
        '''
        Shows a text message "msg" at (x0, y0) with color "color"

        NOTE(review): messages containing spaces would break the
        space-separated wire format — confirm against the plugin protocol.
        '''
        self.__send_command('text', color, args=(x0, y0, msg),
                            pattern='%s %f %f %s %f %f %f\n')

    def is_replay(self, world):
        '''
        Method to check if given world corresponds to a replay from russianaicup.ru
        '''
        try:
            return self.__is_replay
        except AttributeError:
            # Computed once and cached: a local game has a player whose name
            # starts with 'MyStrategy'; a replay does not.
            result = True
            for player in world.players:
                if player.name.startswith('MyStrategy'):
                    result = False
                    break
            self.__is_replay = result #pylint: disable=attribute-defined-outside-init
            return result

    def __buffered_reader(self):
        '''
        Internal generator that implements buffered reads from the socket
        '''
        buf = ''
        while True:
            if '\n' in buf:
                line, buf = buf.split('\n', 1)
                yield line
            else:
                try:
                    data = self.socket.recv(4096)
                except socket.error:
                    return
                if not data:
                    # Peer closed the connection; the original would spin
                    # forever on empty reads without ever yielding.
                    return
                # recv() returns bytes on Python 3; decode before buffering
                # (the original appended bytes to a str, raising TypeError).
                buf += data.decode('UTF-8')

    def syncronize(self, world):
        '''
        Method to syncronize with the debug server if playing from a replay;
        waits for "sync event" and sends acknowledgement
        '''
        if not self.is_replay(world):
            return
        if self.last_sync_tick is None or world.tick > self.last_sync_tick:
            if self.last_sync_tick is not None:
                # server waits for an acknowledgement from us; must be bytes
                # on Python 3, like every other sendall() in this class
                self.socket.sendall('ack\n'.encode(encoding='UTF-8'))
            # get a new sync tick
            line = next(self.reader)
            if line.startswith('sync '):
                self.last_sync_tick = int(line.split()[1].strip())
| Python | 0.000033 | |
d26c014be8194a25b546c06a54efd15dbc5123a1 | create youtube_japi.py | mopidy_youtube/youtube_japi.py | mopidy_youtube/youtube_japi.py | # JSON based scrAPI
class jAPI(scrAPI):
# search for videos and playlists
#
@classmethod
def search(cls, q):
query = {
# get videos only
# 'sp': 'EgIQAQ%253D%253D',
'search_query': q.replace(' ', '+')
}
cls.session.headers = {
'user-agent':
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:66.0)'
' Gecko/20100101 Firefox/66.0',
'Cookie': 'PREF=hl=en;',
'Accept-Language': 'en;q=0.5',
'content_type': 'application/json'
}
logger.info('session.get triggered: search')
result = cls.session.get(jAPI.endpoint+'results', params=query)
json_regex = r'window\["ytInitialData"] = (.*?);'
extracted_json = re.search(json_regex, result.text).group(1)
result_json = json.loads(extracted_json)['contents']['twoColumnSearchResultsRenderer']['primaryContents']['sectionListRenderer']['contents'][0]['itemSectionRenderer']['contents'] # noqa: E501
items = []
for content in result_json:
item = {}
if 'videoRenderer' in content:
item.update({
'id': {
'kind': 'youtube#video',
'videoId': content['videoRenderer']['videoId']
},
# 'contentDetails': {
# 'duration': 'PT'+duration
# }
'snippet': {
'title': content['videoRenderer']['title']['simpleText'], # noqa: E501
# TODO: full support for thumbnails
'thumbnails': {
'default': {
'url': 'https://i.ytimg.com/vi/'
+ content['videoRenderer']['videoId']
+ '/default.jpg',
'width': 120,
'height': 90,
},
},
'channelTitle': content['videoRenderer']['longBylineText']['runs'][0]['text'], # noqa: E501
},
})
elif 'radioRenderer' in content:
pass
elif 'playlistRenderer' in content:
item.update({
'id': {
'kind': 'youtube#playlist',
'playlistId': content['playlistRenderer']['playlistId'] # noqa: E501
},
'contentDetails': {
'itemCount': content['playlistRenderer']['videoCount']
},
'snippet': {
'title': content['playlistRenderer']['title']['simpleText'], # noqa: E501
# TODO: full support for thumbnails
'thumbnails': {
'default': {
'url': 'https://i.ytimg.com/vi/'
+ content['playlistRenderer']['navigationEndpoint']['watchEndpoint']['videoId'] # noqa: E501
+ '/default.jpg',
'width': 120,
'height': 90,
},
'channelTitle': content['playlistRenderer']['longBylineText']['runs'][0]['text'], # noqa: E501
}
},
})
items.append(item)
return json.loads(json.dumps(
{'items': [i for i in items if i]},
sort_keys=False,
indent=1
))
| Python | 0.000001 | |
9873891a9f26edc51a22e51b5910615a7e08d410 | Create WaterLevel.py | device/src/WaterLevel.py | device/src/WaterLevel.py | #Water level sensor.
#VCC
#GND
#AO <--> ADC Port(A7) Analog data
#AO is the specific value.
import pyb
adc = pyb.ADC(Pin('A7')) # create an analog object from a pin
adc = pyb.ADC(pyb.Pin.board.A7)
# read an analog value
def getWaterLevel():
print('WaterLevel Ao')
return adc.read()
| Python | 0.000001 | |
d48a2c7bf3e1f7eb1604ce69c8af4878d8814167 | Add pygments template for Oceanic Next | oceanic_next.py | oceanic_next.py | # -*- coding: utf-8 -*-
"""
Base16 Oceanic Next Dark
by Dmitri Voronianski (http://pixelhunter.me)
Pygments template by Jan T. Sott (https://github.com/idleberg)
Created with Base16 Builder by Chris Kempson (https://github.com/chriskempson/base16-builder)
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, Text, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
BACKGROUND = "#1B2B34"
CURRENT_LINE = "#343D46"
SELECTION = "#4F5B66"
FOREGROUND = "#D8DEE9"
COMMENT = "#ansidarkgray"
RED = "#ansired"
ORANGE = "#F99157"
YELLOW = "#ansiyellow"
GREEN = "#ansigreen"
AQUA = "#ansiturquoise"
BLUE = "#ansiblue"
PURPLE = "#ansifuchsia"
class Oceanic_NextStyle(Style):
default_style = ''
background_color = BACKGROUND
highlight_color = SELECTION
background_color = BACKGROUND
highlight_color = SELECTION
styles = {
# No corresponding class for the following:
Text: FOREGROUND, # class: ''
Whitespace: "", # class: 'w'
Error: RED, # class: 'err'
Other: "", # class 'x'
Comment: "italic " + COMMENT, # class: 'c'
Comment.Multiline: "", # class: 'cm'
Comment.Preproc: "", # class: 'cp'
Comment.Single: "", # class: 'c1'
Comment.Special: "", # class: 'cs'
Keyword: PURPLE, # class: 'k'
Keyword.Constant: "", # class: 'kc'
Keyword.Declaration: "", # class: 'kd'
Keyword.Namespace: AQUA, # class: 'kn'
Keyword.Pseudo: "", # class: 'kp'
Keyword.Reserved: "", # class: 'kr'
Keyword.Type: YELLOW, # class: 'kt'
Operator: AQUA, # class: 'o'
Operator.Word: "", # class: 'ow' - like keywords
Punctuation: FOREGROUND, # class: 'p'
Name: YELLOW, # class: 'n'
Name.Attribute: BLUE, # class: 'na' - to be revised
Name.Builtin: "", # class: 'nb'
Name.Builtin.Pseudo: "", # class: 'bp'
Name.Class: YELLOW, # class: 'nc' - to be revised
Name.Constant: RED, # class: 'no' - to be revised
Name.Decorator: AQUA, # class: 'nd' - to be revised
Name.Entity: "", # class: 'ni'
Name.Exception: RED, # class: 'ne'
Name.Function: BLUE, # class: 'nf'
Name.Property: "", # class: 'py'
Name.Label: "", # class: 'nl'
Name.Namespace: YELLOW, # class: 'nn' - to be revised
Name.Other: BLUE, # class: 'nx'
Name.Tag: AQUA, # class: 'nt' - like a keyword
Name.Variable: RED, # class: 'nv' - to be revised
Name.Variable.Class: "", # class: 'vc' - to be revised
Name.Variable.Global: "", # class: 'vg' - to be revised
Name.Variable.Instance: "", # class: 'vi' - to be revised
Number: ORANGE, # class: 'm'
Number.Float: "", # class: 'mf'
Number.Hex: "", # class: 'mh'
Number.Integer: "", # class: 'mi'
Number.Integer.Long: "", # class: 'il'
Number.Oct: "", # class: 'mo'
Literal: ORANGE, # class: 'l'
Literal.Date: GREEN, # class: 'ld'
String: GREEN, # class: 's'
String.Backtick: "", # class: 'sb'
String.Char: FOREGROUND, # class: 'sc'
String.Doc: COMMENT, # class: 'sd' - like a comment
String.Double: "", # class: 's2'
String.Escape: ORANGE, # class: 'se'
String.Heredoc: "", # class: 'sh'
String.Interpol: ORANGE, # class: 'si'
String.Other: "", # class: 'sx'
String.Regex: "", # class: 'sr'
String.Single: "", # class: 's1'
String.Symbol: "", # class: 'ss'
Generic: "", # class: 'g'
Generic.Deleted: RED, # class: 'gd',
Generic.Emph: "italic", # class: 'ge'
Generic.Error: "", # class: 'gr'
Generic.Heading: "bold " + FOREGROUND, # class: 'gh'
Generic.Inserted: GREEN, # class: 'gi'
Generic.Output: "", # class: 'go'
Generic.Prompt: "bold " + COMMENT, # class: 'gp'
Generic.Strong: "bold", # class: 'gs'
Generic.Subheading: "bold " + AQUA, # class: 'gu'
Generic.Traceback: "", # class: 'gt'
}
| Python | 0 | |
bc6826769bf117c5d6e692f4e975b035aafbb76f | Add shared neutron constants | neutron_lib/constants.py | neutron_lib/constants.py | # Copyright (c) 2012 OpenStack Foundation., 2015 A10Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO(salv-orlando): Verify if a single set of operational
# status constants is achievable
NET_STATUS_ACTIVE = 'ACTIVE'
NET_STATUS_BUILD = 'BUILD'
NET_STATUS_DOWN = 'DOWN'
NET_STATUS_ERROR = 'ERROR'
PORT_STATUS_ACTIVE = 'ACTIVE'
PORT_STATUS_BUILD = 'BUILD'
PORT_STATUS_DOWN = 'DOWN'
PORT_STATUS_ERROR = 'ERROR'
PORT_STATUS_NOTAPPLICABLE = 'N/A'
FLOATINGIP_STATUS_ACTIVE = 'ACTIVE'
FLOATINGIP_STATUS_DOWN = 'DOWN'
FLOATINGIP_STATUS_ERROR = 'ERROR'
DEVICE_OWNER_ROUTER_HA_INTF = "network:router_ha_interface"
DEVICE_OWNER_ROUTER_INTF = "network:router_interface"
DEVICE_OWNER_ROUTER_GW = "network:router_gateway"
DEVICE_OWNER_FLOATINGIP = "network:floatingip"
DEVICE_OWNER_DHCP = "network:dhcp"
DEVICE_OWNER_DVR_INTERFACE = "network:router_interface_distributed"
DEVICE_OWNER_AGENT_GW = "network:floatingip_agent_gateway"
DEVICE_OWNER_ROUTER_SNAT = "network:router_centralized_snat"
DEVICE_OWNER_LOADBALANCER = "neutron:LOADBALANCER"
DEVICE_OWNER_LOADBALANCERV2 = "neutron:LOADBALANCERV2"
DEVICE_OWNER_PREFIXES = ["network:", "neutron:"]
# Collection used to identify devices owned by router interfaces.
# DEVICE_OWNER_ROUTER_HA_INTF is a special case and so is not included.
ROUTER_INTERFACE_OWNERS = (DEVICE_OWNER_ROUTER_INTF,
DEVICE_OWNER_DVR_INTERFACE)
ROUTER_INTERFACE_OWNERS_SNAT = (DEVICE_OWNER_ROUTER_INTF,
DEVICE_OWNER_DVR_INTERFACE,
DEVICE_OWNER_ROUTER_SNAT)
FLOATINGIP_KEY = '_floatingips'
INTERFACE_KEY = '_interfaces'
HA_INTERFACE_KEY = '_ha_interface'
HA_ROUTER_STATE_KEY = '_ha_state'
METERING_LABEL_KEY = '_metering_labels'
FLOATINGIP_AGENT_INTF_KEY = '_floatingip_agent_interfaces'
SNAT_ROUTER_INTF_KEY = '_snat_router_interfaces'
IPv4 = 'IPv4'
IPv6 = 'IPv6'
IP_VERSION_4 = 4
IP_VERSION_6 = 6
IPv4_BITS = 32
IPv6_BITS = 128
IPv4_ANY = '0.0.0.0/0'
IPv6_ANY = '::/0'
IP_ANY = {IP_VERSION_4: IPv4_ANY, IP_VERSION_6: IPv6_ANY}
DHCP_RESPONSE_PORT = 68
FLOODING_ENTRY = ('00:00:00:00:00:00', '0.0.0.0')
AGENT_TYPE_DHCP = 'DHCP agent'
AGENT_TYPE_OVS = 'Open vSwitch agent'
AGENT_TYPE_LINUXBRIDGE = 'Linux bridge agent'
AGENT_TYPE_OFA = 'OFA driver agent'
AGENT_TYPE_L3 = 'L3 agent'
AGENT_TYPE_LOADBALANCER = 'Loadbalancer agent'
AGENT_TYPE_METERING = 'Metering agent'
AGENT_TYPE_METADATA = 'Metadata agent'
AGENT_TYPE_NIC_SWITCH = 'NIC Switch agent'
L2_AGENT_TOPIC = 'N/A'
PORT_BINDING_EXT_ALIAS = 'binding'
L3_AGENT_SCHEDULER_EXT_ALIAS = 'l3_agent_scheduler'
DHCP_AGENT_SCHEDULER_EXT_ALIAS = 'dhcp_agent_scheduler'
LBAAS_AGENT_SCHEDULER_EXT_ALIAS = 'lbaas_agent_scheduler'
L3_DISTRIBUTED_EXT_ALIAS = 'dvr'
L3_HA_MODE_EXT_ALIAS = 'l3-ha'
SUBNET_ALLOCATION_EXT_ALIAS = 'subnet_allocation'
ETHERTYPE_IPV6 = 0x86DD
# Protocol names and numbers for Security Groups/Firewalls
PROTO_NAME_TCP = 'tcp'
PROTO_NAME_ICMP = 'icmp'
PROTO_NAME_ICMP_V6 = 'icmpv6'
PROTO_NAME_UDP = 'udp'
PROTO_NUM_TCP = 6
PROTO_NUM_ICMP = 1
PROTO_NUM_ICMP_V6 = 58
PROTO_NUM_UDP = 17
# List of ICMPv6 types that should be allowed by default:
# Multicast Listener Query (130),
# Multicast Listener Report (131),
# Multicast Listener Done (132),
# Neighbor Solicitation (135),
# Neighbor Advertisement (136)
ICMPV6_ALLOWED_TYPES = [130, 131, 132, 135, 136]
ICMPV6_TYPE_RA = 134
ICMPV6_TYPE_NA = 136
DHCPV6_STATEFUL = 'dhcpv6-stateful'
DHCPV6_STATELESS = 'dhcpv6-stateless'
IPV6_SLAAC = 'slaac'
IPV6_MODES = [DHCPV6_STATEFUL, DHCPV6_STATELESS, IPV6_SLAAC]
IPV6_LLA_PREFIX = 'fe80::/64'
# Human-readable ID to which default_ipv6_subnet_pool should be set to
# indicate that IPv6 Prefix Delegation should be used to allocate subnet CIDRs
IPV6_PD_POOL_ID = 'prefix_delegation'
# Special provisional prefix for IPv6 Prefix Delegation
PROVISIONAL_IPV6_PD_PREFIX = '::/64'
# Timeout in seconds for getting an IPv6 LLA
LLA_TASK_TIMEOUT = 40
# Linux interface max length
DEVICE_NAME_MAX_LEN = 15
# Device names start with "tap"
TAP_DEVICE_PREFIX = 'tap'
# The vswitch side of a veth pair for a nova iptables filter setup
VETH_DEVICE_PREFIX = 'qvo'
# prefix for SNAT interface in DVR
SNAT_INT_DEV_PREFIX = 'sg-'
# Possible prefixes to partial port IDs in interface names used by the OVS,
# Linux Bridge, and IVS VIF drivers in Nova and the neutron agents. See the
# 'get_ovs_interfaceid' method in Nova (nova/virt/libvirt/vif.py) for details.
INTERFACE_PREFIXES = (TAP_DEVICE_PREFIX, VETH_DEVICE_PREFIX,
SNAT_INT_DEV_PREFIX)
# Time format
ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%f'
| Python | 0.000001 | |
d43d4f29752bfae8a4d2e337f5523cd5fc7888d8 | add Trimplementation of Kadane's algorithm | dp/kadane-_algorithm/py/TrieToSucceed_kadane.py | dp/kadane-_algorithm/py/TrieToSucceed_kadane.py | #!/usr/bin/python3
"""
This module contains an implementation of Kadane's algorithm to determine the
maximum sum of a subarray.
"""
def kadane(list_obj=None):
"""
Find maximum sum of a subarray
:param list list_int: list of objs
:return: maximum sum of subarray
:rtype: int
DOCTESTS
--------
Test 1 (list of ints):
>>> print(kadane([-1, 2, 3, -4, 5, -6]))
6
Test 2 (list of ints):
>>> print(kadane([-1, 2, 3, -6, 5, -6]))
5
Test 3 (list of ints):
>>> print(kadane([3, 2, 3, -7, 5, -6]))
11
Test 4 (invalid argument type):
>>> print(kadane())
Traceback (most recent call last):
...
TypeError: input must be of type list
Test 5 (empty list):
>>> print(kadane([]))
Traceback (most recent call last):
...
ValueError: list must not be empty
"""
if type(list_obj) is not list:
raise TypeError("input must be of type list")
if not list_obj:
raise ValueError("list must not be empty")
max_sum, cur_max = list_obj[0], list_obj[0]
size = len(list_obj)
for idx, val in enumerate(list_obj):
cur_max = max(val, val + cur_max)
max_sum = max(max_sum, cur_max)
return max_sum
if __name__ == '__main__':
import doctest
doctest.testmod()
| Python | 0 | |
3b1b708b739f43bdac86784b27838c80d179572b | solved day 17 | 17/main.py | 17/main.py | import collections
import unittest
def gen_valid_combinations(liters, container_sizes):
first_container_capacity = container_sizes[0]
if len(container_sizes) == 1:
if liters == first_container_capacity:
yield [1]
elif liters == 0:
yield [0]
elif liters == 0:
yield [0 for _ in xrange(len(container_sizes))]
else:
if liters >= first_container_capacity:
for combination in gen_valid_combinations(liters - first_container_capacity,
container_sizes[1:]):
yield [1] + combination
for combination in gen_valid_combinations(liters, container_sizes[1:]):
yield [0] + combination
# ANSWERS ---------------------------
container_sizes = [
43,
3,
4,
10,
21,
44,
4,
6,
47,
41,
34,
17,
17,
44,
36,
31,
46,
9,
27,
38
]
combinations = [combination
for combination in gen_valid_combinations(150, container_sizes)
]
combinations_by_size = collections.defaultdict(lambda:list())
for combination in combinations:
combinations_by_size[sum(combination)].append(combination)
for size, combinations in combinations_by_size.iteritems():
print size, len(combinations)
# TESTS -----------------------------
class MyTests(unittest.TestCase):
def test_one_container_impossible(self):
container_sizes = [10]
combinations = [combination
for combination in gen_valid_combinations(11, container_sizes)]
self.assertEqual(combinations, [])
def test_one_container_possible(self):
container_sizes = [10]
combinations = [combination
for combination in gen_valid_combinations(10, container_sizes)]
self.assertEqual(combinations, [[1]])
def test_two_container_possible(self):
container_sizes = [10, 5]
combinations = [combination
for combination in gen_valid_combinations(15, container_sizes)]
self.assertEqual(combinations, [[1, 1]])
def test_examples(self):
container_sizes = [20, 15, 10, 5, 5]
combinations = [combination
for combination in gen_valid_combinations(25, container_sizes)]
self.assertEqual(combinations, [
[1, 0, 0, 1, 0],
[1, 0, 0, 0, 1],
[0, 1, 1, 0, 0],
[0, 1, 0, 1, 1]])
if __name__ == "__main__":
unittest.main() | Python | 0.999067 | |
4d196f4f897ac6d2c590803d491192e340ec475e | fetch option order example | examples/py/async-binance-fetch-option-order.py | examples/py/async-binance-fetch-option-order.py | # -*- coding: utf-8 -*-
import asyncio
import os
import sys
from pprint import pprint
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt.async_support as ccxt # noqa: E402
async def main():
    """Fetch open option orders from Binance's European options (eapi) API."""
    exchange = ccxt.binance({
        'apiKey': 'YOUR_API_KEY',
        'secret': 'YOUR_SECRET',
        # 'verbose': True, # for debug output
    })
    # Market metadata must be loaded before making implicit API calls.
    await exchange.load_markets()
    # Example filter values; unused unless the commented-out parameters
    # below are enabled.
    market_id = 'ETH-221028-1700-C'
    order_id = 4612098335294532880
    try:
        # Implicit method for the signed GET /eapi/v1/openOrders endpoint.
        response = await exchange.eapiPrivateGetOpenOrders({
            # 'symbol': market_id, # optional
            # 'orderId': order_id, # optional
        })
        pprint(response)
    except Exception as e:
        print('eapiPrivateGetOpenOrders() failed')
        print(e)
    # Always release the underlying HTTP session, even after a failure.
    await exchange.close()


asyncio.run(main())
| Python | 0.999999 | |
08e43e8bfd150252b3e05ff62ee25cdf0e519f20 | Revert #830 because it broke the case when the main script is not in path. | meson.py | meson.py | #!/usr/bin/env python3
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mesonbuild import mesonmain
import sys, os
def main():
    """Resolve this script's absolute path, then hand control to Meson.

    mesonmain.run() must receive an absolute path because the user may
    have launched the program from a directory that is not on PATH.
    """
    thisfile = __file__
    if not thisfile.startswith(os.sep) if False else not os.path.isabs(thisfile):
        thisfile = os.path.normpath(os.path.join(os.getcwd(), thisfile))
    if __package__ == '':
        # NOTE(review): when __package__ is the empty string, __file__
        # apparently points one level too deep, so one path component is
        # stripped -- TODO confirm which launch mode triggers this.
        thisfile = os.path.dirname(thisfile)
    # The first argument *must* be an absolute path because
    # the user may have launched the program from a dir
    # that is not in path.
    sys.exit(mesonmain.run(thisfile, sys.argv[1:]))


if __name__ == '__main__':
    main()
| #!/usr/bin/env python3
# Copyright 2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mesonbuild import mesonmain
import sys, os
sys.exit(mesonmain.run(sys.argv[0], sys.argv[1:]))
| Python | 0 |
27b2e87a8653961fbba45962e9e6ec1d20904a03 | Create demo_lcd.py | 20x4LCD/demo_lcd.py | 20x4LCD/demo_lcd.py | import lcddriver
from time import *
lcd = lcddriver.lcd()
lcd.lcd_display_string("Hello world", 1)
lcd.lcd_display_string("My name is", 2)
lcd.lcd_display_string("picorder", 3)
lcd.lcd_display_string("I am a Raspberry Pi", 4)
| Python | 0.000001 | |
2c345f2927cba033908020b97c33064bbfce5fbd | Add 38-count-and-say.py | 38-count-and-say.py | 38-count-and-say.py | """
Count and Say
The count-and-say sequence is the sequence of integers beginning as follows:
1, 11, 21, 1211, 111221, ...
1 is read off as "one 1" or 11.
11 is read off as "two 1s" or 21.
21 is read off as "one 2, then one 1" or 1211.
Given an integer n, generate the nth sequence.
Note: The sequence of integers will be represented as a string.
----------------- A better description from careercup.com ---------------
"Count and Say problem" Write a code to do following:
n String to print
0 1
1 1 1
2 2 1
3 1 2 1 1
...
Base case: n = 0 print "1"
for n = 1, look at previous string and write number of times a digit is seen and the digit itself. In this case, digit 1 is seen 1 time in a row... so print "1 1"
for n = 2, digit 1 is seen two times in a row, so print "2 1"
for n = 3, digit 2 is seen 1 time and then digit 1 is seen 1 so print "1 2 1 1"
for n = 4 you will print "1 1 1 2 2 1"
Consider the numbers as integers for simplicity. e.g. if previous string is "10 1" then the next will be "1 10 1 1" and the next one will be "1 1 1 10 2 1"
Performance:
1. Total Accepted: 56840 Total Submissions: 219756 Difficulty: Easy
"""
class Solution(object):
    # Thanks https://github.com/jw2013/Leetcode-Py/blob/master/Count%20and%20Say.py
    def countAndSay(self, n):
        """Return the n-th term of the count-and-say sequence.

        Starting from "1", each term reads the previous one aloud: every
        run of equal digits becomes "<run length><digit>".

        :type n: int
        :rtype: str
        """
        sequence = "1"
        # n - 1 rewrite passes turn the seed "1" into the n-th term.
        # range (not the Python-2-only xrange) so this also runs on Python 3.
        for _ in range(n - 1):
            idx, next_sequence = 0, ""
            end_idx = len(sequence) - 1
            while idx < len(sequence):
                count = 1
                # Advance idx to the end of the current run of equal digits.
                while idx < end_idx and sequence[idx] == sequence[idx + 1]:
                    idx += 1
                    count += 1
                next_sequence += "{}{}".format(count, sequence[idx])
                idx += 1
            sequence = next_sequence
        return sequence
def test_func(result, expect):
assert result == expect, [result, expect]
test_func(Solution().countAndSay(1), "1")
test_func(Solution().countAndSay(2), "11")
test_func(Solution().countAndSay(3), "21")
test_func(Solution().countAndSay(4), "1211")
test_func(Solution().countAndSay(5), "111221")
test_func(Solution().countAndSay(6), "312211")
"""
if n == 0:
return sequence
while n > 0:
next_sequence = ""
curr_char = sequence[0]
curr_char_matching_count = 1
dummy_end = len(sequence) # to finish the last count+num
is_same = False
for idx in xrange(1, dummy_end + 1):
if idx < dummy_end:
is_same = sequence[idx] == curr_char
if is_same:
curr_char_matching_count += 1
if (idx == dummy_end) or not is_same:
next_sequence += curr_char + str(curr_char_matching_count)
# prepare next round
if (idx < dummy_end) and (not is_same):
curr_char = sequence[idx]
sequence = next_sequence
n -= 1"""
"""
NOTE: If dont use a cursor, but use some variables to hold position informations, it's hard to debug!!! And it costs me several hours...
class Solution(object):
def countAndSay(self, num):
sequence = "1" # the default start
for time in range(num):
next_sequence = ""
curr_char_matching_count = 1
for idx, curr_char in enumerate(sequence):
if idx < len(curr_char) - 1:
if curr_char == sequence[idx + 1]:
curr_char_matching_count += 1
else:
next_sequence += (str(curr_char_matching_count) + curr_char)
curr_char_matching_count = 0
if idx == len(curr_char) - 1:
next_sequence += (str(curr_char_matching_count) + curr_char)
sequence = next_sequence
print "sequence:", sequence
print "-"*100
print
return sequence
"""
| Python | 0.998619 | |
e4a4e8d43c1b4c63ac32467a8e49a5b81f8f2fa3 | Create roundrobin.py | roundrobin.py | roundrobin.py | import string
from game import Game
class RoundRobin(object):
    """Round-robin schedule generator using the classic circle method.

    Team 0 stays fixed while the remaining teams rotate one position per
    round; home/away sides swap after every full cycle of rounds.
    """

    def __init__(self, teams_count):
        # Teams are letters 'A'.. plus a " " bye slot when the count is odd.
        self.teams = generateTeams(teams_count)
        self.current_round = 0

    def getRound(self):
        """Return the list of Game pairings for the current round."""
        games = []
        teams_count = len(self.teams)
        # After each full cycle of (teams_count - 1) rounds, flip which
        # side of every pairing is at home.
        home_away_index = self.current_round // (teams_count-1)
        for i in range(0, teams_count, 2):
            if home_away_index%2 == 0:
                game = Game( self.teams[i], self.teams[i+1] )
            else:
                game = Game( self.teams[i+1], self.teams[i] )
            games.append( game )
        return games

    def getNextRound(self):
        """Advance the rotation one step and return the new pairings."""
        self.rotate()
        return self.getRound()

    def rotate(self):
        """Circle-method rotation: keep teams[0] fixed, rotate the rest.

        The last team moves into position 1; everyone else shifts right.
        """
        head = self.teams[0]
        tail = self.teams[1: len(self.teams)-1]
        second = self.teams[len(self.teams)-1]
        self.teams = []
        self.teams.append(head)
        self.teams.append(second)
        self.teams = self.teams + tail
        self.current_round += 1

    def getSchedule(self, rounds_count):
        """Return rounds_count rounds, rotating after each one."""
        schedule = []
        for i in range(rounds_count):
            games = self.getRound()
            schedule.append(games)
            self.rotate()
        return schedule

    def printSchedule(self, rounds_count):
        """Print rounds_count rounds (Python 2 print statements)."""
        schedule = self.getSchedule(rounds_count)
        for day in range(len(schedule)):
            print "== Day #" + str(day+1)
            games = schedule[day]
            for game in games:
                print game
            # NOTE(review): getSchedule() already rotated once per round;
            # this extra rotate() advances the state further -- confirm
            # whether that is intended.
            self.rotate()
def generateTeams(teams_count):
    """Name teams 'A', 'B', ... padding odd counts with a ' ' bye slot."""
    roster = list(string.ascii_uppercase[:teams_count])
    if teams_count % 2:
        # Odd team count: add a placeholder so every round pairs evenly.
        roster.append(" ")
    return roster
| Python | 0.000369 | |
d1eac9803adbf9b91b22ce62a4bdf5db790b6265 | Create ShodanToCSV.py | ShodanToCSV.py | ShodanToCSV.py | #!/usr/bin/env python
#
# Search shodan, output to CSV
# To ensure comma as seperator, all comma's in os and header field (if any) are replaced for ;;;
# To ensure row integrity all newlines (\n) are replaced by #NWLN
# Author: Jeroen
import shodan
import sys
import os
from optparse import OptionParser
#Initialize userinput
oparser = OptionParser("usage: %prog [options] [command]*", version="v%d.%d.%d" % (1, 0, 0))
oparser.add_option("-d", "--debug", dest="debug", action = "store_true", help="Be extremely verbose", default=False)
oparser.add_option("-k", "--key", dest="AKEY", help="Use your personal API key",default="GETYOUROWNKEY")
oparser.add_option("-s", "--search", dest="searchQuery", help="Insert shodan search query",default=False)
oparser.add_option("-o", "--output", dest="outputFileName", help="output filename",default="output.csv")
(options,args) = oparser.parse_args(sys.argv)

# A search query is mandatory; bail out pointing the user at --help.
if (options.searchQuery == False):
    print 'Type shodanToCSV.py --help for syntax'
    sys.exit(1)

try:
    # Setup the api
    api = shodan.WebAPI(options.AKEY)

    # Perform the search
    result = api.search(options.searchQuery)

    csvHeader = "ip,port,os,country,lastupdate,header\n"
    fo = open(options.outputFileName, 'w')
    fo.write(str(csvHeader))

    # Loop through the matches and print each IP
    # Commas inside the os/header fields become ';;;' and newlines are
    # stripped so every record stays on one CSV row.
    for result in result['matches']:
        row = result['ip'] + ',' + str(result['port']) + ',' + str(result['os']).replace(",",";;;") + ',' + result['country_name'] + ',' + result['updated'] + ',' + str(result['data']).replace(",",";;;")
        row = row.replace("\r\n","").replace("\n","") + str(os.linesep)
        if(options.debug != False):
            print str(row)
        fo.write(str(row))
    fo.close()
except Exception, e:
    # Python 2 only except syntax; any failure aborts with the message.
    print 'Error: %s' % e
    exit(1)
| Python | 0 | |
9be177007ce95f2b9e47225a46effe7b7682ba38 | Create StockReader.py | StockReader.py | StockReader.py | #econogee, 1/28/2016
#Stock Data Retrieval Script
import os
import numpy as np
import urllib2
startday = str(0)
startmonth = str(1)
startyear = str(2005)
endday = str(30)
endmonth = str(1)
endyear = str(2016)
symbols = []
with open('stocklist.csv') as f:
content = f.readlines()
for l in content:
symbols.append(l.split(",")[0])
for s in symbols:
response = urllib2.urlopen('http://real-chart.finance.yahoo.com/table.csv?s='+str(s)+\
'&a=' + startday + '&b=' + startmonth + '&c=' + startyear + \
'&d=' + endday + '&e=' + endmonth + '&f=' + endyear + \
'&g=d&ignore=.csv')
html = response.read()
html = html.split('\n')
html = np.array(html)
np.savetxt(str(s),html,fmt='%s',delimiter=',')
| Python | 0 | |
706da9008e8101c03bb2c7754b709209897cd952 | Add Organization Administrator model. | app/soc/models/org_admin.py | app/soc/models/org_admin.py | #!/usr/bin/python2.5
#
# Copyright 2008 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the Organization Administrator Model."""
__authors__ = [
'"Pawel Solyga" <pawel.solyga@gmail.com>',
]
import soc.models.role
class OrgAdmin(soc.models.role.Role):
    """Administrator details for a specific Organization.

    Placeholder model: all properties and behavior are currently
    inherited unchanged from soc.models.role.Role.
    """
    pass
| Python | 0 | |
2e3349b75fffb9a9f3906d065bc8f141eef02d38 | Add run_wsgi | run_wsgi.wsgi | run_wsgi.wsgi | #!/usr/bin/env python
import os
import sys

# NOTE(review): presumably redirects prints to the WSGI error log, since
# plain stdout is not usable under mod_wsgi -- confirm deployment target.
sys.stdout = sys.stderr

# Make the application package importable and run from its own directory.
INTELLIDATA_DIR = os.path.dirname(__file__)
sys.path.insert(0, INTELLIDATA_DIR)
os.chdir(INTELLIDATA_DIR)

import config
# WSGI servers look for a module-level name called "application".
from intellidata import app as application
application.config.from_object('config')
| Python | 0.000005 | |
a086e7328ca920f269812a87be095ce638467f95 | Add youtube-dl library sample of operation | crawler/youtube_dl_op_sample.py | crawler/youtube_dl_op_sample.py | #!/usr/bin/env python2
#-*- coding: utf-8 -*-
import sys
import youtube_dl
def main():
    """Resolve the direct media URL for the page URL given on argv.

    Runs youtube-dl in simulate mode (nothing is downloaded) and prints
    the resolved resource URI, or "Nothing at all." when it fails.
    """
    if len(sys.argv) < 2:
        print("Usage: youtube_dl_op_sample.py URL")
        return

    # simulate/quiet: extract metadata only, no download, no chatter.
    opts = {
        'forceurl': True,
        'quiet': True,
        'simulate': True,
    }

    url = sys.argv[1]
    try:
        with youtube_dl.YoutubeDL(opts) as ydl:
            extract_info = ydl.extract_info(url)

        resource_uri = extract_info.get('url')
        if not resource_uri:
            # No top-level URL: fall back to the format entry whose id
            # matches the extractor's selected format_id.
            format_id = extract_info.get('format_id')
            for fmt in extract_info.get('formats'):
                if format_id != fmt.get('format_id'):
                    continue
                resource_uri = fmt.get('url')
    except Exception as e:
        print(e)
        resource_uri = None

    if resource_uri:
        print("resource_uri: %s" % resource_uri)
    else:
        print("Nothing at all.")


if __name__ == '__main__':
    main()
| Python | 0 | |
70927650139a94b1c7be5557e47340ccda609d36 | Create UnicommWlan.py | UnicommWlan.py | UnicommWlan.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Auto login the unicomm wlan
# By Lu CHAO(me@chao.lu) ,2013 10 12.
from urllib2 import build_opener,HTTPCookieProcessor
from urllib import urlencode
from cookielib import CookieJar
import time,sys
from random import random
global loop
global count
loop=True
count=0
def LoginWlan(user,password,address):
    """Log in to the Unicom WLAN captive portal.

    Updates the module-level flags: increments ``count`` when the portal
    says the account is already online, clears ``loop`` on a hard failure.
    """
    index_page = "http://202.106.46.37/"
    global loop,count
    try:
        # Get a CookieJar instance so the session cookie survives requests.
        cj = CookieJar()
        # Build an opener that stores and resends cookies via the CookieJar.
        opener=build_opener(HTTPCookieProcessor(cj))
        # Masquerade as a normal browser; some web servers refuse unknown agents.
        opener.addheaders = [('User-agent','Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)')]
        # Build the POST payload containing the login username and password.
        data = urlencode({"username":user,"password":password,"passwordType":6,"wlanuserip":"","userOpenAddress":address,"checkbox":0,"basname":"","setUserOnline":"","sap":"","macAddr":"","bandMacAuth":0,"isMacAuth":"","basPushUrl":"http%253A%252F%252F202.106.46.37%252F","passwordkey":""})
        # Visit the portal page first; the CookieJar records its cookies.
        opener.open(index_page)
        # Now POST the login form with the session cookie attached.
        op=opener.open("http://202.106.46.37/login.do",data)
        # Read the response body to inspect the login result.
        data= op.read()
        if 'success' in data:
            print "%s : Logsin Success"%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
        elif u"此账号已在线!" in data.decode('utf-8'):
            # Portal reply "this account is already online": just count it.
            count=count+1
        else:
            print "%s :Failed "%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())))
            print data.decode('utf-8')
            # Hard failure: stop the retry loop in the module main block.
            loop=False
        opener.close()
        return
    except Exception,e:
        print str(e)
# Send all output (including tracebacks) to the log file.
file=open("/var/log/autologin.log",'w')
sys.stdout=file
sys.stderr=file

# Keep re-logging-in until LoginWlan reports a hard failure (loop=False).
while loop:
    # Put your own username, password and region here ("bj" = Beijing).
    LoginWlan("你的用户名","你的密码","bj")
    file.flush()
    if count%10==1:
        # Periodically log how often the "already online" reply was seen.
        print "%s :Count %d"%(time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())),count)
    elif count>10000:
        # Avoid unbounded growth of the counter.
        count=0
    else:
        None
    # Randomized 20-25s delay between attempts to avoid hammering the portal.
    time.sleep(20+int(random()*5))
| Python | 0 | |
a083baddd853514a5697e3a98eea4251c2ce5487 | Create __openerp__.py | __openerp__.py | __openerp__.py | {
"name": "Product price based on margin with formula sale_price=cost_price/margin",
"version": "8.0.0.1",
"author": "3nodus",
'category': 'Product',
"website": "http://www.3nodus.com/",
"license": "AGPL-3",
"depends": [
"product",
],
"demo": [
],
"data": [
],
"test": [],
"js": [],
"css": [],
"qweb": [],
"installable": True,
"auto_install": False,
}
| Python | 0.005291 | |
22a3a6aa70c2960983887717b98cab2149a18d89 | Fix #121: don't accept tells to bot | plugins/tell.py | plugins/tell.py | " tell.py: written by sklnd in July 2009"
" 2010.01.25 - modified by Scaevolus"
import time
from util import hook, timesince
def db_init(db):
    """Make sure *db* has the ``tell`` table and return the connection.

    (user_to, message) is the primary key, so the same message cannot be
    queued twice for one recipient.
    """
    schema = ("create table if not exists tell"
              "(user_to, user_from, message, chan, time,"
              "primary key(user_to, message))")
    db.execute(schema)
    db.commit()
    return db
def get_tells(db, user_to):
    """Return the pending tells for *user_to* (case-insensitive), oldest first."""
    query = ("select user_from, message, time, chan from tell where"
             " user_to=lower(?) order by time")
    return db.execute(query, (user_to.lower(),)).fetchall()
@hook.singlethread
@hook.event('PRIVMSG')
def tellinput(paraml, input=None, db=None):
    """On any channel message, deliver the speaker's oldest pending tell.

    Only one tell is delivered per message; the remainder is summarized
    and left queued for .showtells.
    """
    if 'showtells' in input.msg.lower():
        # Let the .showtells command handle full delivery itself.
        return

    db_init(db)

    tells = get_tells(db, input.nick)

    if tells:
        user_from, message, time, chan = tells[0]
        reltime = timesince.timesince(time)

        reply = "%s said %s ago in %s: %s" % (user_from, reltime, chan,
                                              message)
        if len(tells) > 1:
            reply += " (+%d more, .showtells to view)" % (len(tells) - 1)

        # Remove the delivered tell before sending the reply.
        db.execute("delete from tell where user_to=lower(?) and message=?",
                   (input.nick, message))
        db.commit()
        input.pm(reply)
@hook.command(autohelp=False)
def showtells(inp, nick='', chan='', pm=None, db=None):
    ".showtells -- view all pending tell messages (sent in PM)."

    db_init(db)

    tells = get_tells(db, nick)
    if not tells:
        pm("You have no pending tells.")
        return

    # PM every queued tell, oldest first.
    for tell in tells:
        user_from, message, time, chan = tell
        past = timesince.timesince(time)
        pm("%s said %s ago in %s: %s" % (user_from, past, chan, message))

    # Everything was delivered; clear the caller's queue.
    db.execute("delete from tell where user_to=lower(?)",
               (nick,))
    db.commit()
@hook.command
def tell(inp, nick='', chan='', db=None, conn=None):
    ".tell <nick> <message> -- relay <message> to <nick> when <nick> is around"
    query = inp.split(' ', 1)

    if len(query) != 2:
        return tell.__doc__

    user_to = query[0].lower()
    message = query[1].strip()
    user_from = nick

    # Don't reveal the sender's private channel name in the delivery.
    if chan.lower() == user_from.lower():
        chan = 'a pm'

    # Refuse self-tells and tells addressed to the bot itself.
    if user_to in (user_from.lower(), conn.nick.lower()):
        return "No."

    db_init(db)

    # Cap the queue so a single nick can't be flooded with messages.
    if db.execute("select count() from tell where user_to=?",
                  (user_to,)).fetchone()[0] >= 5:
        return "That person has too many things queued."

    try:
        db.execute("insert into tell(user_to, user_from, message, chan,"
                   "time) values(?,?,?,?,?)", (user_to, user_from, message,
                   chan, time.time()))
        db.commit()
    except db.IntegrityError:
        # (user_to, message) is the primary key: duplicate message.
        return "Message has already been queued."

    return "I'll pass that along."
| " tell.py: written by sklnd in July 2009"
" 2010.01.25 - modified by Scaevolus"
import time
from util import hook, timesince
def db_init(db):
"check to see that our db has the tell table and return a dbection."
db.execute("create table if not exists tell"
"(user_to, user_from, message, chan, time,"
"primary key(user_to, message))")
db.commit()
return db
def get_tells(db, user_to):
return db.execute("select user_from, message, time, chan from tell where"
" user_to=lower(?) order by time",
(user_to.lower(),)).fetchall()
@hook.singlethread
@hook.event('PRIVMSG')
def tellinput(paraml, input=None, db=None):
if 'showtells' in input.msg.lower():
return
db_init(db)
tells = get_tells(db, input.nick)
if tells:
user_from, message, time, chan = tells[0]
reltime = timesince.timesince(time)
reply = "%s said %s ago in %s: %s" % (user_from, reltime, chan,
message)
if len(tells) > 1:
reply += " (+%d more, .showtells to view)" % (len(tells) - 1)
db.execute("delete from tell where user_to=lower(?) and message=?",
(input.nick, message))
db.commit()
input.pm(reply)
@hook.command(autohelp=False)
def showtells(inp, nick='', chan='', pm=None, db=None):
".showtells -- view all pending tell messages (sent in PM)."
db_init(db)
tells = get_tells(db, nick)
if not tells:
pm("You have no pending tells.")
return
for tell in tells:
user_from, message, time, chan = tell
past = timesince.timesince(time)
pm("%s said %s ago in %s: %s" % (user_from, past, chan, message))
db.execute("delete from tell where user_to=lower(?)",
(nick,))
db.commit()
@hook.command
def tell(inp, nick='', chan='', db=None):
".tell <nick> <message> -- relay <message> to <nick> when <nick> is around"
query = inp.split(' ', 1)
if len(query) != 2:
return tell.__doc__
user_to = query[0].lower()
message = query[1].strip()
user_from = nick
if chan.lower() == user_from.lower():
chan = 'a pm'
if user_to == user_from.lower():
return "No."
db_init(db)
if db.execute("select count() from tell where user_to=?",
(user_to,)).fetchone()[0] >= 5:
return "That person has too many things queued."
try:
db.execute("insert into tell(user_to, user_from, message, chan,"
"time) values(?,?,?,?,?)", (user_to, user_from, message,
chan, time.time()))
db.commit()
except db.IntegrityError:
return "Message has already been queued."
return "I'll pass that along."
| Python | 0 |
ca83457b4a003527cad9c9d57402c53e4571299c | add python opt and logging boilerplate code | sandbox/python/boilerplate_code/python_opt_log.py | sandbox/python/boilerplate_code/python_opt_log.py | #!/usr/bin/env python
import argparse
import logging
import os
import sys
import re
logger = None
def my_function(blah, *more_inputs):
    """Placeholder for the real work; currently does nothing.

    Accepts one or more positional inputs because the __main__ block
    calls it with two file arguments -- the original single-argument
    signature made that call raise TypeError.
    """
    return
if __name__ == "__main__":
    # Log line layout shared by both verbosity levels.
    FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
    parser = argparse.ArgumentParser(description="program name", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # NOTE(review): type=file is Python 2 only; argparse.FileType('r') is
    # the portable spelling.
    parser.add_argument("input1", type = file)
    parser.add_argument("input2", type = file)
    parser.add_argument("--selection", type = str, default = 'a', choices = ['a', 'b', 'c'], help = 'choose from a,b,c')
    parser.add_argument("--cutoff", type = int, default = 1, help = 'cutoff score')
    parser.add_argument("--variable_args", type = float, action = 'append', nargs = 3,
                        default = [1.0,2.0,1.2], help = '3 scores')
    parser.add_argument("--verbose","-v", action = 'count', help='increase verbosity')
    args = parser.parse_args()

    # Any -v selects DEBUG; otherwise INFO.
    # NOTE(review): without -v, args.verbose is None, so `None >= 1` only
    # works under Python 2's ordering rules; use default=0 for Python 3.
    if args.verbose >= 1:
        logging.basicConfig(level=logging.DEBUG, format = FORMAT)
    else:
        logging.basicConfig(level=logging.INFO, format=FORMAT)
    logger = logging.getLogger(__name__)

    logger.info("working hard ...")
    my_function(args.input1, args.input2)
    logger.info("Done.")
| Python | 0 | |
1058a9cb6e667c850f56b6003038496b77c359c5 | Add tool to fix links. | website/tools/append_index_html_to_internal_links.py | website/tools/append_index_html_to_internal_links.py | """Script to fix the links in the staged website.
Finds all internal links which do not have index.html at the end and appends
index.html in the appropriate place (preserving anchors, etc).
Usage:
From root directory, after running the jekyll build, execute
'python tools/append_index_html_to_internal_links.py'.
Dependencies:
beautifulsoup4
Installable via pip as 'sudo pip install beautifulsoup4' or apt via
'sudo apt-get install python-beautifulsoup4'.
"""
import fnmatch
import os
import re

from bs4 import BeautifulSoup

# Original link match. Matches any string which starts with '/' and doesn't
# have a file extension.
linkMatch = r'^\/(.*\.(?!([^\/]+)$))?[^.]*$'

# Regex which matches strings of type /internal/link/#anchor. Breaks into two
# groups for ease of inserting 'index.html'.
anchorMatch1 = r'(.+\/)(#[^\/]+$)'

# Regex which matches strings of type /internal/link#anchor. Breaks into two
# groups for ease of inserting 'index.html'.
anchorMatch2 = r'(.+\/[a-zA-Z0-9]+)(#[^\/]+$)'

matches = []

# Recursively walk content directory and find all html files.
for root, dirnames, filenames in os.walk('content'):
    for filename in fnmatch.filter(filenames, '*.html'):
        # Javadoc does not have the index.html problem, so omit it.
        if 'javadoc' not in root:
            matches.append(os.path.join(root, filename))

print 'Matches: ' + str(len(matches))

# Iterates over each matched file looking for link matches.
for match in matches:
    print 'Fixing links in: ' + match
    mf = open(match)
    soup = BeautifulSoup(mf, "lxml")
    # Iterates over every <a>
    for a in soup.findAll('a'):
        try:
            hr = a['href']
            if re.match(linkMatch, hr) is not None:
                if hr.endswith('/'):
                    # /internal/link/
                    a['href'] = hr + 'index.html'
                elif re.match(anchorMatch1, hr) is not None:
                    # /internal/link/#anchor
                    mat = re.match(anchorMatch1, hr)
                    a['href'] = mat.group(1) + 'index.html' + mat.group(2)
                elif re.match(anchorMatch2, hr) is not None:
                    # /internal/link#anchor
                    mat = re.match(anchorMatch2, hr)
                    a['href'] = mat.group(1) + '/index.html' + mat.group(2)
                else:
                    # /internal/link
                    a['href'] = hr + '/index.html'
                # NOTE(review): the file is re-prettified and rewritten once
                # per matched link rather than once per file -- works, but
                # is O(links) writes; confirm before optimizing.
                mf.close()
                html = soup.prettify("utf-8")
                # Write back to the file.
                with open(match, "wb") as f:
                    print 'Replacing ' + hr + ' with: ' + a['href']
                    f.write(html)
        except KeyError as e:
            # Some <a> tags don't have an href.
            continue
| Python | 0 | |
ede7a61e1c1a77438bc027b41a5a9cb03eb6328c | raise a timeout in nrpe_poller test, so windows connect() has enought time | test/test_modules_nrpe_poller.py | test/test_modules_nrpe_poller.py | #!/usr/bin/env python
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
import os
from Queue import Empty
from multiprocessing import Queue, Manager, active_children
from shinken_test import *
from shinken.log import logger
from shinken.objects.module import Module
from shinken.modules import nrpe_poller
from shinken.modules.nrpe_poller import get_instance
modconf = Module()
modconf.module_name = "NrpePoller"
modconf.module_type = nrpe_poller.properties['type']
modconf.properties = nrpe_poller.properties.copy()
class TestNrpePoller(ShinkenTest):
    # Uncomment this is you want to use a specific configuration
    # for your test
    #def setUp(self):
    #    self.setup_with_file('etc/nagios_module_hack_cmd_poller_tag.cfg')

    def test_nrpe_poller(self):
        """Run one NRPE check through the poller worker loop.

        The target host does not exist, so the check must come back
        with status 'done' and a CRITICAL (2) exit status.
        """
        mod = nrpe_poller.Nrpe_poller(modconf)

        sl = get_instance(mod)
        # Look if we really change our commands
        print sl.__dict__
        sl.id = 1
        sl.i_am_dying = False

        # Queues shared with the worker: work in, results out, control.
        manager = Manager()
        to_queue = manager.Queue()
        from_queue = manager.Queue()  # list()
        control_queue = Queue()

        # We prepare a check in the to_queue
        status = 'queue'
        # -t 5: generous timeout so slow connect() failures (e.g. on
        # Windows) still resolve before the worker gives up.
        command = "$USER1$/check_nrpe -H localhost33 -n -u -t 5 -c check_load3 -a 20"  # -a arg1 arg2 arg3"
        ref = None
        t_to_to = time.time()
        c = Check(status, command, ref, t_to_to)

        msg = Message(id=0, type='Do', data=c)
        to_queue.put(msg)

        # The worker will read a message by loop. We want it to
        # do 2 loops, so we fake a message, adn the Number 2 is a real
        # exit one
        msg1 = Message(id=0, type='All is good, continue')
        msg2 = Message(id=0, type='Die')

        control_queue.put(msg1)
        for _ in xrange(1, 2):
            control_queue.put(msg1)
        # control_queue.put(msg1)
        # control_queue.put(msg1)
        # control_queue.put(msg1)
        # control_queue.put(msg1)
        control_queue.put(msg2)
        sl.work(to_queue, from_queue, control_queue)

        o = from_queue.get()  # pop()
        print "O", o
        print o.__dict__
        self.assert_(o.status == 'done')
        self.assert_(o.exit_status == 2)
        # to_queue.close()
        # control_queue.close()
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python
# Copyright (C) 2009-2010:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
import os
from Queue import Empty
from multiprocessing import Queue, Manager, active_children
from shinken_test import *
from shinken.log import logger
from shinken.objects.module import Module
from shinken.modules import nrpe_poller
from shinken.modules.nrpe_poller import get_instance
modconf = Module()
modconf.module_name = "NrpePoller"
modconf.module_type = nrpe_poller.properties['type']
modconf.properties = nrpe_poller.properties.copy()
class TestNrpePoller(ShinkenTest):
# Uncomment this is you want to use a specific configuration
# for your test
#def setUp(self):
# self.setup_with_file('etc/nagios_module_hack_cmd_poller_tag.cfg')
def test_nrpe_poller(self):
mod = nrpe_poller.Nrpe_poller(modconf)
sl = get_instance(mod)
# Look if we really change our commands
print sl.__dict__
sl.id = 1
sl.i_am_dying = False
manager = Manager()
to_queue = manager.Queue()
from_queue = manager.Queue() # list()
control_queue = Queue()
# We prepare a check in the to_queue
status = 'queue'
command = "$USER1$/check_nrpe -H localhost33 -n -u -t 1 -c check_load3 -a 20" # -a arg1 arg2 arg3"
ref = None
t_to_to = time.time()
c = Check(status, command, ref, t_to_to)
msg = Message(id=0, type='Do', data=c)
to_queue.put(msg)
# The worker will read a message by loop. We want it to
# do 2 loops, so we fake a message, adn the Number 2 is a real
# exit one
msg1 = Message(id=0, type='All is good, continue')
msg2 = Message(id=0, type='Die')
control_queue.put(msg1)
for _ in xrange(1, 2):
control_queue.put(msg1)
# control_queue.put(msg1)
# control_queue.put(msg1)
# control_queue.put(msg1)
# control_queue.put(msg1)
control_queue.put(msg2)
sl.work(to_queue, from_queue, control_queue)
o = from_queue.get() # pop()
print "O", o
print o.__dict__
self.assert_(o.status == 'done')
self.assert_(o.exit_status == 2)
# to_queue.close()
# control_queue.close()
if __name__ == '__main__':
unittest.main()
| Python | 0 |
8bf248f304e7188e279a37ff06c8fc41f54e1df8 | Add console log | Logging.py | Logging.py |
from GulpServer.Settings import Settings
user_settings = None
def plugin_loaded():
    # NOTE(review): the name matches Sublime Text's plugin-load hook, which
    # runs after the plugin API is ready -- confirm host environment.
    # Initializes the module-level settings object used by Console.log().
    global user_settings
    user_settings = Settings()
class Console(object):
    """Minimal console logger gated on the ``dev`` user setting."""

    def log(self, *args):
        # Stay silent unless the developer flag is enabled in settings.
        if not user_settings.get('dev'):
            return
        print(*args)
| Python | 0.000003 | |
9451bfccaf9e2782dc0b1e7670f61ce765b8e7c2 | Update for Issue #163 | tamper/nonrecursivereplacement.py | tamper/nonrecursivereplacement.py | #!/usr/bin/env python
"""
Copyright (c) 2006-2012 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import random
import re
from lib.core.common import singleTimeWarnMessage
from lib.core.enums import PRIORITY
__priority__ = PRIORITY.NORMAL
def tamper(payload, headers):
"""
Replaces predefined SQL keywords with representations
suitable for replacement (e.g. .replace("SELECT", "")) filters
Example:
* Input: 1 UNION SELECT 2--
* Output: 1 UNUNIONION SELSELECTECT 2--
Notes:
* Useful to bypass very weak custom filters
"""
keywords = ("UNION", "SELECT", "INSERT", "UPDATE", "FROM", "WHERE")
retVal = payload
warnMsg = "currently only couple of keywords are being processed %s. " % str(keywords)
warnMsg += "You can set it manually according to your needs"
singleTimeWarnMessage(warnMsg)
if payload:
for keyword in keywords:
_ = random.randint(1, len(keyword) - 1)
retVal = re.sub(r"(?i)\b%s\b" % keyword, "%s%s%s" % (keyword[:_], keyword, keyword[_:]), retVal)
return retVal, headers
| Python | 0 | |
ec07c74852eaf9bc6ec7d4abb0e5bb3a740501a4 | Add BoundingBox tests | photutils/aperture/tests/test_bounding_box.py | photutils/aperture/tests/test_bounding_box.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest
from ..bounding_box import BoundingBox
try:
import matplotlib
HAS_MATPLOTLIB = True
except:
HAS_MATPLOTLIB = False
def test_bounding_box_init():
bbox = BoundingBox(1, 10, 2, 20)
assert bbox.ixmin == 1
assert bbox.ixmax == 10
assert bbox.iymin == 2
assert bbox.iymax == 20
def test_bounding_box_init_minmax():
with pytest.raises(ValueError):
BoundingBox(100, 1, 1, 100)
with pytest.raises(ValueError):
BoundingBox(1, 100, 100, 1)
def test_bounding_box_inputs():
with pytest.raises(TypeError):
BoundingBox([1], [10], [2], [9])
with pytest.raises(TypeError):
BoundingBox([1, 2], 10, 2, 9)
with pytest.raises(TypeError):
BoundingBox(1.0, 10.0, 2.0, 9.0)
with pytest.raises(TypeError):
BoundingBox(1.3, 10, 2, 9)
with pytest.raises(TypeError):
BoundingBox(1, 10.3, 2, 9)
with pytest.raises(TypeError):
BoundingBox(1, 10, 2.3, 9)
with pytest.raises(TypeError):
BoundingBox(1, 10, 2, 9.3)
def test_bounding_box_from_float():
    """_from_float widens the upper edges to the next whole pixel."""
    # Whole-number limits (the example from the method docstring).
    box = BoundingBox._from_float(xmin=1.0, xmax=10.0, ymin=2.0, ymax=20.0)
    assert box == BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=21)
    # Fractional limits round outward on the max side.
    box = BoundingBox._from_float(xmin=1.4, xmax=10.4, ymin=1.6, ymax=10.6)
    assert box == BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=12)
def test_bounding_box_eq():
    """Equality holds for identity and fails when any limit differs."""
    box = BoundingBox(1, 10, 2, 20)
    assert box == box
    for other in (BoundingBox(9, 10, 2, 20),
                  BoundingBox(1, 99, 2, 20),
                  BoundingBox(1, 10, 9, 20),
                  BoundingBox(1, 10, 2, 99)):
        assert box != other
def test_bounding_box_repr():
    """repr() is exact and round-trips through eval()."""
    box = BoundingBox(1, 10, 2, 20)
    text = repr(box)
    assert text == 'BoundingBox(ixmin=1, ixmax=10, iymin=2, iymax=20)'
    assert eval(text) == box
def test_bounding_box_shape():
    """shape follows the numpy (ny, nx) convention."""
    assert BoundingBox(1, 10, 2, 20).shape == (18, 9)
def test_bounding_box_slices():
    """slices returns (y-slice, x-slice) usable for array indexing."""
    assert BoundingBox(1, 10, 2, 20).slices == (slice(2, 20), slice(1, 10))
def test_bounding_box_extent():
    """extent gives matplotlib-style pixel-edge coordinates."""
    assert_allclose(BoundingBox(1, 10, 2, 20).extent, (0.5, 9.5, 1.5, 19.5))
@pytest.mark.skipif('not HAS_MATPLOTLIB')
def test_bounding_box_as_patch():
    """as_patch yields a Rectangle anchored at the pixel edge."""
    patch = BoundingBox(1, 10, 2, 20).as_patch()
    assert_allclose(patch.get_xy(), (0.5, 1.5))
    assert_allclose(patch.get_width(), 9)
    assert_allclose(patch.get_height(), 18)
| Python | 0 | |
def icc(data, icc_type):
    """Compute an intraclass correlation coefficient (ICC) for ``data``.

    Formulas follow Shrout & Fleiss (1979), "Intraclass correlations:
    uses in assessing rater reliability", Psychological Bulletin 86(2),
    420; adapted from nipype ``algorithms.icc``.

    Parameters
    ----------
    data : array-like, shape (n_targets, n_judges)
        Ratings matrix: one row per rated target, one column per judge.
        (Generalized to accept plain nested lists as well as ndarrays.)
    icc_type : str
        'icc1' (not implemented yet; returns -1), 'icc2' for ICC(2,1)
        (judges as a random sample) or 'icc3' for ICC(3,1) (judges fixed).

    Returns
    -------
    float
        The requested coefficient (or -1 for the unimplemented 'icc1').

    Raises
    ------
    ValueError
        If ``icc_type`` is not one of the recognized names (previously
        this fell through and raised UnboundLocalError at the return).
    """
    Y = np.asarray(data)  # accept lists; original required an ndarray
    n, k = Y.shape        # n targets (rows), k judges (columns)

    # Degrees of freedom (dfw for ICC(1,1) is omitted until implemented).
    dfb = n - 1              # between targets
    dfj = k - 1              # between judges
    dfe = (n - 1) * (k - 1)  # residual

    # Total sum of squares around the grand mean.
    mean_Y = np.mean(Y)
    SST = ((Y - mean_Y) ** 2).sum()

    # Design matrix: judge (session) indicators then target indicators.
    x = np.kron(np.eye(k), np.ones((n, 1)))
    x0 = np.tile(np.eye(n), (k, 1))
    X = np.hstack([x, x0])

    # Residual (error) sum of squares from the two-way model fit.
    predicted_Y = np.dot(np.dot(np.dot(X, np.linalg.pinv(np.dot(X.T, X))),
                                X.T), Y.flatten('F'))
    residuals = Y.flatten('F') - predicted_Y
    SSE = (residuals ** 2).sum()
    EMS = SSE / dfe

    # Between-judges (columns) sum of squares / mean square.
    SSC = ((np.mean(Y, 0) - mean_Y) ** 2).sum() * n
    JMS = SSC / dfj

    # Between-targets (rows) sum of squares / mean square.
    SSR = SST - SSC - SSE
    BMS = SSR / dfb

    if icc_type == 'icc1':
        # ICC(1,1) = (BMS - WMS) / (BMS + (k-1) * WMS) -- not implemented.
        ICC = -1
    elif icc_type == 'icc2':
        # ICC(2,1): judges treated as a random sample of possible judges.
        ICC = (BMS - EMS) / (BMS + (k - 1) * EMS + k * (JMS - EMS) / n)
    elif icc_type == 'icc3':
        # ICC(3,1): judges treated as fixed.
        ICC = (BMS - EMS) / (BMS + (k - 1) * EMS)
    else:
        raise ValueError("unknown icc_type: %r" % (icc_type,))

    return ICC
import numpy as np

# Example ratings from Shrout & Fleiss (1979): 6 targets rated by 4 judges.
data = np.array([
    [9,2,5,8],
    [6,1,3,2],
    [8,4,6,8],
    [7,1,2,6],
    [10,5,6,9],
    [6,2,4,7]
])

# ICC(1,1) is not implemented yet, so its demo call stays disabled.
# print("ICC(1,1): " + str(icc(data,'icc1')))  # approx. 0.17
print("ICC(2,1): " + str(icc(data,'icc2')))  # approx. 0.29
print("ICC(3,1): " + str(icc(data,'icc3')))  # approx. 0.71
ae3005089da6edc4d4488b8619dcbee9e556fc22 | Fix typo | pylxd/client.py | pylxd/client.py |
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from . import connection
from . import container
from . import certificate
from . import hosts
from . import image
from . import network
from . import profiles
class Client(object):
    """Facade over the LXD REST API (via unix socket or HTTPS).

    Most methods below are unimplemented stubs sketching the intended
    surface; only ``__init__`` has behavior.

    NOTE(review): ``port="8443"`` is passed as a string -- confirm that
    connection.HTTPSConnection expects a string rather than an int.
    NOTE(review): ``container_snaphsot_delete`` is misspelled; renaming
    would break any existing callers, so it is only flagged here.
    """
    def __init__(self, base_url, host):
        self.unix_socket = '/var/lib/lxd/unix.socket'
        self.base_url = base_url
        self.host = host
        # 'https' selects the remote TLS transport; anything else falls
        # back to the local unix-socket transport.
        if base_url == 'https':
            self.connection = connection.HTTPSConnection(host, port="8443")
        else:
            self.connection = connection.UnixHTTPConnection(self.unix_socket)
        self.hosts = hosts.LXDHost(self.connection)
        self.certificate = certificate.LXDCertificate(self.connection)
        self.image = image.LXDImage(self.connection)
        self.network = network.LXDNetwork(self.connection)
        self.container = container.LXDContainer(self.connection)
        self.profile = profiles.LXDProfile(self.connection)
    # host
    def host_ping(self):
        pass
    def host_info(self):
        pass
    # images
    def image_list(self):
        pass
    def image_list_by_key(self):
        pass
    def image_upload(self):
        pass
    def image_info(self):
        pass
    def image_delete(self):
        pass
    def image_export(self):
        pass
    # alias
    def alias_list(self):
        pass
    def alias_create(self):
        pass
    def alias_update(self):
        pass
    def alias_delete(self):
        pass
    # containers:
    def container_init(self):
        pass
    def container_start(self):
        pass
    def container_stop(self):
        pass
    def container_destroy(self):
        pass
    def container_suspend(self):
        pass
    def container_reboot(self):
        pass
    def container_info(self):
        pass
    def container_resume(self):
        pass
    def get_container_log(self):
        pass
    def get_container_console(self):
        pass
    def get_container_syslog(self):
        pass
    # container state
    def get_container_state(self):
        pass
    def update_container_state(self):
        pass
    # file operations
    def get_container_file(self):
        pass
    def put_container_file(self):
        pass
    # snapshots
    def container_snapshot_list(self):
        pass
    def container_snapshot_create(self):
        pass
    def container_snapshot_info(self):
        pass
    def container_snaphsot_delete(self):
        pass
    def container_run_command(self):
        pass
    # certificates
    def certificate_list(self):
        pass
    def certificate_show(self):
        pass
    # profiles
    def profile_init(self):
        pass
    def profile_show(self):
        pass
    def profile_update(self):
        pass
    def profile_delete(self):
        pass
    # lxd operations
    def list_operations(self):
        pass
    def get_container_operation(self):
        pass
    # networks
    def network_list(self):
        pass
    def network_show(self):
        pass
|
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from . import connection
from . import container
from . import certificate
from . import hosts
from . import image
from . import network
from . import profiles
class Client(object):
    """Facade over the LXD REST API (via unix socket or HTTPS).

    Most methods are unimplemented stubs sketching the intended surface;
    only ``__init__`` has behavior.
    """
    def __init__(self, base_url, host):
        self.unix_socket = '/var/lib/lxd/unix.socket'
        self.base_url = base_url
        self.host = host
        if base_url == 'https':
            self.connection = connection.HTTPSConnection(host, port="8443")
        else:
            self.connection = connection.UnixHTTPConnection(self.unix_socket)
        self.hosts = hosts.LXDHost(self.connection)
        self.certificate = certificate.LXDCertificate(self.connection)
        # Bug fix: was ``self.connecton`` (typo), which raised
        # AttributeError whenever a Client was constructed.
        self.image = image.LXDImage(self.connection)
        self.network = network.LXDNetwork(self.connection)
        self.container = container.LXDContainer(self.connection)
        self.profile = profiles.LXDProfile(self.connection)
    # host
    def host_ping(self):
        pass
    def host_info(self):
        pass
    # images
    def image_list(self):
        pass
    def image_list_by_key(self):
        pass
    def image_upload(self):
        pass
    def image_info(self):
        pass
    def image_delete(self):
        pass
    def image_export(self):
        pass
    # alias
    def alias_list(self):
        pass
    def alias_create(self):
        pass
    def alias_update(self):
        pass
    def alias_delete(self):
        pass
    # containers:
    def container_init(self):
        pass
    def container_start(self):
        pass
    def container_stop(self):
        pass
    def container_destroy(self):
        pass
    def container_suspend(self):
        pass
    def container_reboot(self):
        pass
    def container_info(self):
        pass
    def container_resume(self):
        pass
    def get_container_log(self):
        pass
    def get_container_console(self):
        pass
    def get_container_syslog(self):
        pass
    # container state
    def get_container_state(self):
        pass
    def update_container_state(self):
        pass
    # file operations
    def get_container_file(self):
        pass
    def put_container_file(self):
        pass
    # snapshots
    def container_snapshot_list(self):
        pass
    def container_snapshot_create(self):
        pass
    def container_snapshot_info(self):
        pass
    def container_snaphsot_delete(self):
        pass
    def container_run_command(self):
        pass
    # certificates
    def certificate_list(self):
        pass
    def certificate_show(self):
        pass
    # profiles
    def profile_init(self):
        pass
    def profile_show(self):
        pass
    def profile_update(self):
        pass
    def profile_delete(self):
        pass
    # lxd operations
    def list_operations(self):
        pass
    def get_container_operation(self):
        pass
    # networks
    def network_list(self):
        pass
    def network_show(self):
        pass
| Python | 0.999189 |
43ccd46f3319f6afe154c5ed663143742c229074 | add voronoi_follower | tms_rc/tms_rc_double/scripts/voronoi_follower.py | tms_rc/tms_rc_double/scripts/voronoi_follower.py | # -*- coding:utf-8 -*-
import rospy
from geometry_msgs.msg import Pose2D, Twist
from tms_msg_rc_srv import rc_robot_control, rc_robot_controlResponse
from tms_msg_db.srv import TmsdbGetData, TmsdbGetDataRequest
import datetime
import pymongo
from math import sin, cos, atan2, pi, radians, degrees, sqrt
# Bug fix: the original line had an unterminated string literal
# ("...input/keyop) with no closing quote), making the whole module a
# SyntaxError, and omitted the required message class.
pub = rospy.Publisher("tms_rc_double/cmd_vel_mux/input/keyop", Twist,
                      queue_size=10)
# Shared goal pose set by the service callback; None means "no goal".
GOAL = None
def main():
global GOAL
print "Double_voronoi_follower"
rospy.init_node ('wheelchair_voronoi_follower')
rospy.wait_for_service('/tms_db_reader')
service = rospy.Service(
"double_goal_pose" , rc_robot_control, goalPoseCallBack)
r = rospy.rate(10)
while not rospy.is_shutdown():
if None == GOAL:
continue
KPang = 0.2
KDang = 0
KPdist = 0.1
KDdist = 0
ARV_DIST = 0.25
pose = getCurrentPose()
errorX = GOAL.x - pose.x
errorY = GOAL.y - pose.y
targetT = atan2(errorY, errorX)
errorNX = errorX * cos(-pose.theta) - errorY * sin(-pose.theta)
errorNT = normalizeAng(targetT - pose.theta)
tmp_spd = limit(KPdist * errorNX, 100, -100)
tmp_turn = limit(KPang * degrees(errorNT), 30, -30)
twist = Twist()
distance = sqrt(errorX ** 2 + errorY **2)
rospy.loginfo("dist:{0}".format(distance))
rospy.loginfo("psd:{0}" "turn:{1}".format(tmp_spd, tmp_turn))
if distance <= ARV_DIST:
twist.angular.z = 0
twist.linear.x = 0
GOAL = None
else:
twist.angular.z = radians(tmp_turn)
twist.linear.x = tmp_spd
pub.publish(twist)
r.sleep()
def goalPoseCallBack(req):
    """Service handler: store the requested (x, y, theta[deg]) as GOAL."""
    global GOAL
    GOAL = Pose2D()
    x, y, theta_deg = req.arg[0], req.arg[1], req.arg[2]
    GOAL.x = x
    GOAL.y = y
    GOAL.theta = radians(theta_deg)
    return rc_robot_controlResponse()
def getCurrentPose():
pose = Pose2D()
db_req = TmsdbGetDataRequest()
db_req.tmsdb.id = 2012
db_req.tmsdb.sensor = 3001
try:
srv_client = rospy.ServiceProxy("/tms_db_reader", TmsdbGetData)
res = srv_client(db_req)
if 0 == len(res.tmsdb):
return pose
pose.x = res.tmsdb[0].x
pose.y = res.tmsdb[0].y
pose.theta = res.tmsdb.ry
except rospy.ServiceException as e:
print "Service call failed: %s" %e
return pose
def normalizeAng(rad):
    """Wrap an angle in radians into the interval [-pi, pi]."""
    full_turn = 2 * pi
    while rad > pi:
        rad -= full_turn
    while rad < -pi:
        rad += full_turn
    return rad
def limit(val, maxn, minn):
    """Clamp ``val`` into [minn, maxn] (min is applied after max,
    matching the original composition)."""
    capped = min(maxn, val)
    return max(capped, minn)
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        # Raised by rospy when the node is shut down (e.g. Ctrl-C);
        # exit quietly instead of dumping a traceback.
        pass
| Python | 0.000001 | |
9e090675765a2c0c6412ee51d1e0e007404a30fd | Create k-diff-pairs-in-an-array.py | Python/k-diff-pairs-in-an-array.py | Python/k-diff-pairs-in-an-array.py | # Time: O(n)
# Space: O(n)
# Total Accepted: 5671
# Total Submissions: 20941
# Difficulty: Easy
# Contributors: murali.kf370
# Given an array of integers and an integer k,
# you need to find the number of unique k-diff pairs in the array.
# Here a k-diff pair is defined as an integer pair (i, j),
# where i and j are both numbers in the array and their absolute difference is k.
#
# Example 1:
# Input: [3, 1, 4, 1, 5], k = 2
# Output: 2
# Explanation: There are two 2-diff pairs in the array, (1, 3) and (3, 5).
# Although we have two 1s in the input, we should only return the number of unique pairs.
# Example 2:
# Input:[1, 2, 3, 4, 5], k = 1
# Output: 4
# Explanation: There are four 1-diff pairs in the array, (1, 2), (2, 3), (3, 4) and (4, 5).
# Example 3:
# Input: [1, 3, 1, 5, 4], k = 0
# Output: 1
# Explanation: There is one 0-diff pair in the array, (1, 1).
# Note:
# The pairs (i, j) and (j, i) count as the same pair.
# The length of the array won't exceed 10,000.
# All the integers in the given input belong to the range: [-1e7, 1e7].
class Solution(object):
    def findPairs(self, nums, k):
        """
        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        # A negative absolute difference is impossible.
        if k < 0:
            return 0
        unique = set(nums)
        if k == 0:
            # A 0-diff pair needs a value occurring at least twice.
            seen_once = set()
            duplicated = set()
            for value in nums:
                if value in seen_once:
                    duplicated.add(value)
                seen_once.add(value)
            return len(duplicated)
        # For k > 0 each qualifying pair is identified by its smaller member.
        return sum(1 for value in unique if value + k in unique)
| Python | 0.002165 | |
586e7745f8ed76985f28a391dcf451c06af61903 | add sphinx helper functions | src/rstcheck/_sphinx.py | src/rstcheck/_sphinx.py | """Sphinx helper functions."""
import contextlib
import pathlib
import tempfile
import typing
from . import _docutils, _extras
if _extras.SPHINX_INSTALLED:
import sphinx.application
import sphinx.domains.c
import sphinx.domains.cpp
import sphinx.domains.javascript
import sphinx.domains.python
import sphinx.domains.std
@contextlib.contextmanager
def load_sphinx_if_available() -> typing.Generator[None, None, None]:
    """Contextmanager registering Sphinx directives/roles when sphinx exists."""
    if not _extras.SPHINX_INSTALLED:
        yield
        return
    with tempfile.TemporaryDirectory() as temp_dir:
        build_dir = str(pathlib.Path(temp_dir) / "_build")
        # Instantiating a Sphinx app registers its directives and roles
        # with docutils as a side effect.
        sphinx.application.Sphinx(
            srcdir=temp_dir,
            confdir=None,
            outdir=build_dir,
            doctreedir=build_dir,
            buildername="dummy",
            status=None,  # type: ignore[arg-type] # NOTE: sphinx type hint is incorrect
        )
        yield
def get_sphinx_directives_and_roles() -> typing.Tuple[typing.List[str], typing.List[str]]:
    """Return Sphinx directives and roles loaded from sphinx.

    Accumulates names from the standard domain, the C/C++/JS/Python
    domains (both bare and ``domain:name`` forms), and finally whatever
    is registered in docutils' private registries. The accumulation
    order determines the order of the returned lists, so callers that
    care about order depend on it.

    NOTE(review): this reads sphinx/docutils *private* registries
    (``_directives`` / ``_roles``); it may break across sphinx releases.

    :return: Tuple of directives and roles
    """
    _extras.install_guard("sphinx")
    sphinx_directives = list(sphinx.domains.std.StandardDomain.directives)
    sphinx_roles = list(sphinx.domains.std.StandardDomain.roles)
    for domain in [
        sphinx.domains.c.CDomain,
        sphinx.domains.cpp.CPPDomain,
        sphinx.domains.javascript.JavaScriptDomain,
        sphinx.domains.python.PythonDomain,
    ]:
        domain_directives = list(domain.directives)
        domain_roles = list(domain.roles)
        # Register both the bare name and the domain-qualified form.
        sphinx_directives += domain_directives + [
            f"{domain.name}:{item}" for item in domain_directives
        ]
        sphinx_roles += domain_roles + [f"{domain.name}:{item}" for item in domain_roles]
    sphinx_directives += list(
        sphinx.application.docutils.directives._directives  # pylint: disable=protected-access
    )
    sphinx_roles += list(
        sphinx.application.docutils.roles._roles  # pylint: disable=protected-access
    )
    return (sphinx_directives, sphinx_roles)
def filter_whitelisted_directives_and_roles(
    directives: typing.List[str], roles: typing.List[str]
) -> typing.Tuple[typing.List[str], typing.List[str]]:
    """Drop whitelisted names from the given directives and roles.

    :param directives: Directives to filter
    :param roles: Roles to filter
    :return: Tuple of filtered directives and roles
    """
    directive_whitelist = {"code", "code-block", "include"}
    role_whitelist: typing.Set[str] = set()

    kept_directives = [name for name in directives if name not in directive_whitelist]
    kept_roles = [name for name in roles if name not in role_whitelist]

    return (kept_directives, kept_roles)
def load_sphinx_ignores() -> None:
    """Register Sphinx directives and roles so they are ignored by docutils."""
    _extras.install_guard("sphinx")
    directives, roles = filter_whitelisted_directives_and_roles(
        *get_sphinx_directives_and_roles()
    )
    _docutils.ignore_directives_and_roles(directives, roles)
| Python | 0.000001 | |
9f66f31d42a16d8b9536a9cb160e454118ff4369 | Add tests for UninstallPathSet | tests/unit/test_req_uninstall.py | tests/unit/test_req_uninstall.py | import os
import shutil
import sys
import tempfile
import pytest
from mock import Mock
from pip.locations import running_under_virtualenv
from pip.req.req_uninstall import UninstallPathSet
class TestUninstallPathSet(object):
    """Unit tests for pip's UninstallPathSet path bookkeeping."""

    def setup(self):
        # UninstallPathSet rejects paths outside sys.prefix when running
        # inside a virtualenv, so anchor the tempdir there in that case.
        if running_under_virtualenv():
            self.tempdir = tempfile.mkdtemp(prefix=sys.prefix)
        else:
            self.tempdir = tempfile.mkdtemp()

    def teardown(self):
        shutil.rmtree(self.tempdir, ignore_errors=True)

    def test_add(self):
        existing = os.path.join(self.tempdir, 'foo')
        missing = os.path.join(self.tempdir, 'nonexistant')
        with open(existing, 'w'):
            pass

        ups = UninstallPathSet(dist=Mock())
        assert ups.paths == set()
        # Only paths that actually exist on disk are recorded.
        ups.add(existing)
        assert ups.paths == set([existing])
        ups.add(missing)
        assert ups.paths == set([existing])

    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_add_symlink(self):
        target = os.path.join(self.tempdir, 'foo')
        with open(target, 'w'):
            pass
        link = os.path.join(self.tempdir, 'foo_link')
        os.symlink(target, link)

        ups = UninstallPathSet(dist=Mock())
        ups.add(link)
        # The symlink itself (not its target) is what gets tracked.
        assert ups.paths == set([link])
| Python | 0 | |
8d8f89c82511b86fb87cef5db3bad633283283cc | Add missing migrations in develop branch | modelview/migrations/0044_auto_20191007_1227.py | modelview/migrations/0044_auto_20191007_1227.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-10-07 10:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: drops two obsolete EnergyScenario
    # network fields and refreshes help texts/nullability on three
    # BasicFactsheet fields. Operation order matters; do not restructure
    # by hand once applied to any database.

    dependencies = [
        ('modelview', '0043_merge_20190425_1036'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='energyscenario',
            name='networks_electricity_gas_electricity',
        ),
        migrations.RemoveField(
            model_name='energyscenario',
            name='networks_electricity_gas_gas',
        ),
        migrations.AlterField(
            model_name='basicfactsheet',
            name='logo',
            field=models.ImageField(help_text='If a logo for the model exists load it up', null=True, upload_to='logos', verbose_name='Logo'),
        ),
        migrations.AlterField(
            model_name='basicfactsheet',
            name='methodical_focus_1',
            field=models.CharField(help_text='1-3 Keyords describing the main methodical focus of the model e.g."open source", "sector coupling"', max_length=50, verbose_name='Methodical Focus'),
        ),
        migrations.AlterField(
            model_name='basicfactsheet',
            name='source_of_funding',
            field=models.CharField(help_text='What is the main source of funding for the development of the model?', max_length=200, null=True, verbose_name='Source of funding'),
        ),
    ]
| Python | 0.000001 | |
4a25572283448a820cf55008e81405f3eb84a072 | Add test for unicode in env (#345) | tests/system/verbs/catkin_build/test_unicode_in_env.py | tests/system/verbs/catkin_build/test_unicode_in_env.py | import os
from ....utils import catkin_success
from ...workspace_factory import workspace_factory
def test_catkin_build_with_unicode_in_env():
    """catkin build succeeds even when the environment holds non-ASCII bytes."""
    with workspace_factory() as wf:
        wf.create_package('foo', depends=['bar'])
        wf.create_package('bar')
        wf.build()

        print('Workspace: {0}'.format(wf.workspace))
        assert os.path.isdir(wf.workspace)

        build_env = {'NON_ASCII': '\xc3\xb6'}
        build_cmd = ['build', '--no-status', '--no-notify', '--verbose']
        assert catkin_success(build_cmd, build_env)
| Python | 0 | |
38c091464179b5d015fd84457a4b3e242d8a3faf | Use binary color scheme for heatmap network images. | Network.py | Network.py | #!/usr/bin/env python
# Network representation and basic operations
# Daniel Klein, 5/10/2012
import numpy as np
import scipy.sparse as sparse
import networkx as nx
import matplotlib.pyplot as plt
from Covariate import NodeCovariate, EdgeCovariate
class Network:
    """Directed network over named nodes, backed by a boolean adjacency
    matrix (sparse LIL by default), with named node and edge covariates.
    """

    def __init__(self, N = None):
        """Create a network with ``N`` isolated nodes, or an empty shell
        (no adjacency matrix, no names) when ``N`` is omitted/falsy."""
        if N:
            self.N = N
            # ``np.bool`` was removed in NumPy 1.24; the builtin ``bool``
            # is the equivalent dtype and works on older NumPy as well.
            self.network = sparse.lil_matrix((self.N,self.N), dtype=bool)
            self.names = np.array(['n_%d' % n for n in range(N)])
        else:
            self.N = 0
            self.network = None
            self.names = None

        # Maps from names to 1-D and 2-D arrays, respectively
        self.node_covariates = {}
        self.edge_covariates = {}

    def tocsr(self):
        """Convert the sparse adjacency matrix to CSR format in place."""
        if not self.is_sparse():
            # Bug fix: the original printed a message and then executed a
            # bare ``raise`` with no active exception, which itself fails
            # with an unrelated error; raise a real exception instead.
            raise RuntimeError('Attempting CSR conversion of a non-sparse network.')
        self.network = self.network.tocsr()

    def new_node_covariate(self, name):
        """Create, register, and return an empty node covariate."""
        self.node_covariates[name] = NodeCovariate(self.names)
        return self.node_covariates[name]

    def new_edge_covariate(self, name):
        """Create, register, and return an empty edge covariate."""
        self.edge_covariates[name] = EdgeCovariate(self.names)
        return self.edge_covariates[name]

    def subnetwork(self, inds):
        """Return the induced subnetwork on node indices ``inds``,
        carrying all node and edge covariates along."""
        sub_N = len(inds)
        sub = Network()
        if self.is_sparse():
            # CSR supports the fancy row/column indexing used below.
            self.tocsr()
        sub.network = self.network[inds][:,inds]
        sub.names = self.names[inds]
        sub.N = sub_N

        sub.node_covariates = {}
        for node_covariate in self.node_covariates:
            src = self.node_covariates[node_covariate]
            sub.new_node_covariate(node_covariate).from_existing(src, inds)
        sub.edge_covariates = {}
        for edge_covariate in self.edge_covariates:
            src = self.edge_covariates[edge_covariate]
            sub.new_edge_covariate(edge_covariate).from_existing(src, inds)

        return sub

    def generate(self, model, *opts):
        """Replace the adjacency matrix with one drawn from ``model``."""
        self.network = model.generate(self, *opts)

    def network_from_file_gexf(self, path):
        """Load nodes and edges from the GEXF file at ``path``."""
        in_network = nx.read_gexf(path)
        self.N = in_network.number_of_nodes()
        self.names = np.array(in_network.nodes())
        self.network = sparse.lil_matrix((self.N,self.N), dtype=bool)

        name_to_index = {}
        for i, n in enumerate(self.names):
            name_to_index[n] = i
        for s, t in in_network.edges():
            self.network[name_to_index[s],name_to_index[t]] = True

    def network_from_edges(self, edges):
        """Build the network from an iterable of (source, target) name
        pairs; the node set is inferred from the edge endpoints."""
        # First pass over edges to determine names and number of nodes
        names = set()
        N = 0
        for n_1, n_2 in edges:
            if not n_1 in names:
                names.add(n_1)
                N += 1
            if not n_2 in names:
                names.add(n_2)
                N += 1

        # Process list of names and assign indices
        self.N = N
        self.network = sparse.lil_matrix((self.N,self.N), dtype=bool)
        self.names = np.array(list(names))
        name_to_index = {}
        for i, n in enumerate(self.names):
            name_to_index[n] = i

        # Second pass over edges to populate network
        for n_1, n_2 in edges:
            self.network[name_to_index[n_1],name_to_index[n_2]] = True

    def nodes(self):
        """Return the array of node names."""
        return self.names

    def adjacency_matrix(self):
        """Return the adjacency matrix densified (or as stored if dense)."""
        if self.is_sparse():
            return self.network.todense()
        else:
            return self.network

    def is_sparse(self):
        """True when the adjacency matrix is a scipy sparse matrix."""
        return sparse.issparse(self.network)

    def sparse_adjacency_matrix(self):
        """Return the sparse adjacency matrix wrapped by ``np.array``.

        NOTE(review): ``np.array`` around a sparse matrix yields a 0-d
        object array; preserved as-is for caller compatibility.
        """
        if not self.is_sparse():
            # Bug fix: replaced print-plus-bare-``raise`` (see tocsr).
            raise RuntimeError('Asked for sparse adjacency matrix of non-sparse network.')
        return np.array(self.network)

    def show(self):
        """Draw the network with a graphviz layout and display it."""
        graph = nx.DiGraph()

        for n in self.nodes():
            graph.add_node(n)

        if self.is_sparse():
            # (Removed an unused ``nonzeros = set()`` local.)
            nz_i, nz_j = self.network.nonzero()
            for n in range(self.network.nnz):
                graph.add_edge(self.names[nz_i[n]],self.names[nz_j[n]])
        else:
            for i in range(self.N):
                for j in range(self.N):
                    if self.network[i,j]:
                        graph.add_edge(self.names[i], self.names[j])

        nx.draw_graphviz(graph)
        plt.show()

    def show_heatmap(self, order_by = None):
        """Display the adjacency matrix as a black/white heatmap,
        optionally ordering nodes by the named node covariate."""
        if order_by:
            title = 'Ordered by node covariate "%s"' % order_by
            o = np.argsort(self.node_covariates[order_by][:])
        else:
            title, o = 'Unordered', np.arange(self.N)

        plt.figure()
        A = self.adjacency_matrix()
        plt.imshow(A[o][:,o])
        plt.set_cmap('binary')
        plt.title(title)
        plt.show()
# Some "tests"
if __name__ == '__main__':
net = Network()
net.network_from_file_gexf('test.gexf')
net.new_node_covariate('x_0')
net.node_covariates['x_0'].from_pairs([str(i) for i in range(10)],
[i**2 for i in range(10)])
net.new_node_covariate('x_1')
net.node_covariates['x_1'].data[:] = np.random.normal(2,1,net.N)
def f_self(n_1, n_2):
return n_1 == n_2
net.new_edge_covariate('self_edge').from_binary_function_name(f_self)
def f_first_half_dir(n_1, n_2):
return (n_1 < n_2) and (n_2 in ['0','1','2','3','4'])
net.new_edge_covariate('ec_2').from_binary_function_name(f_first_half_dir)
print net.node_covariates['x_0']
print net.node_covariates['x_1']
print net.edge_covariates['self_edge']
print net.edge_covariates['ec_2']
print net.adjacency_matrix()
print net.nodes()
net.show()
net_2 = net.subnetwork(np.array([5,0,1,6]))
print net_2.adjacency_matrix()
print net_2.node_covariates['x_0']
print net_2.node_covariates['x_1']
print net_2.edge_covariates['self_edge']
print net_2.edge_covariates['ec_2']
net_2.show()
| Python | 0 | |
53f91164ce93a01c2ad628fd49109a5fa8917ecb | Extend datasource model schema (#2342) | timesketch/migrations/versions/180a387da650_extend_datasource_model_with_total_file_.py | timesketch/migrations/versions/180a387da650_extend_datasource_model_with_total_file_.py | """Extend datasource model with total file events field
Revision ID: 180a387da650
Revises: 75af34d75b1e
Create Date: 2022-09-26 13:04:10.336534
"""
# This code is auto generated. Ignore linter errors.
# pylint: skip-file
# revision identifiers, used by Alembic.
revision = '180a387da650'
down_revision = '75af34d75b1e'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply: add the nullable BigInteger ``total_file_events`` column to ``datasource``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('datasource', sa.Column('total_file_events', sa.BigInteger(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert: drop the ``total_file_events`` column from ``datasource``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('datasource', 'total_file_events')
    # ### end Alembic commands ###
| Python | 0 | |
63dc7fb2586824b6a6de52b1ba80e6196d80ff42 | Create credentials.py | credentials.py | credentials.py | # add your primary statsnz key here
# available from https://statisticsnz.portal.azure-api.net/
statsnz_key = "MY_SECRET_KEY"
| Python | 0.000001 | |
2aadd55510684c4065c1bed1c1387ee57b18fd77 | Add a prototype of Simulated Annealing Algorithm, and a TSP example. | OpenCLGA/sa.py | OpenCLGA/sa.py | #!/usr/bin/python3
from abc import ABCMeta
from utils import calc_linear_distance, plot_tsp_result
import math
import random
class SAImpl(metaclass = ABCMeta):
    """Abstract interface a simulated-annealing problem must implement.

    NOTE(review): the methods are not decorated with @abstractmethod, so
    subclasses are not actually forced to override them -- confirm intent.
    """
    def __init__(self):
        pass
    ## Calculate the cost of the solution
    def cost(self, solution):
        pass
    ## Return a new neighbor solution
    def neighbor(self, solution):
        pass
    ## Return a probability to decide whether accept or not.
    def acceptance_probability(self, old_cost, new_cost, temperature):
        pass
    ## Start annealing
    def anneal(self):
        pass
class TSPSolution(SAImpl):
    """Simulated-annealing formulation of the travelling-salesman problem."""

    def __init__(self, city_info):
        SAImpl.__init__(self)
        # Mapping of city id -> (x, y) position.
        self.city_info = city_info
        # Annealing schedule.
        self.temperature = 1000.0
        self.alpha = 0.9
        self.terminate_temperature = 0.00001
        self.iterations = 500

    @staticmethod
    def get_init_params():
        """Generate 20 cities at random positions in a 100x100 square."""
        num_cities = 20
        random.seed()
        return {cid: (random.random() * 100, random.random() * 100)
                for cid in range(0, num_cities)}

    def cost(self, solution):
        """Total length of the closed tour visiting cities in order."""
        num = len(self.city_info.keys())
        tour_length = 0
        for idx, cid in enumerate(solution):
            nxt = solution[(idx + 1) % num]
            tour_length += calc_linear_distance(self.city_info[cid][0],
                                                self.city_info[cid][1],
                                                self.city_info[nxt][0],
                                                self.city_info[nxt][1])
        return tour_length

    def neighbor(self, solution):
        """Return a copy of ``solution`` with two random positions swapped."""
        candidate = solution[:]
        num = len(self.city_info.keys())
        a = random.randint(0, num - 1)
        b = random.randint(0, num - 1)
        while a == b:
            b = random.randint(0, num - 1)
        candidate[a], candidate[b] = solution[b], solution[a]
        return candidate

    def acceptance_probability(self, old_cost, new_cost, temperature):
        """1.0 for improvements, else the Boltzmann factor exp(-dc/T)."""
        if new_cost < old_cost:
            return 1.0
        return math.exp(float(old_cost - new_cost) / temperature)

    def anneal(self):
        """Run the cooling loop, plot the final tour, and return it."""
        solution = list(self.city_info.keys())
        random.shuffle(solution)
        old_cost = self.cost(solution)
        T = self.temperature
        T_min = self.terminate_temperature
        alpha = self.alpha
        while T > T_min:
            print('T={}'.format(T))
            for _ in range(self.iterations):
                candidate = self.neighbor(solution)
                candidate_cost = self.cost(candidate)
                if self.acceptance_probability(old_cost, candidate_cost,
                                               T) > random.random():
                    solution = candidate
                    old_cost = candidate_cost
            T = T * alpha
        plot_tsp_result(self.city_info, solution)
        return solution
class SimulatedAnnealing(object):
    """Thin driver that instantiates and runs a SAImpl-style solution.

    ``cls_solution`` must provide a ``get_init_params()`` static/class
    method and an ``anneal()`` instance method.
    """
    def __init__(self, cls_solution):
        # Build the concrete solution from its own default parameters.
        # (Removed a dead trailing ``pass``.)
        self.sas = cls_solution(cls_solution.get_init_params())

    ## To save the annealing state
    def save(self):
        pass

    ## To restore the annealing state
    def restore(self):
        pass

    def anneal(self):
        """Run the solution's annealing loop and return its best solution.

        The original discarded the result; returning it is backward
        compatible (callers that ignored the None return are unaffected).
        """
        return self.sas.anneal()
# NOTE(review): runs at import time -- anneals a random 20-city TSP
# instance (and opens a plot) whenever this module is imported; a
# ``if __name__ == '__main__':`` guard would avoid that side effect.
sa = SimulatedAnnealing(TSPSolution)
sa.anneal()
41e21884418cdd2b525b4f02d1cfa4ed9ea2c000 | Add bug test for 9268 (#65) | bugs/issue_9268.py | bugs/issue_9268.py | # RUN: %PYTHON %s
# XFAIL: *
import iree.compiler.tools.tflite as iree_tflite
# https://github.com/iree-org/iree/issues/9268
ir = '''
func.func @main(%a : tensor<f32>, %b : tensor<f32>) -> tensor<*xf32> {
%val = "tfl.add"(%a, %b) {fused_activation_function = "NONE"} : (tensor<f32>, tensor<f32>) -> tensor<*xf32>
return %val : tensor<*xf32>
}
'''
print(ir)
ir = iree_tflite.compile_str(ir, target_backends=["cpu"])
| Python | 0 | |
8c401af5bb7c3678de4091b88d81e04ddf248705 | Remove unused 'fahrenheit' config option | src/collectors/lmsensors/lmsensors.py | src/collectors/lmsensors/lmsensors.py | # coding=utf-8
"""
This class collects data from libsensors. It should work against libsensors 2.x
and 3.x, pending support within the PySensors Ctypes binding:
[http://pypi.python.org/pypi/PySensors/](http://pypi.python.org/pypi/PySensors/)
Requires: 'sensors' to be installed, configured, and the relevant kernel
modules to be loaded. Requires: PySensors requires Python 2.6+
If you're having issues, check your version of 'sensors'. This collector
written against: sensors version 3.1.2 with libsensors version 3.1.2
#### Dependencies
* [PySensors](http://pypi.python.org/pypi/PySensors/)
"""
import diamond.collector
try:
import sensors
sensors # workaround for pyflakes issue #13
except ImportError:
sensors = None
class LMSensorsCollector(diamond.collector.Collector):
    """Collects readings (temperatures, voltages, fans) via PySensors."""

    def get_default_config_help(self):
        """Return help text for this collector's config options."""
        config_help = super(LMSensorsCollector, self).get_default_config_help()
        config_help.update({
            'send_zero': 'Send sensor data even when there is no value'
        })
        return config_help

    def get_default_config(self):
        """
        Returns default collector settings.
        """
        config = super(LMSensorsCollector, self).get_default_config()
        config.update({
            'path': 'sensors',
            'send_zero': 'False'
        })
        return config

    def collect(self):
        """Publish one metric per detected sensor feature."""
        if sensors is None:
            self.log.error('Unable to import module sensors')
            return {}

        sensors.init()
        try:
            for chip in sensors.iter_detected_chips():
                for feature in chip:
                    label = feature.label.replace(' ', '-')
                    try:
                        value = feature.get_value()
                    except Exception:
                        # Bug fix: the original bare ``except`` (which also
                        # caught KeyboardInterrupt) left ``value`` unbound
                        # -- or stale from the previous feature -- whenever
                        # send_zero was disabled.
                        # NOTE(review): config values may be the *string*
                        # 'False' (truthy) unless diamond converts them;
                        # preserved the original truthiness check.
                        value = 0 if self.config['send_zero'] else None
                    if value is not None:
                        self.publish(".".join([str(chip), label]), value)
        finally:
            sensors.cleanup()
| # coding=utf-8
"""
This class collects data from libsensors. It should work against libsensors 2.x
and 3.x, pending support within the PySensors Ctypes binding:
[http://pypi.python.org/pypi/PySensors/](http://pypi.python.org/pypi/PySensors/)
Requires: 'sensors' to be installed, configured, and the relevant kernel
modules to be loaded. Requires: PySensors requires Python 2.6+
If you're having issues, check your version of 'sensors'. This collector
written against: sensors version 3.1.2 with libsensors version 3.1.2
#### Dependencies
* [PySensors](http://pypi.python.org/pypi/PySensors/)
"""
import diamond.collector
try:
import sensors
sensors # workaround for pyflakes issue #13
except ImportError:
sensors = None
class LMSensorsCollector(diamond.collector.Collector):
    """Publishes lm-sensors readings (temps, voltages, fans) via PySensors."""

    def get_default_config_help(self):
        """Return help text for this collector's config options."""
        config_help = super(LMSensorsCollector, self).get_default_config_help()
        config_help.update({
            'fahrenheit': "True/False",
            'send_zero': 'Send sensor data even when there is no value'
        })
        return config_help

    def get_default_config(self):
        """
        Returns default collector settings.
        """
        config = super(LMSensorsCollector, self).get_default_config()
        config.update({
            'path': 'sensors',
            # NOTE(review): 'fahrenheit' is declared here but never read in
            # collect() -- the conversion appears unimplemented.
            'fahrenheit': 'True',
            # NOTE(review): stored as the string 'False', which is truthy if
            # tested directly -- confirm the config layer coerces it to bool.
            'send_zero': 'False'
        })
        return config

    def collect(self):
        """Publish one metric per detected sensor feature.

        Metric names are '<chip>.<label>', with spaces in the feature
        label replaced by dashes.
        """
        if sensors is None:
            self.log.error('Unable to import module sensors')
            return {}
        sensors.init()
        try:
            for chip in sensors.iter_detected_chips():
                for feature in chip:
                    label = feature.label.replace(' ', '-')
                    # Reset per feature: previously 'value' leaked across
                    # iterations, so a failed read could re-publish the
                    # previous feature's value (or raise NameError on the
                    # very first feature).
                    value = None
                    try:
                        value = feature.get_value()
                    except Exception:  # narrowed from a bare 'except:'
                        if self.config['send_zero']:
                            value = 0
                    if value is not None:
                        self.publish(".".join([str(chip), label]), value)
        finally:
            # Always release libsensors resources, even on error.
            sensors.cleanup()
| Python | 0.000004 |
f3e1b1404f32cd0195aa8148d1ab4285cf9ad352 | Add class BaseSpider | Spiders.py | Spiders.py | '''
Created on 2 сент. 2016 г.
@author: garet
'''
class BaseSpider():
    """Skeleton crawler interface; subclasses override the hooks below.

    Every hook is a no-op here and returns None.
    """

    def __init__(self):
        """No base-class state; subclasses set up their own queues/caches."""

    def AddUrls(self, urls):
        """Queue `urls` for crawling."""

    def Routing(self, url):
        """Decide how `url` should be dispatched."""

    def SaveCache(self, url, data=None):
        """Persist `data` fetched from `url`."""

    def GetCache(self, url):
        """Look up previously cached content for `url`."""

    def Run(self):
        """Main crawl loop."""
| Python | 0.000001 | |
e5be29bc3c5a77493fe64bb3fc8b52611cc13469 | Add tests for Generic Interface. | zerver/tests/test_outgoing_webhook_interfaces.py | zerver/tests/test_outgoing_webhook_interfaces.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from typing import Any
import mock
import json
from requests.models import Response
from zerver.lib.test_classes import ZulipTestCase
from zerver.outgoing_webhooks.generic import GenericOutgoingWebhookService
class Test_GenericOutgoingWebhookService(ZulipTestCase):
    """Tests for the generic outgoing-webhook service interface."""

    def setUp(self):
        # type: () -> None
        # Minimal outgoing-webhook event: the trigger command plus the
        # message that triggered it.
        self.event = {
            u'command': '@**test**',
            u'message': {
                'content': 'test_content',
            }
        }
        # user_profile is not exercised by the code paths under test.
        self.handler = GenericOutgoingWebhookService(service_name='test-service',
                                                     base_url='http://example.domain.com',
                                                     token='abcdef',
                                                     user_profile=None)

    def test_process_event(self):
        # type: () -> None
        # process_event() builds both the REST operation metadata and the
        # JSON payload posted to the remote webhook.
        rest_operation, request_data = self.handler.process_event(self.event)
        request_data = json.loads(request_data)
        self.assertEqual(request_data['data'], "@**test**")
        self.assertEqual(request_data['token'], "abcdef")
        self.assertEqual(rest_operation['base_url'], "http://example.domain.com")
        self.assertEqual(rest_operation['method'], "POST")
        self.assertEqual(request_data['message'], self.event['message'])

    def test_process_success(self):
        # type: () -> None
        response = mock.Mock(spec=Response)
        # "response_not_required" means no reply should be sent back.
        response.text = json.dumps({"response_not_required": True})
        success_response = self.handler.process_success(response, self.event)
        self.assertEqual(success_response, None)
        # An explicit response string is passed through verbatim.
        response.text = json.dumps({"response_string": 'test_content'})
        success_response = self.handler.process_success(response, self.event)
        self.assertEqual(success_response, 'test_content')
        # No recognized key yields an empty reply string.
        response.text = json.dumps({})
        success_response = self.handler.process_success(response, self.event)
        self.assertEqual(success_response, "")

    def test_process_failure(self):
        # type: () -> None
        # On failure the raw response body becomes the failure message.
        response = mock.Mock(spec=Response)
        response.text = 'test_content'
        success_response = self.handler.process_failure(response, self.event)
        self.assertEqual(success_response, 'test_content')
| Python | 0 | |
14d99c3d697c069abf188dac5d53a7169010160b | Add migration for version 2 tables | migrations/versions/8c431c5e70a8_v2_tables.py | migrations/versions/8c431c5e70a8_v2_tables.py | """v2 tables
Revision ID: 8c431c5e70a8
Revises: 4ace74bc8168
Create Date: 2021-07-05 21:59:13.575188
"""
# revision identifiers, used by Alembic.
revision = "8c431c5e70a8"
down_revision = "4ace74bc8168"
import sqlalchemy as sa
from alembic import op
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # New v2 root table: organizations own templates, products and tags.
    op.create_table(
        "organizations",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column(
            "default_dashboard_template_id", sa.Integer(), nullable=True
        ),
        sa.Column("slug", sa.Unicode(length=255), nullable=False),
        sa.Column("title", sa.Unicode(length=255), nullable=False),
        sa.Column("layout", sa.Integer(), nullable=False),
        sa.Column("fastly_support", sa.Boolean(), nullable=False),
        sa.Column("root_domain", sa.Unicode(length=255), nullable=False),
        sa.Column("root_path_prefix", sa.Unicode(length=255), nullable=False),
        sa.Column("fastly_domain", sa.Unicode(length=255), nullable=True),
        sa.Column(
            "fastly_encrypted_api_key", sa.String(length=255), nullable=True
        ),
        sa.Column("fastly_service_id", sa.Unicode(length=255), nullable=True),
        sa.Column("bucket_name", sa.Unicode(length=255), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("slug"),
    )
    # Dashboard templates belong to an organization and carry soft-delete
    # bookkeeping (created/deleted by whom and when).
    op.create_table(
        "dashboardtemplates",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("organization_id", sa.Integer(), nullable=True),
        sa.Column("comment", sa.UnicodeText(), nullable=True),
        sa.Column("bucket_prefix", sa.Unicode(length=255), nullable=False),
        sa.Column("created_by_id", sa.Integer(), nullable=True),
        sa.Column("date_created", sa.DateTime(), nullable=False),
        sa.Column("deleted_by_id", sa.Integer(), nullable=True),
        sa.Column("date_deleted", sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(
            ["created_by_id"],
            ["users.id"],
        ),
        sa.ForeignKeyConstraint(
            ["deleted_by_id"],
            ["users.id"],
        ),
        sa.ForeignKeyConstraint(
            ["organization_id"],
            ["organizations.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("bucket_prefix"),
    )
    # Tags are unique per organization by both slug and title.
    op.create_table(
        "tags",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("organization_id", sa.Integer(), nullable=True),
        sa.Column("slug", sa.Unicode(length=255), nullable=False),
        sa.Column("title", sa.Unicode(length=255), nullable=False),
        sa.Column("comment", sa.UnicodeText(), nullable=True),
        sa.ForeignKeyConstraint(
            ["organization_id"],
            ["organizations.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("slug", "organization_id"),
        sa.UniqueConstraint("title", "organization_id"),
    )
    with op.batch_alter_table("tags", schema=None) as batch_op:
        batch_op.create_index(
            batch_op.f("ix_tags_organization_id"),
            ["organization_id"],
            unique=False,
        )
    # Many-to-many association between products and tags.
    op.create_table(
        "producttags",
        sa.Column("tag_id", sa.Integer(), nullable=False),
        sa.Column("product_id", sa.Integer(), nullable=False),
        sa.ForeignKeyConstraint(
            ["product_id"],
            ["products.id"],
        ),
        sa.ForeignKeyConstraint(
            ["tag_id"],
            ["tags.id"],
        ),
        sa.PrimaryKeyConstraint("tag_id", "product_id"),
    )
    # builds: record which git ref was built and who uploaded it.
    with op.batch_alter_table("builds", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column("git_ref", sa.Unicode(length=255), nullable=True)
        )
        batch_op.add_column(
            sa.Column("uploaded_by_id", sa.Integer(), nullable=True)
        )
        batch_op.create_foreign_key(None, "users", ["uploaded_by_id"], ["id"])
    with op.batch_alter_table("editions", schema=None) as batch_op:
        batch_op.add_column(sa.Column("kind", sa.Integer(), nullable=True))
    # products: link each product to its owning organization.
    with op.batch_alter_table("products", schema=None) as batch_op:
        batch_op.add_column(
            sa.Column("organization_id", sa.Integer(), nullable=True)
        )
        batch_op.create_foreign_key(
            None, "organizations", ["organization_id"], ["id"]
        )
def downgrade():
    # Reverse of upgrade(): drop v2 columns and tables in dependency order
    # (children before the tables they reference).
    with op.batch_alter_table("products", schema=None) as batch_op:
        # NOTE(review): drop_constraint(None, ...) needs a constraint name
        # at runtime -- confirm a naming convention supplies one, otherwise
        # this downgrade will fail.
        batch_op.drop_constraint(None, type_="foreignkey")
        batch_op.drop_column("organization_id")
    with op.batch_alter_table("editions", schema=None) as batch_op:
        batch_op.drop_column("kind")
    with op.batch_alter_table("builds", schema=None) as batch_op:
        # NOTE(review): same unnamed-constraint concern as above.
        batch_op.drop_constraint(None, type_="foreignkey")
        batch_op.drop_column("uploaded_by_id")
        batch_op.drop_column("git_ref")
    op.drop_table("producttags")
    with op.batch_alter_table("tags", schema=None) as batch_op:
        batch_op.drop_index(batch_op.f("ix_tags_organization_id"))
    op.drop_table("tags")
    op.drop_table("dashboardtemplates")
    op.drop_table("organizations")
| Python | 0 | |
48d38c28212c0b3ac8bb8ee324221d94b07e84ee | Add initial Domain Tools module | misp_modules/modules/expansion/domaintools.py | misp_modules/modules/expansion/domaintools.py | import json
import logging
import sys
from domaintools import API
# Module logger emitting DEBUG output to stdout.
log = logging.getLogger('domaintools')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)

# Shared error payload returned to MISP on failure; 'error' is rewritten
# with a specific message before each return.
misperrors = {'error': 'Error'}
# Attribute types this expansion module consumes and produces.
mispattributes = {
    'input': ['domain'],
    'output': ['whois-registrant-email', 'whois-registrant-phone', 'whois-registrant-name',
               'whois-registrar', 'whois-creation-date', 'freetext']
}
moduleinfo = {
    'version': '0.1',
    'author': 'Raphaël Vinot',
    'description': 'DomainTools MISP expansion module.',
    'module-type': ['expansion', 'hover']
}
# Config keys the user must supply (DomainTools credentials).
moduleconfig = ['username', 'api_key']
class DomainTools(object):
    """Accumulates WHOIS attributes and renders them as MISP result entries."""

    def __init__(self):
        # Sets de-duplicate values gathered from several WHOIS sections.
        self.reg_mail = set()
        self.reg_phone = set()
        self.reg_name = set()
        self.registrar = set()
        self.creation_date = set()
        # Raw WHOIS record text, if any.
        self.freetext = ''

    def dump(self):
        """Serialize the collected values, skipping empty fields."""
        results = []
        typed_fields = (
            ('whois-registrant-email', self.reg_mail),
            ('whois-registrant-phone', self.reg_phone),
            ('whois-registrant-name', self.reg_name),
            ('whois-registrar', self.registrar),
            ('whois-creation-date', self.creation_date),
        )
        for misp_type, collected in typed_fields:
            if collected:
                results.append({'type': [misp_type], 'values': list(collected)})
        if self.freetext:
            results.append({'type': ['freetext'], 'values': [self.freetext]})
        return results
def handler(q=False):
    """MISP expansion handler: WHOIS-expand a domain via DomainTools.

    `q` is a JSON string carrying the attribute to expand plus a
    `config` dict with the DomainTools credentials. Returns a JSON
    string of MISP results, or the `misperrors` dict on failure.
    """
    if not q:
        return q
    request = json.loads(q)
    to_query = None
    for t in mispattributes['input']:
        to_query = request.get(t)
        if to_query:
            break
    if not to_query:
        misperrors['error'] = "Unsupported attributes type"
        return misperrors
    if request.get('config'):
        if (request['config'].get('username') is None) or (request['config'].get('api_key') is None):
            misperrors['error'] = 'DomainTools authentication is incomplete'
            return misperrors
        else:
            domtools = API(request['config'].get('username'), request['config'].get('api_key'))
    else:
        misperrors['error'] = 'DomainTools authentication is missing'
        return misperrors
    whois_entry = domtools.parsed_whois(to_query)
    values = DomainTools()
    # dict.has_key() was removed in Python 3; membership tests use `in`.
    if 'error' in whois_entry:
        misperrors['error'] = whois_entry['error']['message']
        return misperrors
    if 'registrant' in whois_entry:
        values.reg_name.add(whois_entry['registrant'])
    if 'registration' in whois_entry:
        values.creation_date.add(whois_entry['registration']['created'])
    if 'whois' in whois_entry:
        values.freetext = whois_entry['whois']['record']
    if whois_entry.emails():
        # NOTE: not sure we want to do that (contains registrar emails)
        values.reg_mail |= whois_entry.emails()
    if 'parsed_whois' in whois_entry:
        if whois_entry['parsed_whois']['created_date']:
            values.creation_date.add(whois_entry['parsed_whois']['created_date'])
        if whois_entry['parsed_whois']['registrar']['name']:
            values.registrar.add(whois_entry['parsed_whois']['registrar']['name'])
        for key, entry in whois_entry['parsed_whois']['contacts'].items():
            # TODO: pass key as comment
            if entry['email']:
                values.reg_mail.add(entry['email'])
            if entry['phone']:
                values.reg_phone.add(entry['phone'])
            if entry['name']:
                values.reg_name.add(entry['name'])
    return json.dumps({'results': values.dump()})
def introspection():
    # MISP module protocol hook: advertise supported attribute types.
    return mispattributes


def version():
    # MISP module protocol hook: advertise module info, attaching the
    # required config keys.
    moduleinfo['config'] = moduleconfig
    return moduleinfo
| Python | 0 | |
1f80f3cc606d9c42e41e30108e97f776b02803c5 | Create abcprob.py | abcprob.py | abcprob.py | # by beepingmoon, 2014-07-22
# abc problem, http://rosettacode.org/wiki/ABC_Problem
import time
class Blok:
    """A two-sided letter block; `znaki` holds the letters printed on it."""

    def __init__(self, znaki, czyDostepny = True):
        self.znaki = znaki
        # True while the block has not been consumed by the current search.
        self.czyDostepny = czyDostepny

    def sprawdzZnaki(self, znak):
        """Return True if `znak` is one of this block's letters."""
        # Idiomatic membership test instead of the manual scan loop.
        return znak in self.znaki
# The twenty two-letter blocks from the Rosetta Code task statement.
bloki = [Blok('ob'),Blok('xk'),Blok('dq'),Blok('cp'),Blok('na'),
    Blok('gt'),Blok('re'),Blok('tg'),Blok('qd'),Blok('fs'),Blok('jw'),
    Blok('hu'),Blok('vi'),Blok('an'),Blok('ob'),Blok('er'),Blok('fs'),
    Blok('ly'),Blok('pc'),Blok('zm')]

def resetuj():
    # Mark every block available again between searches ("resetuj" = reset).
    for b in bloki:
        b.czyDostepny = True
def funkcjaABC(bloki, slowo, indeks):
    """Backtracking check: can slowo[indeks:] be spelled with the free blocks?

    Marks a candidate block unavailable, recurses on the next letter, and
    restores the block if that branch fails.
    """
    if indeks == len(slowo):
        return True
    litera = slowo[indeks]
    for kandydat in bloki:
        # Guard clauses: skip consumed blocks and blocks without the letter.
        if not kandydat.czyDostepny:
            continue
        if not kandydat.sprawdzZnaki(litera):
            continue
        kandydat.czyDostepny = False
        if funkcjaABC(bloki, slowo, indeks + 1):
            return True
        # Backtrack: this choice did not lead to a full spelling.
        kandydat.czyDostepny = True
    return False
# check long arbitrary string in this file
# Times a search over the full contents of slowo.txt (Python 2 script).
f = open("slowo.txt",'r')
data = f.read()
f.close()
start = time.time()
print funkcjaABC(bloki, data, 0)
# "Czas szukania: ... sekund" = "search time: ... seconds".
print "Czas szukania: %f sekund " % (time.time() - start)
resetuj()
# Sample cases from the task statement, kept for manual testing:
#print funkcjaABC(bloki, 'a', 0) # true
#resetuj()
#print funkcjaABC(bloki, 'bark', 0) # true
#resetuj()
#print funkcjaABC(bloki, 'book', 0) # false
#resetuj()
#print funkcjaABC(bloki, 'treat', 0) # true
#resetuj()
#print funkcjaABC(bloki, 'common', 0) # false
#resetuj()
#print funkcjaABC(bloki, 'squad', 0) # true
#resetuj()
#print funkcjaABC(bloki, 'confuse', 0) # true
| Python | 0.99997 | |
b7b29a00b1a2e448d78c8f3c4333753668589e16 | Create __init__.py | etc/__init__.py | etc/__init__.py | Python | 0.000429 | ||
e1ea3859b08a14c80ccd65fc5551336bdc760f96 | add biggan projukti blog | corpus_builder/spiders/public_blog/biggan_projukti.py | corpus_builder/spiders/public_blog/biggan_projukti.py | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from corpus_builder.templates.spider import CommonSpider
class BigganProjuktiSpider(CommonSpider):
name = 'biggan_projukti'
allowed_domains = ['www.bigganprojukti.com', 'bigganprojukti.com']
base_url = 'http://www.bigganprojukti.com/'
start_request_url = base_url
content_body = {
'css': 'div.td-post-content p::text'
}
rules = (
Rule(LinkExtractor(
restrict_css='div.td-main-content h3.entry-title'
),
callback='parse_content'),
)
allowed_configurations = [
['start_page'],
['start_page', 'end_page']
]
def request_index(self, response):
for page in range(self.start_page + 1, self.end_page + 1):
yield scrapy.Request(self.base_url + 'page/{page}'.format(page=page))
| Python | 0 | |
204e6fc49bcc739f1e5c53bfbfc3eb7e86a7640c | Add windows autostart. | StartAtBoot.py | StartAtBoot.py | import sys
# On Windows, register this application to start at login by adding a
# value under the current user's "Run" registry key.
if sys.platform.startswith('win'):
    from PyQt4.QtCore import QSettings
    RUN_PATH = "HKEY_CURRENT_USER\\Software\\Microsoft\\Windows\\CurrentVersion\\Run"
    # QSettings with NativeFormat writes directly to the registry path above.
    settings = QSettings(RUN_PATH, QSettings.NativeFormat)
    # sys.argv[0] is the path of the running script/executable.
    settings.setValue("Anki", sys.argv[0])
    # to remove that:
    # self.settings.remove("Anki")
| Python | 0 | |
7919fa239e597c0358b518740aa2657b49caddbf | add oop_advance | src/python27/oop_advance/slots.py | src/python27/oop_advance/slots.py | # -*- coding: utf-8 -*-
# Python 2 demo: binding methods at runtime vs. restricting attributes
# with __slots__.
class Student(object):
    # Empty class: instances accept arbitrary attributes.
    pass

s = Student()
# Attributes can be attached to an instance at runtime.
s.name = 'Tim Ho'
print s.name

def set_age(self, age):
    self.age = age

from types import MethodType
# Bind set_age to this single instance only (Python 2 three-arg form).
s.set_age = MethodType(set_age, s, Student)
s.set_age(25)
print s.age
s2 = Student()
# s2.set_age(25)  # would fail: the method exists only on `s`

def set_score(self, score):
    self.score = score

# Binding on the class makes the method available to all instances.
Student.set_score = MethodType(set_score, None, Student)
s.set_score(100)
print s.score
s2.set_score(99)
print s2.score

class Student2(object):
    # __slots__ restricts instances to exactly these attributes.
    __slots__ = ('name', 'age')

s3 = Student2()
s3.name = 'Tim Ho'
s3.age = 25
#s3.score = 99  # would raise AttributeError: 'score' not in __slots__
print s3.name
print s3.age
| Python | 0.000133 | |
d19f92ef1bb97407607c588db86a0b55a60ebb2f | Create Auditor.py | auditor.py | auditor.py | import Tkinter
import string
import subprocess
from Tkinter import *
from subprocess import call
from collections import defaultdict
#Requires whois by Mark Russinovich:
#http://technet.microsoft.com/en-us/sysinternals/bb897435.aspx
#using the documentation from:
#http://www.tutorialspoint.com/python/python_gui_programming.htm
#########################################################
#begin window layout
# Initialize the Tk root window and the frames that hold each row of
# widgets: host entry on top, the two server rows beneath it, and the
# results / WHOIS panes along the bottom.
main = Tkinter.Tk()
frame = Frame(main)
frame.pack()
middleframe = Frame(main)
middleframe.pack(side=TOP)
middle1frame = Frame(main)
middle1frame.pack(side=TOP)
bottomframe = Frame(main)
bottomframe.pack(side=LEFT)
bottomframe1 = Frame(main)
bottomframe1.pack(side=LEFT)
##########################################################
# Host entry row: a textbox bound to hostName, plus its label.
hostName = StringVar()
hostNameField = Entry(frame, textvariable=hostName)
hostNameField.pack(side = LEFT)
hostNamelabelstring = StringVar()
hostNamelabelstring.set("Hostname or single IP")
hostNamelabel = Label(frame, textvariable=hostNamelabelstring)
hostNamelabel.pack(side = LEFT)
############################################################
# Reference DNS server row (recursive resolver; pre-filled default).
referenceServerName = StringVar()
referenceServerName.set("208.72.105.3")
referenceServerNameField = Entry(middleframe, textvariable=referenceServerName)
referenceServerNameField.pack(side = LEFT)
referenceServerNamelabelstring = StringVar()
referenceServerNamelabelstring.set("Reference Server (always recursive)")
referenceServerNamelabel = Label(middleframe, textvariable=referenceServerNamelabelstring)
referenceServerNamelabel.pack(side = LEFT)
############################################################
# Target DNS server row (usually the authoritative server under audit).
targetServerName = StringVar()
targetServerName.set("ns.ori.net")
targetServerNameField = Entry(middle1frame, textvariable=targetServerName)
targetServerNameField.pack(side = LEFT)
targetServerNamelabelstring = StringVar()
targetServerNamelabelstring.set("Target Server (usually authoritative)")
targetServerNamelabel = Label(middle1frame, textvariable=targetServerNamelabelstring)
targetServerNamelabel.pack(side = LEFT)
##############################################################
# WHOIS results pane (filled by compareHost()).
whoisResults = Text(bottomframe1)
whoisResults.pack(side=BOTTOM)
#looks up with whois the things in the textbox upon clicking the button
################################################################################
#TODO: Rewrite this function, it's got a nasty implementation, where it only displays
#the last ping task result. *sigh* only so much time in the day.
################################################################################
def checkHost(serverName):
    # Resolve every host from the entry field against `serverName` and
    # append each nslookup output to the shared results label.
    # NOTE(review): see the TODO above -- results accumulate into one
    # label rather than being displayed per lookup.
    a = hostNameEnum()
    for i in a:
        results = str(check(str(i), serverName))
        # "empty" is the sentinel compareHost() sets before a new run;
        # replace it on the first result, append afterwards.
        if str(resultslabelstring.get()) == "empty":
            resultslabelstring.set(results)
        else:
            resultslabelstring.set(resultslabelstring.get() + results)
def compareHost():
    # Button callback: reset the results pane, then query both DNS
    # servers so their answers can be compared side by side.
    resultslabelstring.set("empty")
    checkHost(str(referenceServerName.get()))
    checkHost(str(targetServerName.get()))
    # Refresh the WHOIS pane for the entered host (clear, then insert
    # the whois output at the insertion cursor).
    whoisResults.delete("1.0", END)
    whoisResults.insert(INSERT, (whois(str(hostName.get()))))
# "check" button triggers compareHost(); its output lands in the label
# below.
hostNameCheck = Button(frame, text='check', command=compareHost)
hostNameCheck.pack(side = TOP)
# "empty" is the sentinel value checkHost() inspects to decide whether
# to replace or append results.
resultslabelstring = StringVar()
resultslabelstring.set("empty")
resultslabel = Label(bottomframe, textvariable=resultslabelstring, height=45, width = 60)
resultslabel.pack(side = TOP)
#########################################################
#end of window layout
#########################################################
#create a class to instantiate a command line, passing arguments to the object
class commandline:
    ''' Parses a command list formatted thusly:
    ['ping', '-c', '4', 'ip address']
    for command line usage, parameterizes two
    strings and standard output is string out, and err'''

    def __init__(self, args):
        # Command plus arguments, e.g. ['nslookup', host, server].
        self.args = args
        # NOTE(review): shell=True combined with a list of args and the
        # subprocess.SW_HIDE creationflag are Windows-specific behaviour
        # (this tool targets Sysinternals whois) -- confirm before reuse
        # on other platforms.
        proc = subprocess.Popen(self.args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=subprocess.SW_HIDE, shell=True)
        # Blocks until the process exits; captures stdout and stderr.
        self.out, self.err = proc.communicate()

    def __str__(self):
        # Only stdout is surfaced here; stderr remains in self.err.
        return str(self.out)
class check(commandline):
    ''' checks a host'''
    def __init__(self, host, server):
        # The lookup is deferred: __repr__/__str__ run nslookup on demand.
        self.host = host
        self.server = server

    def __repr__(self):
        # NOTE(review): repr() has the side effect of spawning nslookup
        # on every call.
        self.out = commandline(['nslookup', self.host, self.server]).out
        return self.out

    def __str__(self):
        self.out = commandline(['nslookup', self.host, self.server]).out
        return str(self.out)
class whois(commandline):
    ''' checks a host'''
    def __init__(self, host):
        # The lookup is deferred: __repr__/__str__ run whois on demand.
        self.host = host

    def __repr__(self):
        # NOTE(review): repr() spawns the external whois tool each call.
        self.out = commandline(['whois', '-v', self.host]).out
        return self.out

    def __str__(self):
        self.out = commandline(['whois', '-v', self.host]).out
        return str(self.out)
#class to take an ip or a range, and either do something useful, or iterate
#and die trying
################################################################################
#TODO: Horrible bug, if you start from the middle of a range, and add 1
#for example "192.168.1.11-15" you get check(192.168.1.1-25) results, WTF!!
################################################################################
class hostNameEnum:
    """Wraps the host entry field: splits "a;b;c" into a list of hosts.

    Reads the module-level Tkinter StringVar `hostName` at construction
    time, so it must be instantiated after the UI is built. Iteration is
    provided via __getitem__.
    """

    def __init__(self):
        self.hostName = hostName.get()
        # Split on ';' and drop empty fragments. The previous version
        # called str(self.hostName, sep=";") (a TypeError) and iterated
        # over the *characters* of the entry, so multi-host input never
        # worked (the TODO above describes the symptom).
        parts = [part.strip() for part in self.hostName.split(";") if part.strip()]
        # Fall back to the raw entry so single-host behaviour is unchanged.
        self.hostList = parts if parts else [self.hostName]

    def __repr__(self):
        # Was 'return list.self.hostList', which raised AttributeError.
        return repr(self.hostList)

    def __str__(self):
        return str(self.hostList)

    def __getitem__(self, i):
        return str(self.hostList[i])
#########################################################
# Hand control to the Tk event loop; blocks until the window closes.
main.mainloop()
| Python | 0 | |
f576b7b151c6c74eea668e66fff54ab2c33f39d6 | add 100 | Volume2/100.py | Volume2/100.py | if __name__ == "__main__":
    # Starting pair (b, n) = (85, 120) satisfies 2*b*(b-1) == n*(n-1);
    # the recurrence below steps to the next such solution. Iterate until
    # the total n exceeds the limit L = 10**12, then print the answer
    # (Python 2 print statement). Presumably Project Euler problem 100,
    # given the file path.
    b, n, L = 85, 120, 10 ** 12
    while n <= L:
        b, n = 3 * b + 2 * n - 2, 4 * b + 3 * n - 3
    print b, n
| Python | 0.999998 | |
68efa8a0fb206da8cd1410d74572520f558ebded | Create apriori.py | apriori.py | apriori.py | def preprocessing(data):
""" preprocesses data to be applicable to apriori
Parameters
----------
data : tbd
Returns
---------
list of sets
"""
pass
class apriori():
    """ Frequent Itemsets using the apriori algorithm (stub).

    Parameters
    ----------
    baskets : list of sets
    max_set_size : int, default None
        determine frequent item sets up to max_set_size items
        if None, determine alls frequent item sets
    s : float >0 and <=1
        minimum threshold for item sets to count as frequent
    rules : boolean
        if True return association rules additionally to frequent item sets
    confidence : boolean
        if True compute confidence of association rule. Only viable if rules is True
    interest : boolean
        if True compute interest of association rule. Only viable if rules is True
    """

    def __init__(self, baskets, max_set_size = None, s = 0.1,
                 rules = False, confidence=False, interest=False):
        self.baskets = baskets
        self.max_set_size = max_set_size
        self.s = s
        self.rules = rules
        self.confidence = confidence
        self.interest = interest

    def compute(self):
        """ Applies the apriori algorithm to baskets (not yet implemented).
        """
        pass

    def _initialize(self):
        # Stub: set up singleton candidate counts.
        pass

    def _construct(self):
        # Stub: build size-(j+1) candidates from frequent size-j sets.
        pass

    def _filter(self):
        # Stub: keep candidates whose support meets the threshold s.
        pass

    def _construct_and_count(self, j, frequent_tuples):
        # Stub. The previous version had `if` statements with comment-only
        # bodies, which is a SyntaxError; `pass` keeps the module importable.
        if j == 1:
            # count items in baskets and return
            pass
        if j > 1:
            # for every basket, filter tuples subset of basket
            # double loop through filtered tuples
            # if tuple difference is j-2, unite and count unison
            # if count(unison) = j add tuple to output and increase count
            # memoization?
            pass
| Python | 0.000006 | |
8adf39f011d8290c07f01e807b65373e40b4c314 | Create score.py | score.py | score.py | """ Requires sox and text2wave (via festival)
"""
from pippi import dsp
from pippi import tune
import subprocess
import os
def sox(cmd, sound):
    """Filter `sound` through an external sox command.

    `cmd` is a template with two '%s' slots: the input path and the
    output path. The sound is written to disk, processed, and read back.
    """
    path = os.getcwd()
    filename_in = '/proc-in'
    filename_out = '/proc-out.wav'
    # presumably dsp.write() resolves '/proc-in' and appends '.wav'
    # itself (the command below re-adds the extension) -- TODO confirm
    # against the pippi docs.
    dsp.write(sound, filename_in)
    cmd = cmd % (path + filename_in + '.wav', path + filename_out)
    # NOTE(review): shell string built by interpolation; safe only while
    # the substituted paths come from os.getcwd().
    subprocess.call(cmd, shell=True)
    sound = dsp.read(path + filename_out).data
    return sound
def text2wave(lyrics):
    """Render `lyrics` to speech with festival's text2wave and return the
    raw sample data.
    """
    path = os.getcwd() + '/bag.wav'
    # NOTE(review): lyrics are interpolated into a shell pipeline; a
    # single quote in the text would break (or inject into) the command.
    cmd = "echo '%s' | /usr/bin/text2wave -o %s" % (lyrics, path)
    # Return code was previously captured into an unused variable.
    subprocess.call(cmd, shell=True)
    # Read back through the same absolute path text2wave wrote, instead
    # of the cwd-relative 'bag.wav' (same file, stated consistently).
    words = dsp.read(path).data
    return words
def singit(lyrics, mult):
    # Speak the lyrics, then stretch the speech onto 2-4 random scale
    # degrees and mix the resulting voices together.
    words = text2wave(lyrics)
    pitches = [ dsp.randint(1, 10) for i in range(dsp.randint(2, 4)) ]
    # presumably tune.fromdegrees maps scale degrees to pitches in the
    # key of A at a random octave -- TODO confirm against the pippi docs.
    pitches = tune.fromdegrees(pitches, octave=dsp.randint(1, 4), root='a')
    # Stretch each voice to mult times the source length via dsp.pine.
    sings = [ dsp.pine(words, dsp.flen(words) * mult, pitch) for pitch in pitches ]
    sings = dsp.mix(sings)
    # Speed the mixed result up 5x with the external sox tool.
    sings = sox("sox %s %s tempo 5.0", sings)
    return sings
# Lyrics for each rendered verse (three repeated four-line stanzas).
verses = [
    'sing a ling a ling a',
    'ding ling a sing ling ding a',
    'ee oh ee oh see low',
    'me low see low tree low',
    'ping a ding a ding a',
    'sling ding a bing ling ding a',
    'ee oh ee oh see low',
    'me low see low tree low',
    'sing a ling a ling a',
    'ding ling a sing ling ding a',
    'ee oh ee oh see low',
    'me low see low tree low',
]
layers = []
# Render-settings history for past versions of this piece:
# v1: 1 layers, 50 - 1000 mult
# v2: 3 layers, 50 - 1000 mult
# v3: 2 layers, 50 - 100 mult
for l in range(2):
    # Each layer renders every verse and concatenates the raw audio data.
    out = ''.join([ singit(lyric, dsp.randint(50, 100)) for lyric in verses ])
    layers += [ out ]
out = dsp.mix(layers)
dsp.write(out, 'sing')
| Python | 0.000008 | |
0ac53ef31a47c61382557b9fb3ba588fd4e1ae67 | Add first working setup.py script | setup.py | setup.py | from setuptools import setup, find_packages
# Package metadata for pygame_maker.
setup(name='pygame_maker',
      version='0.1',
      description='ENIGMA-like pygame-based game engine',
      classifiers=[
          'Development Status :: 2 - Pre-Alpha',
          # Fixed trove classifier typo: "LGPGv2" -> "LGPLv2".
          'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
          # Fixed trove classifier typo: "Progamming" -> "Programming".
          'Programming Language :: Python :: 2.7',
          'Topic :: Software Development :: Libraries :: pygame',
      ],
      keywords='pygame engine',
      url='http://github.com/rlc2/pygame_maker',
      author='Ron Lockwood-Childs',
      author_email='rlc2@dslextreme.com',
      license='LGPL v2.1',
      packages=[
          'pygame_maker',
          'pygame_maker.actions',
          'pygame_maker.actors',
          'pygame_maker.events',
          'pygame_maker.logic',
          'pygame_maker.scenes',
          'pygame_maker.sounds',
          'pygame_maker.support',
      ],
      # Ship art/sound/config fixtures and unit-test data with the packages.
      package_data = {
          '': ['script_data/*.png', 'script_data/*.wav', 'script_data/*.yaml',
               'script_data/*.tmpl', 'tests/unittest_files/*']
      },
      scripts = [
          'scripts/pygame_maker_app.py'
      ],
      install_requires=[
          'numpy>=1.10.1',
          # The PyPI distribution is named 'PyYAML'; a plain 'yaml'
          # requirement is not installable.
          'PyYAML>=3.11',
          'pyparsing>=2.0.5',
          'pygame>=1.9.0',
      ],
      zip_safe=False)
| Python | 0 | |
1c608e69ecf61484ea1210fe0d6dc8d116c583d3 | Update homepage in setup.py | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
    name='linaro-django-pagination',
    # NOTE(review): `version` is not defined anywhere in this file -- as
    # written this raises NameError unless it is injected elsewhere.
    version=version,
    description="linaro-django-pagination",
    long_description=open("README").read(),
    classifiers=[
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Framework :: Django",
        "Environment :: Web Environment",
    ],
    keywords='pagination,django',
    author='Zygmunt Krynicki',
    author_email='zygmunt.krynicki@linaro.org',
    url='https://github.com/zyga/django-pagination',
    license='BSD',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
    name='linaro-django-pagination',
    # NOTE(review): `version` is not defined anywhere in this file -- as
    # written this raises NameError unless it is injected elsewhere.
    version=version,
    description="linaro-django-pagination",
    long_description=open("README").read(),
    classifiers=[
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Framework :: Django",
        "Environment :: Web Environment",
    ],
    keywords='pagination,django',
    author='Zygmunt Krynicki',
    author_email='zygmunt.krynicki@linaro.org',
    url='http://launchpad.net/linaro-django-pagination/',
    license='BSD',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
)
| Python | 0 |
3693c5696da5bb96fc242d276f0d1a0a983a9c5d | Add setup.py script | setup.py | setup.py | import os
from setuptools import setup
def read(file):
return open(os.path.join(os.path.dirname(__file__), file)).read()
setup(
name="vsut",
version="1.5.2",
author="Alex Egger",
author_email="alex.egger96@gmail.com",
description="A simple unit testing framework for Python 3.",
license="MIT",
keywords="unit unittest test testing",
url="http://github.com/zillolo/vsut-python",
packages=["vsut"],
scripts=["runner.py"],
entry_points = {"console_scripts" : ["vrun = runner:main"]},
long_description="""**V**\ ery **S**\ imple **U**\ nit **T**\ est
=============================================
**VSUT** is a simple unit test framework for Python.
Usage
-----
A unit can be described , like follows:
.. code:: python
...
class UnitTest(vsut.unit.Unit):
def testComponentOne(self):
...
def testComponentTwo(self):
...
Any methods that start with ‘test’ will be executed automatically, once
the case is run.
Asserts & Fail Conditions
-------------------------
The following methods can be used in a test-case to check for success or
failure:
- ``assertEqual(expected, actual)`` - Checks for equality of the two
arguments.
- ``assertNotEqual(expected, actual)`` - Checks for inequality of the
two arguments.
- ``assertTrue(expected)`` - Checks whether the argument is the boolean
value True.
- ``assertFalse(expected)`` - Checks whether the argument is the
boolean value False.
- ``assertIn(expected, collection)`` - Checks whether the argument is
in the collection.
- ``assertNotIn(expected, collection)`` - Checks whether the argument
is not in the collection.
- ``assertIs(expected, actual)`` - Checks whether the value is the
expected.
- ``assertIsNot(expected, actual)`` - Checks whether the value is not
the expected.
- ``assertIsNone(expected)`` - Checks whether the argument is None.
- ``assertIsNotNone(expected)`` - Checks whether the argument is not
None.
- ``assertRaises(exception, func, *args)`` - Checks whether the
function ‘func’ raises an exception of the type ‘exception’.
For any of these methods a **message** parameter can be specified, that
will be printed instead of the default message.
Example
^^^^^^^
.. code:: python
...
assertEqual(True, False, message="True is not False")
...
Full Example
------------
.. code:: python
from vsut.unit import Unit
from vsut.assertion import assertEqual
class TestCase(Unit):
def testExample(self):
a = True
b = True
c = False
assertEqual(a, b)
assertEqual(b, c)
Running units
-------------
Units can be run with the test runner, as follows:
::
python runner.py [--format=table] module.TestClass module1.TestClass1 ...
| The ``--format`` argument is optional and specifies the method of
formatting the output. Available methods are ``table`` and ``csv``,
with ``table`` being the default.
| The separator for the csv-data can be specified with the parameter
``--separator``.
**NOTE**: Some characters require escaping with ``\``, as they are
special characters.
Output as Table
^^^^^^^^^^^^^^^
| Output as a table can look like this for example:
| \`\`\`
| [TestCase]
| Id \| Name \| Status \| Time \| Assert \| Message
| 0 \| testAssertEqual \| OK \| 0.000003 \| \|
| 1 \| testAssertEqualFail \| OK \| 0.000008 \| \|
| 2 \| testAssertFalse \| OK \| 0.000001 \| \|
| 3 \| testAssertIn \| OK \| 0.000002 \| \|
| 4 \| testAssertIs \| OK \| 0.000001 \| \|
| 5 \| testAssertIsNone \| OK \| 0.000002 \| \|
| 6 \| testAssertIsNot \| OK \| 0.000001 \| \|
| 7 \| testAssertIsNotNone \| OK \| 0.000001 \| \|
| 8 \| testAssertNotEqual \| OK \| 0.000001 \| \|
| 9 \| testAssertNotIn \| OK \| 0.000002 \| \|
| 10 \| testAssertRaises \| OK \| 0.000005 \| \|
| 11 \| testAssertTrue \| OK \| 0.000002 \| \|
| 12 \| testFailWithCustomMessage \| FAIL \| 0.000003 \| assertEqual \|
A custom message.
| 13 \| testWillFail \| FAIL \| 0.000003 \| assertEqual \| 1 != 2
| 14 \| testWillFailToo \| FAIL \| 0.000003 \| assertNotEqual \| 1 == 1
::
#### Output as CSV
Output as CSV can look like this for example:
| TestCase
| 0,testAssertEqual,OK,0.000004
| 1,testAssertEqualFail,OK,0.000011
| 2,testAssertFalse,OK,0.000002
| 3,testAssertIn,OK,0.000004
| 4,testAssertIs,OK,0.000004
| 5,testAssertIsNone,OK,0.000002
| 6,testAssertIsNot,OK,0.000004
| 7,testAssertIsNotNone,OK,0.000002
| 8,testAssertNotEqual,OK,0.000003
| 9,testAssertNotIn,OK,0.000002
| 10,testAssertRaises,OK,0.000007
| 11,testAssertTrue,OK,0.000003
| 12,testFailWithCustomMessage,FAIL,0.000006,assertEqual,A custom
message.
| 13,testWillFail,FAIL,0.000007,assertEqual,1 != 2
| 14,testWillFailToo,FAIL,0.000006,assertNotEqual,1 == 1
| \`\`\`
""",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Testing"]
)
#TODO: Find a way so people can execute runner.py when importing with pip.
| Python | 0.000001 | |
565ff051cabe9eaec6f24df6e8c31115e0a4eed8 | Add setup.py | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup
# Distribution metadata for VSTools (helpers for driving Visual Studio
# msbuild from Python); install with "python setup.py install".
setup(name='VSTools',
      version='0.1',
      description='Easy use Visual Studio msbuild with python. ',
      author='eternnoir',
      author_email='eternnoir@gmail.com',
      url='https://github.com/eternnoir/VSTools',
      packages=['VSTools'],
) | Python | 0.000001 | |
b164ec6fae6ea9a6734ac58ddd8c3b89f73713fe | Add setup.py | setup.py | setup.py |
from distutils.core import setup

# Distribution metadata for django-classy-settings, gathered in one
# mapping and unpacked into setup().
PACKAGE_METADATA = {
    'name': 'django-classy-settings',
    'version': '0.1',
    'description': 'Simple class-based settings for Django',
    'author': 'Curtis Maloney',
    'author_email': 'curtis@tinbrain.net',
    'packages': ['cbs'],
}

setup(**PACKAGE_METADATA)
| Python | 0.000001 | |
69f9f090cfa5c9ca5d7dde70cfcdd3327147bdb7 | Create setup.py | setup.py | setup.py | import cx_Oracle
'''
- This is to drop all previous tables and create new tables
- Call setup(curs, connection) in main function
- setup(curs, connection) returns nothing
- Do not call dropTable() and createTable() in main
unless you really want to do so
'''
def dropTable(curs):
    """Drop every application table, ignoring tables that do not exist.

    Tables are dropped child-first (same order as the original script) so
    that dependent tables go before the tables they refer to.

    Args:
        curs: an open database cursor (e.g. a cx_Oracle cursor).
    """
    # Order matters: dependent tables first, lookup/parent tables last.
    tables = (
        "owner",
        "auto_sale",
        "restriction",
        "driving_condition",
        "ticket",
        "ticket_type",
        "vehicle",
        "vehicle_type",
        "drive_licence",
        "people",
    )
    for table in tables:
        try:
            curs.execute("drop table " + table)
        except Exception:
            # The table may simply not exist yet (e.g. first run); keep
            # going.  Narrowed from a bare "except:" so that
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            pass
def createTable(curs):
    """Create the ten tables of the vehicle-registration schema.

    Assumes none of the tables exist yet (call dropTable first).

    Args:
        curs: an open database cursor (e.g. a cx_Oracle cursor).
    """
    # One record per person (drivers, owners, officers), keyed by SIN.
    createPeople = ("create table people "
                    """(sin CHAR(15), name VARCHAR(40), height number(5, 2),
                    weight number(5, 2), eyecolor VARCHAR(10), haircolor VARCHAR(10),
                    addr VARCHAR2(50), gender CHAR, birthday DATE)""")
    # Drive licences; photo is stored inline as a BLOB.
    createdrive_licence = ("create table drive_licence "
                           """(licence_no CHAR(15), sin CHAR(15), class VARCHAR(10),
                           photo BLOB, issuing_date DATE, expiring_date DATE)""")
    # Lookup table of driving-condition descriptions.
    createdriving_condition = ("create table driving_condition "
                               """(c_id INTEGER, description VARCHAR(1024))""")
    # Link table: a licence's restriction ids.
    createrestriction = ("create table restriction "
                         """(licence_no CHAR(15), r_id INTEGER)""")
    # Lookup table of vehicle types.
    createvehicle_type = ("create table vehicle_type "
                          """(type_id INTEGER, type CHAR(10))""")
    # Registered vehicles, keyed by serial number.
    createvehicle = ("create table vehicle "
                     """(serial_no CHAR(15), maker VARCHAR(20), model VARCHAR(20),
                     year number(4, 0), color VARCHAR(10), type_id INTEGER)""")
    # Ownership link between people and vehicles (primary-owner flag).
    createowner = ("create table owner "
                   """(owner_id CHAR(15), vehicle_id CHAR(15),
                   is_primary_owner CHAR(1))""")
    # Vehicle sale transactions between a seller and a buyer.
    createauto_sale = ("create table auto_sale "
                       """(transaction_id int, seller_id CHAR(15), buyer_id CHAR(15),
                       vehicle_id CHAR(15), s_date date, price numeric(9, 2))""")
    # Lookup table mapping a violation type to its fine amount.
    createticket_type = ("create table ticket_type "
                         """(vtype CHAR(10), fine number(5, 2))""")
    # Issued traffic tickets.
    createticket = ("create table ticket "
                    """(ticket_no int, violator_no CHAR(15), vehicle_id CHAR(15),
                    office_no CHAR(15), vtype CHAR(10), vdate date, place VARCHAR(20),
                    descriptions VARCHAR(1024))""")
    # Execute each DDL statement in declaration order.
    curs.execute(createPeople)
    curs.execute(createdrive_licence)
    curs.execute(createdriving_condition)
    curs.execute(createrestriction)
    curs.execute(createvehicle_type)
    curs.execute(createvehicle)
    curs.execute(createowner)
    curs.execute(createauto_sale)
    curs.execute(createticket_type)
    curs.execute(createticket)
    return
def setup(curs, connection):
    """Rebuild the schema from scratch and commit the work.

    Drops all application tables, recreates them, then commits on the
    given connection.

    Args:
        curs: an open database cursor used for the DDL statements.
        connection: the connection that owns ``curs``; committed at the end.
    """
    for rebuild_step in (dropTable, createTable):
        rebuild_step(curs)
    connection.commit()
| Python | 0.000001 | |
fa4ce6dc15e8b47c5978c476db7801473820af0d | add setup.py | setup.py | setup.py | # -*- coding: utf-8 -*-
| Python | 0.000001 | |
8e8fbf8b63239915736b788b7f1c8ac21a48c190 | Add a basic setup.py script | setup.py | setup.py | from distutils.core import setup
from coil import __version__ as VERSION

# Distribution metadata for the coil configuration-language package,
# collected once and unpacked into setup().
_METADATA = dict(
    name='coil',
    version=VERSION,
    author='Michael Marineau',
    author_email='mike@marineau.org',
    description='A powerful configuration language',
    license='MIT',
    packages=['coil', 'coil.test'],
    scripts=['bin/coildump'],
)

setup(**_METADATA)
| Python | 0.000001 | |
d074995f8ce5a62104525b1f3cfed10ace12c3bc | add setup.py | setup.py | setup.py | from setuptools import setup
# Read the long description with a context manager so the file handle is
# closed deterministically; the previous open('README.md').read() leaked
# the handle until garbage collection.
with open('README.md') as readme_file:
    _long_description = readme_file.read()

setup(name="feature",
      version="0.1",
      url="https://github.com/slyrz/feature",
      description="Easy feature engineering.",
      long_description=_long_description,
      packages=['feature', 'feature.plugin'],
      license='MIT')
| Python | 0.000001 | |
699ac33eec57fa49e2c1917d2bf17950bd6e6474 | Create setup script | setup.py | setup.py | """Setup script of mots-vides"""
from setuptools import setup
from setuptools import find_packages

import mots_vides

# Read ancillary files with context managers so both handles are closed
# deterministically; the previous open(...).read() calls leaked the
# README.rst and LICENSE handles until garbage collection.
with open('README.rst') as readme_file:
    _long_description = readme_file.read()
with open('LICENSE') as license_file:
    _license_text = license_file.read()

setup(
    name='mots-vides',
    version=mots_vides.__version__,
    description='Python library for managing stop words in many languages.',
    long_description=_long_description,
    keywords='stop, words, text, parsing',
    author=mots_vides.__author__,
    author_email=mots_vides.__email__,
    url=mots_vides.__url__,
    license=_license_text,
    packages=find_packages(),
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: BSD License',
        'Topic :: Software Development :: Libraries :: Python Modules']
)
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.