repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
alex/treq | treq/test/test_utils.py | 11 | 2084 | import mock
from treq.test.util import TestCase
from treq._utils import default_reactor, default_pool, set_global_pool
class DefaultReactorTests(TestCase):
    """Tests for ``default_reactor``."""

    def test_passes_reactor(self):
        """An explicitly supplied reactor is returned unchanged."""
        fake_reactor = mock.Mock()
        self.assertEqual(default_reactor(fake_reactor), fake_reactor)

    def test_uses_default_reactor(self):
        """When no reactor is given, the global Twisted reactor is used."""
        from twisted.internet import reactor
        self.assertEqual(default_reactor(None), reactor)
class DefaultPoolTests(TestCase):
    """Tests for ``default_pool`` and the cached global pool."""

    def setUp(self):
        # Start every test with no cached global pool, and patch the
        # HTTPConnectionPool class so constructions can be observed.
        set_global_pool(None)
        patcher = mock.patch('treq._utils.HTTPConnectionPool')
        self.HTTPConnectionPool = patcher.start()
        self.addCleanup(patcher.stop)
        self.reactor = mock.Mock()

    def test_persistent_false(self):
        """persistent=False constructs a non-persistent pool."""
        pool = default_pool(self.reactor, None, False)
        self.assertEqual(pool, self.HTTPConnectionPool.return_value)
        self.HTTPConnectionPool.assert_called_once_with(
            self.reactor, persistent=False
        )

    def test_pool_none_persistent_none(self):
        """No pool and no persistence flag defaults to a persistent pool."""
        pool = default_pool(self.reactor, None, None)
        self.assertEqual(pool, self.HTTPConnectionPool.return_value)
        self.HTTPConnectionPool.assert_called_once_with(
            self.reactor, persistent=True
        )

    def test_pool_none_persistent_true(self):
        """persistent=True constructs a persistent pool."""
        pool = default_pool(self.reactor, None, True)
        self.assertEqual(pool, self.HTTPConnectionPool.return_value)
        self.HTTPConnectionPool.assert_called_once_with(
            self.reactor, persistent=True
        )

    def test_cached_global_pool(self):
        """A second call reuses the cached global pool, not a new one."""
        first = default_pool(self.reactor, None, None)
        self.HTTPConnectionPool.return_value = mock.Mock()
        second = default_pool(self.reactor, None, True)
        self.assertEqual(first, second)

    def test_specified_pool(self):
        """An explicitly supplied pool is returned without construction."""
        explicit_pool = mock.Mock()
        self.assertEqual(
            default_pool(self.reactor, explicit_pool, None),
            explicit_pool
        )
        self.HTTPConnectionPool.assert_not_called()
| mit |
huangbin0709/easyLinux | boot/u-boot-2015.01/tools/patman/command.py | 25 | 4223 | # Copyright (c) 2011 The Chromium OS Authors.
#
# SPDX-License-Identifier: GPL-2.0+
#
import os
import cros_subprocess
"""Shell command ease-ups for Python."""
class CommandResult:
    """A class which captures the result of executing a command.

    Members:
        stdout: stdout obtained from command, as a string
        stderr: stderr obtained from command, as a string
        combined: interleaved stdout and stderr output, as a string
        return_code: Return code from command
        exception: Exception received, or None if all ok
    """
    # Bug fix: the original defined __init__ twice; the first
    # (argument-less) definition was dead code, silently replaced by the
    # second one.  Only the parameterised constructor is kept — it covers
    # the no-argument case through its defaults.
    def __init__(self, stdout='', stderr='', combined='', return_code=0,
                 exception=None):
        self.stdout = stdout
        self.stderr = stderr
        self.combined = combined
        self.return_code = return_code
        self.exception = exception
# This permits interception of RunPipe for test purposes. If it is set to
# a function, then that function is called with the pipe list being
# executed. Otherwise, it is assumed to be a CommandResult object, and is
# returned as the result for every RunPipe() call.
# When this value is None, commands are executed as normal.
test_result = None
def RunPipe(pipe_list, infile=None, outfile=None,
            capture=False, capture_stderr=False, oneline=False,
            raise_on_error=True, cwd=None, **kwargs):
    """
    Perform a command pipeline, with optional input/output filenames.

    Args:
        pipe_list: List of command lines to execute. Each command line is
            piped into the next, and is itself a list of strings. For
            example [ ['ls', '.git'] ['wc'] ] will pipe the output of
            'ls .git' into 'wc'.
        infile: File to provide stdin to the pipeline
        outfile: File to store stdout
        capture: True to capture output
        capture_stderr: True to capture stderr
        oneline: True to strip newline chars from output
        raise_on_error: True to raise on a failed Popen or non-zero exit
        cwd: Directory to run each command in, or None for the current one
        kwargs: Additional keyword arguments to cros_subprocess.Popen()
    Returns:
        CommandResult object
    """
    if test_result:
        # Test intercept: call the hook if it is callable, otherwise
        # return the canned CommandResult.
        if hasattr(test_result, '__call__'):
            return test_result(pipe_list=pipe_list)
        return test_result
    result = CommandResult()
    last_pipe = None
    pipeline = list(pipe_list)
    user_pipestr = '|'.join([' '.join(pipe) for pipe in pipe_list])
    kwargs['stdout'] = None
    kwargs['stderr'] = None
    while pipeline:
        cmd = pipeline.pop(0)
        # Chain stdin from the previous stage, or from infile for the first.
        if last_pipe is not None:
            kwargs['stdin'] = last_pipe.stdout
        elif infile:
            kwargs['stdin'] = open(infile, 'rb')
        # Pipe stdout onwards unless this is the last stage and nothing is
        # being captured.
        if pipeline or capture:
            kwargs['stdout'] = cros_subprocess.PIPE
        elif outfile:
            kwargs['stdout'] = open(outfile, 'wb')
        if capture_stderr:
            kwargs['stderr'] = cros_subprocess.PIPE
        try:
            last_pipe = cros_subprocess.Popen(cmd, cwd=cwd, **kwargs)
        except Exception as err:
            # 'except Exception, err' was Python-2-only syntax; 'as' works
            # on Python 2.6+ and 3.
            result.exception = err
            if raise_on_error:
                # Bug fix: the original interpolated the builtin 'str'
                # instead of the exception text str(err).
                raise Exception("Error running '%s': %s" %
                                (user_pipestr, str(err)))
            result.return_code = 255
            return result
    if capture:
        result.stdout, result.stderr, result.combined = (
            last_pipe.CommunicateFilter(None))
        if result.stdout and oneline:
            result.output = result.stdout.rstrip('\r\n')
        result.return_code = last_pipe.wait()
    else:
        result.return_code = os.waitpid(last_pipe.pid, 0)[1]
    if raise_on_error and result.return_code:
        raise Exception("Error running '%s'" % user_pipestr)
    return result
def Output(*cmd):
    """Run a command and capture its stdout; failures are not raised."""
    result = RunPipe([cmd], capture=True, raise_on_error=False)
    return result.stdout
def OutputOneLine(*cmd, **kwargs):
    """Run a command and return its stdout as a single stripped line."""
    raise_on_error = kwargs.pop('raise_on_error', True)
    result = RunPipe([cmd], capture=True, oneline=True,
                     raise_on_error=raise_on_error, **kwargs)
    return result.stdout.strip()
def Run(*cmd, **kwargs):
    """Run a command, forwarding keyword options to RunPipe."""
    result = RunPipe([cmd], **kwargs)
    return result.stdout
def RunList(cmd):
    """Run a command given as a single argument list, capturing stdout."""
    result = RunPipe([cmd], capture=True)
    return result.stdout
def StopAll():
    """Signal all running cros_subprocess commands to terminate."""
    cros_subprocess.stay_alive = False
| gpl-3.0 |
nuigroup/pymt-widgets | examples/games/tictactoe/tictactoe.py | 2 | 4900 | from pymt import *
from OpenGL.GL import *
# PYMT Plugin integration
IS_PYMT_PLUGIN = True  # marks this module as loadable by the PyMT plugin host
PLUGIN_TITLE = 'TicTacToe Game'
PLUGIN_AUTHOR = 'Thomas Hansen + Mathieu Virbel'
PLUGIN_DESCRIPTION = 'Tic Tac Toe game!'
class TTTWinner(MTWidget):
    """Full-width banner announcing the end of a game.

    Any touch dismisses the banner and restarts the associated game;
    move/up events are swallowed so they do not reach the board below.
    """
    def __init__(self, **kwargs):
        super(TTTWinner, self).__init__(**kwargs)
        self.game = kwargs.get('game')
        self.text = kwargs.get('text')

    def draw(self):
        window = self.get_parent_window()
        center = Vector(window.size) / 2
        # Dark translucent band across the middle of the window.
        set_color(0, 0, 0, .9)
        drawRectangle(pos=(0, center.y - 50), size=(window.width, 100))
        drawLabel(label=self.text, pos=center, font_size=28)

    def on_touch_down(self, *largs):
        # Any touch dismisses the banner and starts a fresh game.
        self.parent.remove_widget(self)
        self.game.restart()
        return True

    def on_touch_move(self, *largs):
        return True

    def on_touch_up(self, *largs):
        return True
class TTTGame(MTButtonMatrix):
    """3x3 tic-tac-toe board built on an MTButtonMatrix.

    Cell values: 0 = empty, 1 = player one, 2 = player two; when a line
    wins, its three cells get 3 added (see check_row_win) so draw_tile can
    highlight them.
    """
    def __init__(self,**kwargs):
        kwargs.setdefault('matrix_size', (3, 3))
        super(TTTGame, self).__init__(**kwargs)
        # Index 0 is a placeholder widget for empty cells; 1 and 2 are the
        # per-player SVG marks (indexed by cell value modulo 3).
        self.player_images = (MTWidget(),MTSvg(filename='cross.svg'),MTSvg(filename='circle.svg') )
        self.player = 1          # whose turn it is (1 or 2)
        self.done = False        # True once the game has been decided
        self.testpoint = (0,0)

    def restart(self):
        # Reset all game state and clear the underlying button matrix.
        self.done = False
        self.player = 1
        self.testpoint = (0, 0)
        self.reset()

    def show_winner(self, text):
        # Overlay an end-of-game banner on the parent window.
        popup = TTTWinner(game=self, text=text)
        self.get_parent_window().add_widget(popup)

    def select_area(self,i,j):
        """Claim cell (i, j) for the current player and advance the game."""
        self.matrix[i][j] = self.player
        winner = self.check_win()
        if winner is not None:
            self.done = True
            self.show_winner("WINNER !")
        elif self.check_full():
            # Board full with no winner: a draw.
            self.done = True
            self.show_winner("GAME OVER :(")
        else:
            # Alternate turns.
            if self.player == 1:
                self.player = 2
            else:
                self.player = 1

    def on_resize(self, w, h):
        self._width, self._height = w,h

    def on_touch_down(self, touch):
        # Ignore touches once the game is over; otherwise claim the touched
        # cell if it is still empty.
        if self.done:
            return True
        i,j = self.collide_point(int(touch.x),int(touch.y))
        if self.matrix[i][j] == 0:
            self.select_area(i,j)
        else:
            pass

    def draw_tile(self, i, j):
        """Draw one board cell: background, player circle, then SVG mark."""
        image = self.player_images[self.matrix[i][j]%3]
        glPushMatrix()
        # Translate into this cell's corner of the board.
        glTranslatef(self.width/self.matrix_size[0]*i, self.height/self.matrix_size[1]*j,0)
        s = (self.width/self.matrix_size[0],self.height/self.matrix_size[1])
        if self.matrix[i][j]%3 == 0:
            # Empty cell: dark grey inset square.
            set_color(0.25, 0.25, 0.25)
            drawRectangle(pos=(20,20),size=(s[0]-40, s[1]-40))
        if self.matrix[i][j]%3 == 1:
            set_color(1,0,0)
            drawCircle(pos=(s[0]/2, s[1]/2), radius=s[1]/2)
        if self.matrix[i][j]%3 == 2:
            set_color(0,0,1)
            drawCircle(pos=(s[0]/2, s[1]/2), radius=s[1]/2)
        if self.matrix[i][j] > 2:
            # Winning cells (value 3+player) are highlighted green.
            set_color(0,1,0)
            drawCircle(pos=(s[0]/2, s[1]/2), radius=s[1]/2)
        # Scale the SVG mark to fill the cell.
        sx, sy = s[0]/image.width, s[1]/image.height
        set_color(1, 1, 1, .99)
        glScaled(sx,sy,1)
        image.draw()
        glPopMatrix()

    def check_row_win(self, p1, p2, p3):
        """Return True and mark the cells if p1..p3 all belong to the
        current player (marking adds 3 so draw_tile highlights them)."""
        if (self.matrix[p1[0]][p1[1]] == self.player and
                self.matrix[p2[0]][p2[1]] == self.player and
                self.matrix[p3[0]][p3[1]] == self.player):
            self.matrix[p1[0]][p1[1]] = self.matrix[p2[0]][p2[1]] = self.matrix[p3[0]][p3[1]] = 3+self.player
            return True
        return False

    def check_win(self):
        """Check all 8 lines; return a cell of the winning line or None."""
        # Columns
        if self.check_row_win((0,0),(1,0), (2,0)):
            return (0, 0)
        if self.check_row_win((0,1),(1,1), (2,1)):
            return (0, 1)
        if self.check_row_win((0,2),(1,2), (2,2)):
            return (0, 2)
        # Rows
        if self.check_row_win((0,0),(0,1), (0,2)):
            return (0, 0)
        if self.check_row_win((1,0),(1,1), (1,2)):
            return (1, 0)
        if self.check_row_win((2,0),(2,1), (2,2)):
            return (2, 0)
        # Diagonals
        if self.check_row_win((0,0),(1,1), (2,2)):
            return (0, 0)
        if self.check_row_win((2,0),(1,1), (0,2)):
            return (0, 0)
        return None

    def check_full(self):
        """Return True when no empty cells remain (the board is full)."""
        # 'full' actually counts the remaining EMPTY cells; the board is
        # full when that count reaches zero.
        full = 0
        for x in range(0, self.matrix_size[0]):
            for y in range(0, self.matrix_size[1]):
                if self.matrix[x][y] == 0:
                    full += 1
        if full == 0:
            return True
        return False
def pymt_plugin_activate(w, ctx):
    """Create a board sized to the window and attach it."""
    game = TTTGame(size=w.size)
    ctx.game = game
    w.add_widget(game)
def pymt_plugin_deactivate(w, ctx):
    """Detach the board stored on the context from the window."""
    w.remove_widget(ctx.game)
#start the application (inits and shows all windows)
if __name__ == '__main__':
    # Standalone mode: build a window/context pair, activate the plugin
    # into it, run the touch loop, then tear the plugin down.
    w = MTWindow()
    ctx = MTContext()
    pymt_plugin_activate(w, ctx)
    runTouchApp()
    pymt_plugin_deactivate(w, ctx)
| lgpl-3.0 |
dknlght/dkodi | src/script.module.cryptopy/lib/crypto/cipher/tkip_fake_crc_test.py | 1 | 3754 | #! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
""" crypto.cipher.tkip_fake_crc_test
This module tests the creation of TKIP data that passes
the WEP crc, but would fail the Michael MIC check.
The IEEE TGi specification mandates a 60 second
dissassociation of all sessions when two of these
malicious packets are recieved in a 60 second period.
Copyright © (c) 2003 by Paul A. Lambert.
See LICENSE.txt for license terms of this software.
"""
import unittest
from crypto.cipher.tkip_encr import TKIP_encr
from crypto.common import xor
from binascii_plus import *
from zlib import crc32
from struct import pack, unpack
class TKIP_tkip_fake_crc_test(unittest.TestCase):
    """ Create TKIP encrypted data, modify it and patch the crc32.

    Demonstrates forging a ciphertext that still passes the WEP ICV
    (CRC-32) check even though the payload was altered.
    """
    def testTKIP_crc_modify(self):
        """ TKIP crc modification test """
        key = a2b_p( "00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f" )   # the PMK
        ta = a2b_p( "10 22 33 44 55 66" )        # the TK (key) is created from the iv and ta
        keyID = 0
        alg = TKIP_encr(key, ta, keyID)    # this is just the encryption algorithm with no Michael MIC
        plainText = ''.join([chr(i) for i in range(20)])   # 20 octets (0 t 19)
        iv = a2b_p( "01 00 00 00 00 00" )  # the little-endian encoded PacketNumber
        cipherText = alg.encrypt(plainText, iv)
        # Split the ciphertext into header / payload / encrypted ICV.
        ctHeader = cipherText[0:8]         # encoded iv and keyId
        ctData = cipherText[8:-4]
        ctCrcEncrypted = cipherText[-4:]   # just the encrypted crc fields
        # lets change the first octet of the data from 0x00 to 0x01.
        # This exploits the XOR-linearity of CRC-32: the CRC of the masked
        # data differs from the original by (crc(mask) ^ crc(zeros)), so the
        # encrypted ICV can be patched without knowing the key stream.
        base = (len(ctData))*chr(0)
        baseCrc = crc32(base)
        bitMask = chr(1)+(len(ctData)-1)*chr(0)
        maskCrc = crc32(bitMask)
        maskedCt = xor(bitMask,ctData)
        maskedCtCrc = crc32(maskedCt)
        print "--------------- make a modified packet and MIC ------------"
        print "plainText = %s " % b2a_hex(plainText)
        print "cipherText= %s " % b2a_hex(cipherText)
        print "ctData    = %s " % b2a_hex(ctData)
        print "ctxtCrc   = %s " % b2a_hex(ctCrcEncrypted)
        print "base      = %s " % b2a_hex(base)
        print "baseCrc   = %0X" % baseCrc
        print "mask      = %s " % b2a_hex(bitMask)
        print "maskCrc   = %0X" % maskCrc
        print "maskedCt  = %s " % b2a_hex(maskedCt)
        print "maskCtCrc= %0X" % maskedCtCrc
        # Patch the encrypted ICV by XORing in the CRC difference.
        maskDiff = maskCrc ^ baseCrc
        newCtCrc = pack('<I', (maskDiff ^ unpack('<I',ctCrcEncrypted)[0]) )
        newCt = ctHeader + maskedCt + newCtCrc
        newPt = alg.decrypt(newCt)    # this will raise an exception if the crc is 'bad'!
        print "newPt     = %s " % b2a_hex(newPt)

    def test_TKIP_MIC_Analysis(self):
        """ Simple analysis of TKIP CRC attacks based on
            given Michael strength of 2^20
        """
        michaelStrength = 2.**20        # probability of MIC attack from N.F.
        secondsPerHour = 60.*60.
        secondsPerDay = 24.*secondsPerHour
        secondsPerYear = 365.*secondsPerDay
        attacksPerSecond = 1.
        attacksPerYear = attacksPerSecond * secondsPerYear
        # Print a small table of mean time-to-success for various attack
        # rates, with and without the 60-second countermeasure.
        print "\n\n---- Michael Attack Analysis w/wo Countermeasures ----"
        print "%s"%"Attacks   Number   Counter          Mean"
        print "%s"%" per        of     Measure         Success"
        print "%s"%"Second     STAs     Type             Time"
        print "------------------------------------"
        print "   1         1      none           %3d days" % (michaelStrength/secondsPerDay/attacksPerSecond)
        attacksPerSecond = 100
        print " 100         1      none           %3d hours" % (michaelStrength/secondsPerHour/attacksPerSecond)
        print "  .016       1      60sec/session  %3d year" % (michaelStrength/secondsPerYear/(1/60.))
        print " 100/60    100      60sec/session  %3d days" % (michaelStrength/secondsPerDay/(100./60.) )
        print " 100         1      none           %3d hours" % (michaelStrength/secondsPerHour/attacksPerSecond)
if __name__ == '__main__':
    # Run the tests from the command line
    unittest.main()
| gpl-2.0 |
uannight/reposan | plugin.video.tvalacarta/channels/ib3.py | 1 | 9215 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# tvalacarta - XBMC Plugin
# Canal para IB3
# http://blog.tvalacarta.info/plugin-xbmc/tvalacarta/
#------------------------------------------------------------
import urlparse,re
from core import logger
from core import scrapertools
from core.item import Item
DEBUG = True                           # when True, every scraped item is logged
CHANNELNAME = "ib3"                    # channel id attached to every Item
MAIN_URL = "http://ib3tv.com/carta"    # landing page for the programme guide
def isGeneric():
    """Tell tvalacarta this channel uses the generic navigation flow."""
    return True
def mainlist(item):
    """Build the channel's top-level menu (categories plus A-Z listing)."""
    logger.info("tvalacarta.channels.ib3 mainlist")
    itemlist = []
    # (visible title, 'extra' label searched for in the page) pairs.
    categories = [
        ("Programes", "Programes"),
        ("Sèries", "Sèries"),
        ("Informatius", "Informatius"),
        ("Esports", "Esports"),
        ("Retransmissions", "Retransmissió"),
        ("Arxiu", "ARXIU"),
    ]
    for title, extra in categories:
        itemlist.append(Item(channel=CHANNELNAME, title=title,
                             action="categoria", extra=extra,
                             url=MAIN_URL, folder=True))
    # Thumbnail-based alphabetical listing uses a different action.
    itemlist.append(Item(channel=CHANNELNAME,
                         title="Programes [A-Z] (thumbnail)",
                         action="programas", url=MAIN_URL, folder=True))
    return itemlist
def categoria(item):
    """List the programmes under one top-menu category.

    ``item.extra`` carries the literal menu label to locate in the page
    (e.g. "Programes", "ARXIU").
    """
    logger.info("tvalacarta.channels.ib3 categoria")
    itemlist=[]
    # Download the page
    item.url = MAIN_URL
    data = scrapertools.cache_page(item.url)
    # Narrow the HTML to this category's submenu block.
    data = scrapertools.get_match(data,'<li><a href="">'+item.extra+'</a>(.*?)</div>')
    #logger.info(data)
    # Extract the programmes; sample of the markup being matched:
    '''
    <li><a href="">Pr</a>
    <div>
    <p>Programes de producció pròpia</p>
    <ul>
    <li><a href="javascript:stepcarousel.loadcontent('f-slide', '/wp-content/themes/ib3tv/carta/update.php?programId=7326efc5-93ce-4904-9571-a53d19c70217')">AIXÒ ÉS MEL</a></li>
    <li><a href="javascript:stepcarousel.loadcontent('f-slide', '/wp-content/themes/ib3tv/carta/update.php?programId=01e0e6c9-b2fd-4f5e-8641-17e3e455a553')">AIXÒ ÉS TOT</a></li>
    <li><a href="javascript:stepcarousel.loadcontent('f-slide', '/wp-content/themes/ib3tv/carta/update.php?programId=ff2ec1f6-a5ee-4d4d-b864-013f125c088a')">AIXÒ NO ÉS ISLÀNDIA</a></li>
    '''
    patron = "<li><a href=\"javascript.stepcarousel.loadcontent\('f-slide', '([^']+)'\)\">(.*?)</li>"
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedtitle in matches:
        title = scrapertools.htmlclean(scrapedtitle).strip()
        # The scraped URL is relative, e.g.:
        #/wp-content/themes/ib3/carta/update.php?programId=2d15fe76-bbed-40c9-95e3-32a800174b7c
        #http://ib3tv.com/wp-content/themes/ib3/carta/update.php?programId=e8f6d4ec-1d7c-4101-839a-36393d0df2a8
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = ""
        plot = ""
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        itemlist.append( Item(channel=CHANNELNAME, title=title , action="episodios" , url=url, page=url , thumbnail=thumbnail, plot=plot , show=title , category = "programas" , folder=True) )
    return itemlist
def programas(item):
    """List every programme from the A-Z search page, with thumbnails."""
    logger.info("tvalacarta.channels.ib3 programlist")
    itemlist=[]
    # Download the page
    item.url = MAIN_URL
    data = scrapertools.cache_page(item.url)
    # Narrow the HTML to the search-results block.
    data = scrapertools.get_match(data,'<h1>Recerca programes(.*?)<div class="end"')
    #logger.info(data)
    # Extract the programmes: each entry yields (relative url, thumbnail,
    # title) via the three capture groups below.
    patron  = '<div class="mielement"[^<]+'
    patron += '<div class="shadow"[^<]+'
    patron += "<a href=\"javascript.stepcarousel.loadcontent\('f-slide', '([^']+)'\)\">"
    patron += '<img src="([^"]+)"[^<]+</a[^<]+'
    patron += '</div[^<]+'
    patron += '<div class="nombres"><strong>([^<]+)</strong>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        title = scrapedtitle.strip()
        # The scraped URL is relative, e.g.:
        #/wp-content/themes/ib3/carta/update.php?programId=2d15fe76-bbed-40c9-95e3-32a800174b7c
        #http://ib3tv.com/wp-content/themes/ib3/carta/update.php?programId=e8f6d4ec-1d7c-4101-839a-36393d0df2a8
        url = urlparse.urljoin(item.url,scrapedurl)
        thumbnail = scrapedthumbnail
        plot = ""
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=CHANNELNAME, title=title , action="episodios" , url=url, page=url , thumbnail=thumbnail, plot=plot , show=title , category = "programas" , folder=True) )
    return itemlist
def episodios(item):
    """List the chapters of one programme (item.url is the AJAX endpoint)."""
    logger.info("tvalacarta.channels.ib3 episodios")
    itemlist = []
    # Download the page; the AJAX endpoint expects browser-like headers.
    headers = []
    headers.append(["Accept","*/*"])
    headers.append(["Accept-Encoding","gzip,deflate"])
    headers.append(["Accept-Language","es-ES,es;q=0.8,en;q=0.6"])
    headers.append(["Connection","keep-alive"])
    headers.append(["Referer","http://ib3tv.com/carta"])
    headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36"])
    headers.append(["X-Requested-With","XMLHttpRequest"])
    data = scrapertools.cachePage(item.url,headers=headers)
    logger.info(data)
    # Extract the chapters; two observed markup variants follow:
    '''
    <div class="keyelement">
    <div class="keyimage">
    <div class="shadow">
    <a href="javascript:CambiaGraf('7e10a2b1-29a6-4c59-9bc0-77be28888cf6')" ><img src="http://media.ib3alacarta.com/e8f6d4ec-1d7c-4101-839a-36393d0df2a8/7e10a2b1-29a6-4c59-9bc0-77be28888cf6/5120551.jpg" width="190" height="120"/></a>
    </div>
    </div>
    <div class="keytext">
    <font color="#c6006f"><strong><b>Au idò!</b></strong></font>
    <br />
    <font color="#000000"></font>
    <br />
    <font size="0.5">02 06 2011 - Capítol: 57</font>
    </div>
    </div>
    '''
    '''
    <div class="keyelement">
    <div class="keyimage">
    <div class="shadow">
    <a href="javascript:CambiaGraf('0f8a716f-d03b-4e02-84e9-48cba55cd576','Això és Tot! | Cap: 67')" ><img src="http://media.ib3alacarta.com/01e0e6c9-b2fd-4f5e-8641-17e3e455a553/0f8a716f-d03b-4e02-84e9-48cba55cd576/4266329.jpg" height="120px" " width="190"/></a>
    </div>
    </div>
    <div class="keytext">
    <font color="#c6006f" size="2.5"><strong>Això és Tot!</strong></font>
    <br />
    <font color="#595959">Això és Tot!</font>
    <br />
    <font size="0.5">03 11 2010 - Capítol: 67</font>
    </div>
    </div>
    '''
    # Capture groups: chapter id, thumbnail URL, raw title HTML.
    patron  = '<div class="keyelement"[^<]+'
    patron += '<div class="keyimage"[^<]+'
    patron += '<div class="shadow"[^<]+'
    patron += '<a href="javascript:CambiaGraf.\'([^\']+)\',\'[^\']+\'." ><img src="([^"]+)"[^<]+</a>[^<]+'
    patron += '</div>[^<]+'
    patron += '</div>[^<]+'
    patron += '<div class="keytext">(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    # Build one playable item per chapter.
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        title = scrapertools.htmlclean(scrapedtitle).strip()
        # Collapse runs of whitespace left behind by htmlclean.
        title = re.compile("\s+",re.DOTALL).sub(" ",title)
        url = "http://ib3tv.com/wp-content/themes/ENS/carta/titulos.php?type=TV&id="+scrapedurl
        thumbnail = scrapedthumbnail
        plot = ""
        itemlist.append( Item(channel=CHANNELNAME, title=title , fulltitle = item.show + " - " + title , action="play" , page = url, url=url, thumbnail=thumbnail, show=item.show , plot=plot , folder=False) )
    return itemlist
def play(item):
    """Resolve the final media URL for a chapter and wrap it in an Item."""
    logger.info("tvalacarta.channels.ib3 play")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    logger.info("data="+data)
    # The player config embeds the media URL, e.g.:
    #'src', "http://media.ib3alacarta.com/ff2ec1f6-a5ee-4d4d-b864-013f125c088a/7d3a82fb-6c03-4500-8c65-5ea510c61420/5018376.mp4"
    mediaurl = scrapertools.get_match(data,"file:'([^']+)',")
    itemlist.append( Item(channel=CHANNELNAME, title=item.title , action="play" , server="directo" , url=mediaurl, thumbnail=item.thumbnail, show=item.show , folder=False) )
    return itemlist
def test():
    """Smoke-test the channel: menu -> programmes -> episodes -> video.

    NOTE(review): uses Python 2 ``exec`` statements to dispatch on the
    action name stored in each Item; input comes from this module only.
    """
    # Check every top-level option yields some programmes
    mainlist_items = mainlist(Item())
    for mainlist_item in mainlist_items:
        exec "programas_items = "+mainlist_item.action+"(mainlist_item)"
        if len(programas_items)==0:
            print "La opción "+mainlist_item.title+" no tiene programas"
            return False
    # Drill into the first programme's episodes.
    exec "programas_items = "+mainlist_items[0].action+"(mainlist_items[0])"
    episodios_items = episodios(programas_items[0])
    if len(episodios_items)==0:
        print "El programa "+programas_items[0].title+" no tiene episodios"
        return False
    # Resolve the first episode to a playable video.
    videos_items = play(episodios_items[0])
    if len(episodios_items)==0:
        print "El episodio "+episodios_items[0].title+" del programa "+programas_items[0].title+" no tiene videos"
        return False
    return True
rrohan/scikit-learn | sklearn/pipeline.py | 61 | 21271 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
from warnings import warn
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
    """Pipeline of transforms with a final estimator.

    Sequentially apply a list of transforms and a final estimator.
    Intermediate steps of the pipeline must be 'transforms', that is, they
    must implement fit and transform methods.
    The final estimator only needs to implement fit.

    The purpose of the pipeline is to assemble several steps that can be
    cross-validated together while setting different parameters.
    For this, it enables setting parameters of the various steps using their
    names and the parameter name separated by a '__', as in the example below.

    Read more in the :ref:`User Guide <pipeline>`.

    Parameters
    ----------
    steps : list
        List of (name, transform) tuples (implementing fit/transform) that are
        chained, in the order in which they are chained, with the last object
        an estimator.

    Attributes
    ----------
    named_steps : dict
        Read-only attribute to access any step parameter by user given name.
        Keys are step names and values are steps parameters.

    Examples
    --------
    >>> from sklearn import svm
    >>> from sklearn.datasets import samples_generator
    >>> from sklearn.feature_selection import SelectKBest
    >>> from sklearn.feature_selection import f_regression
    >>> from sklearn.pipeline import Pipeline
    >>> # generate some data to play with
    >>> X, y = samples_generator.make_classification(
    ...     n_informative=5, n_redundant=0, random_state=42)
    >>> # ANOVA SVM-C
    >>> anova_filter = SelectKBest(f_regression, k=5)
    >>> clf = svm.SVC(kernel='linear')
    >>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
    >>> # You can set the parameters using the names issued
    >>> # For instance, fit using a k of 10 in the SelectKBest
    >>> # and a parameter 'C' of the svm
    >>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
    ...                                              # doctest: +ELLIPSIS
    Pipeline(steps=[...])
    >>> prediction = anova_svm.predict(X)
    >>> anova_svm.score(X, y)                        # doctest: +ELLIPSIS
    0.77...
    >>> # getting the selected features chosen by anova_filter
    >>> anova_svm.named_steps['anova'].get_support()
    ... # doctest: +NORMALIZE_WHITESPACE
    array([ True,  True,  True, False, False,  True, False,  True,  True, True,
           False, False,  True, False,  True, False, False, False, False,
           True], dtype=bool)
    """

    # BaseEstimator interface

    def __init__(self, steps):
        names, estimators = zip(*steps)
        # Duplicate names would make '<name>__<param>' addressing ambiguous.
        if len(dict(steps)) != len(steps):
            raise ValueError("Provided step names are not unique: %s" % (names,))

        # shallow copy of steps
        self.steps = tosequence(steps)
        transforms = estimators[:-1]
        estimator = estimators[-1]

        # Every intermediate step must be a transformer (fit + transform).
        for t in transforms:
            if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
                    hasattr(t, "transform")):
                raise TypeError("All intermediate steps of the chain should "
                                "be transforms and implement fit and transform"
                                " '%s' (type %s) doesn't)" % (t, type(t)))

        # The last step only needs fit.
        if not hasattr(estimator, "fit"):
            raise TypeError("Last step of chain should implement fit "
                            "'%s' (type %s) doesn't)"
                            % (estimator, type(estimator)))

    @property
    def _estimator_type(self):
        # Classifier/regressor status is delegated to the final estimator.
        return self.steps[-1][1]._estimator_type

    def get_params(self, deep=True):
        if not deep:
            return super(Pipeline, self).get_params(deep=False)
        else:
            # Flatten nested parameters as '<step>__<param>' entries.
            out = self.named_steps
            for name, step in six.iteritems(self.named_steps):
                for key, value in six.iteritems(step.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value

            out.update(super(Pipeline, self).get_params(deep=False))
            return out

    @property
    def named_steps(self):
        # Fresh dict each call: mutating it does not affect the pipeline.
        return dict(self.steps)

    @property
    def _final_estimator(self):
        return self.steps[-1][1]

    # Estimator interface

    def _pre_transform(self, X, y=None, **fit_params):
        # Split '<step>__<param>' fit parameters per step, then fit/transform
        # every step except the last, threading the data through.
        fit_params_steps = dict((step, {}) for step, _ in self.steps)
        for pname, pval in six.iteritems(fit_params):
            step, param = pname.split('__', 1)
            fit_params_steps[step][param] = pval
        Xt = X
        for name, transform in self.steps[:-1]:
            if hasattr(transform, "fit_transform"):
                Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
            else:
                Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
                              .transform(Xt)
        # Return the transformed data plus the final step's fit params.
        return Xt, fit_params_steps[self.steps[-1][0]]

    def fit(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then fit the transformed data using the final estimator.

        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of the
            pipeline.

        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps of
            the pipeline.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        self.steps[-1][-1].fit(Xt, y, **fit_params)
        return self

    def fit_transform(self, X, y=None, **fit_params):
        """Fit all the transforms one after the other and transform the
        data, then use fit_transform on transformed data using the final
        estimator.

        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of the
            pipeline.

        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps of
            the pipeline.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        if hasattr(self.steps[-1][-1], 'fit_transform'):
            return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
        else:
            return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def predict(self, X):
        """Applies transforms to the data, and the predict method of the
        final estimator. Valid only if the final estimator implements
        predict.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def fit_predict(self, X, y=None, **fit_params):
        """Applies fit_predict of last step in pipeline after transforms.

        Applies fit_transforms of a pipeline to the data, followed by the
        fit_predict method of the final estimator in the pipeline. Valid
        only if the final estimator implements fit_predict.

        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of
            the pipeline.

        y : iterable, default=None
            Training targets. Must fulfill label requirements for all steps
            of the pipeline.
        """
        Xt, fit_params = self._pre_transform(X, y, **fit_params)
        return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)

    @if_delegate_has_method(delegate='_final_estimator')
    def predict_proba(self, X):
        """Applies transforms to the data, and the predict_proba method of the
        final estimator. Valid only if the final estimator implements
        predict_proba.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_proba(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def decision_function(self, X):
        """Applies transforms to the data, and the decision_function method of
        the final estimator. Valid only if the final estimator implements
        decision_function.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].decision_function(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def predict_log_proba(self, X):
        """Applies transforms to the data, and the predict_log_proba method of
        the final estimator. Valid only if the final estimator implements
        predict_log_proba.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].predict_log_proba(Xt)

    @if_delegate_has_method(delegate='_final_estimator')
    def transform(self, X):
        """Applies transforms to the data, and the transform method of the
        final estimator. Valid only if the final estimator implements
        transform.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of
            the pipeline.
        """
        # Note: unlike predict(), this includes the final step's transform.
        Xt = X
        for name, transform in self.steps:
            Xt = transform.transform(Xt)
        return Xt

    @if_delegate_has_method(delegate='_final_estimator')
    def inverse_transform(self, X):
        """Applies inverse transform to the data.
        Starts with the last step of the pipeline and applies ``inverse_transform`` in
        inverse order of the pipeline steps.
        Valid only if all steps of the pipeline implement inverse_transform.

        Parameters
        ----------
        X : iterable
            Data to inverse transform. Must fulfill output requirements of the
            last step of the pipeline.
        """
        if X.ndim == 1:
            # Deprecated 1d-input reshape, kept for backward compatibility.
            warn("From version 0.19, a 1d X will not be reshaped in"
                 " pipeline.inverse_transform any more.", FutureWarning)
            X = X[None, :]
        Xt = X
        for name, step in self.steps[::-1]:
            Xt = step.inverse_transform(Xt)
        return Xt

    @if_delegate_has_method(delegate='_final_estimator')
    def score(self, X, y=None):
        """Applies transforms to the data, and the score method of the
        final estimator. Valid only if the final estimator implements
        score.

        Parameters
        ----------
        X : iterable
            Data to score. Must fulfill input requirements of first step of the
            pipeline.

        y : iterable, default=None
            Targets used for scoring. Must fulfill label requirements for all steps of
            the pipeline.
        """
        Xt = X
        for name, transform in self.steps[:-1]:
            Xt = transform.transform(Xt)
        return self.steps[-1][-1].score(Xt, y)

    @property
    def classes_(self):
        return self.steps[-1][-1].classes_

    @property
    def _pairwise(self):
        # check if first estimator expects pairwise input
        return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
def make_pipeline(*steps):
    """Build a :class:`Pipeline` with automatically named steps.

    Shorthand for the Pipeline constructor: step names cannot be chosen
    by the caller and are instead derived from each estimator's type
    (lowercased class name, suffixed when duplicated).

    Parameters
    ----------
    *steps : estimator instances, in pipeline order.

    Returns
    -------
    p : Pipeline
    """
    named_steps = _name_estimators(steps)
    return Pipeline(named_steps)
def _fit_one_transformer(transformer, X, y):
    # Helper dispatched via joblib Parallel: fit a single transformer on
    # (X, y) and return the fitted instance.
    return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
# if we have a weight for this transformer, muliply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
    """Concatenates results of multiple transformer objects.
    This estimator applies a list of transformer objects in parallel to the
    input data, then concatenates the results. This is useful to combine
    several feature extraction mechanisms into a single transformer.
    Read more in the :ref:`User Guide <feature_union>`.
    Parameters
    ----------
    transformer_list: list of (string, transformer) tuples
        List of transformer objects to be applied to the data. The first
        half of each tuple is the name of the transformer.
    n_jobs: int, optional
        Number of jobs to run in parallel (default 1).
    transformer_weights: dict, optional
        Multiplicative weights for features per transformer.
        Keys are transformer names, values the weights.
    """
    def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
        self.transformer_list = transformer_list
        self.n_jobs = n_jobs
        self.transformer_weights = transformer_weights
    def get_feature_names(self):
        """Get feature names from all transformers.
        Returns
        -------
        feature_names : list of strings
            Names of the features produced by transform.
        """
        feature_names = []
        for name, trans in self.transformer_list:
            # Every transformer must expose get_feature_names, otherwise
            # the union cannot build a meaningful combined feature list.
            if not hasattr(trans, 'get_feature_names'):
                raise AttributeError("Transformer %s does not provide"
                                     " get_feature_names." % str(name))
            # Prefix each feature with its transformer's name so names
            # stay unique across transformers.
            feature_names.extend([name + "__" + f for f in
                                  trans.get_feature_names()])
        return feature_names
    def fit(self, X, y=None):
        """Fit all transformers using X.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data, used to fit transformers.
        """
        # Fit each transformer (possibly in parallel), then store the
        # fitted instances back into transformer_list.
        transformers = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_one_transformer)(trans, X, y)
            for name, trans in self.transformer_list)
        self._update_transformer_list(transformers)
        return self
    def fit_transform(self, X, y=None, **fit_params):
        """Fit all transformers using X, transform the data and concatenate
        results.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.
        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        # Each parallel task yields (transformed X, fitted transformer).
        result = Parallel(n_jobs=self.n_jobs)(
            delayed(_fit_transform_one)(trans, name, X, y,
                                        self.transformer_weights, **fit_params)
            for name, trans in self.transformer_list)
        Xs, transformers = zip(*result)
        self._update_transformer_list(transformers)
        # A single sparse output forces a sparse hstack; otherwise stack
        # the blocks densely.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs
    def transform(self, X):
        """Transform X separately by each transformer, concatenate results.
        Parameters
        ----------
        X : array-like or sparse matrix, shape (n_samples, n_features)
            Input data to be transformed.
        Returns
        -------
        X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
            hstack of results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers.
        """
        Xs = Parallel(n_jobs=self.n_jobs)(
            delayed(_transform_one)(trans, name, X, self.transformer_weights)
            for name, trans in self.transformer_list)
        # A single sparse output forces a sparse hstack; otherwise stack
        # the blocks densely.
        if any(sparse.issparse(f) for f in Xs):
            Xs = sparse.hstack(Xs).tocsr()
        else:
            Xs = np.hstack(Xs)
        return Xs
    def get_params(self, deep=True):
        # BaseEstimator.get_params cannot see the nested transformers, so
        # flatten their parameters using the '<name>__<param>' convention.
        if not deep:
            return super(FeatureUnion, self).get_params(deep=False)
        else:
            out = dict(self.transformer_list)
            for name, trans in self.transformer_list:
                for key, value in iteritems(trans.get_params(deep=True)):
                    out['%s__%s' % (name, key)] = value
            out.update(super(FeatureUnion, self).get_params(deep=False))
            return out
    def _update_transformer_list(self, transformers):
        # Replace each old transformer with its fitted counterpart while
        # preserving names and order (in-place slice assignment so external
        # references to the list stay valid).
        self.transformer_list[:] = [
            (name, new)
            for ((name, old), new) in zip(self.transformer_list, transformers)
        ]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
    """Build a :class:`FeatureUnion` with automatically named transformers.

    Shorthand for the FeatureUnion constructor: transformer names cannot
    be chosen by the caller and are derived from each transformer's type.
    Per-transformer weighting is not available through this helper.

    Parameters
    ----------
    *transformers : transformer instances.

    Returns
    -------
    f : FeatureUnion
    """
    named = _name_estimators(transformers)
    return FeatureUnion(named)
| bsd-3-clause |
fernandezcuesta/ansible | lib/ansible/modules/network/vyos/vyos_user.py | 11 | 10403 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = """
---
module: vyos_user
version_added: "2.4"
author: "Trishna Guha (@trishnag)"
short_description: Manage the collection of local users on VyOS device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the collection of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
options:
users:
description:
- The set of username objects to be configured on the remote
VyOS device. The list entries can either be the username or
a hash of username and properties. This argument is mutually
exclusive with the C(name) argument. alias C(aggregate).
name:
description:
- The username to be configured on the VyOS device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
Please note that this option is not same as C(provider username).
full_name:
description:
- The C(full_name) argument provides the full name of the user
account to be created on the remote device. This argument accepts
any text string value.
password:
description:
- The password to be configured on the VyOS device. The
password needs to be provided in clear and it will be encrypted
on the device.
Please note that this option is not same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
level:
description:
- The C(level) argument configures the level of the user when logged
into the system. This argument accepts string values admin or operator.
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user (the current defined set of users).
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: create a new user
vyos_user:
name: ansible
password: password
state: present
- name: remove all users except admin
vyos_user:
purge: yes
- name: set multiple users to level operator
vyos_user:
users:
- name: netop
- name: netend
level: operator
state: present
- name: Change Password for User netop
vyos_user:
name: netop
password: "{{ new_password }}"
update_password: always
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set system login user test level operator
- set system login user authentication plaintext-password password
"""
import re
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vyos import get_config, load_config
from ansible.module_utils.six import iteritems
from ansible.module_utils.vyos import vyos_argument_spec, check_args
def validate_level(value, module):
    """Fail the module unless *value* is a recognised privilege level."""
    valid_levels = ('admin', 'operator')
    if value not in valid_levels:
        module.fail_json(msg='level must be either admin or operator, got %s' % value)
def spec_to_commands(updates, module):
    """Translate (want, have) update pairs into VyOS configuration commands.

    Parameters
    ----------
    updates : list of (want, have) dict pairs, as built by update_objects().
    module : AnsibleModule; only the ``update_password`` parameter is read.

    Returns the list of ``set``/``delete system login user`` commands.

    Note: the original read ``module.params['state']`` into an unused
    local; the per-user state in ``want['state']`` is what matters.
    """
    commands = list()
    update_password = module.params['update_password']

    def needs_update(want, have, x):
        # An attribute needs updating when it is requested and differs
        # from the value currently configured on the device.
        return want.get(x) and (want.get(x) != have.get(x))

    def add(command, want, x):
        command.append('set system login user %s %s' % (want['name'], x))

    for update in updates:
        want, have = update
        if want['state'] == 'absent':
            commands.append('delete system login user %s' % want['name'])
            continue
        if needs_update(want, have, 'level'):
            add(commands, want, "level %s" % want['level'])
        if needs_update(want, have, 'full_name'):
            add(commands, want, "full-name %s" % want['full_name'])
        if needs_update(want, have, 'password'):
            # Device passwords are stored encrypted, so equality with the
            # requested plaintext cannot be checked; honour the
            # update_password policy instead.
            if update_password == 'always' or not have:
                add(commands, want, 'authentication plaintext-password %s' % want['password'])
    return commands
def parse_level(data):
match = re.search(r'level (\S+)', data, re.M)
if match:
level = match.group(1)[1:-1]
return level
def parse_full_name(data):
    """Extract the full-name value from a config fragment, or None.

    The value appears quoted in the device configuration, so the
    surrounding quote characters are stripped from the captured token.
    """
    found = re.search(r'full-name (\S+)', data, re.M)
    if not found:
        return None
    return found.group(1)[1:-1]
def config_to_dict(module):
    """Parse the device running config into a list of user dicts.

    Each dict carries name/state/level/full_name; the password is never
    recoverable from the device (stored encrypted) and is reported as
    None.
    """
    data = get_config(module)
    match = re.findall(r'^set system login user (\S+)', data, re.M)
    if not match:
        return list()
    instances = list()
    for user in set(match):
        # Escape the username so regex metacharacters in it cannot break
        # (or over-match) the per-user configuration lookup.
        regex = r' %s .+$' % re.escape(user)
        cfg = re.findall(regex, data, re.M)
        cfg = '\n'.join(cfg)
        obj = {
            'name': user,
            'state': 'present',
            'password': None,
            'level': parse_level(cfg),
            'full_name': parse_full_name(cfg)
        }
        instances.append(obj)
    return instances
def get_param_value(key, item, module):
    """Resolve *key* for one user entry, falling back to module params.

    When the entry supplies the value itself it is type-checked against
    the module's argument spec; otherwise the module-level parameter of
    the same name is used. Either way a ``validate_<key>`` function, if
    one exists in this module, is run on truthy values.
    """
    if item.get(key):
        # The entry carries its own value: type-check it per the spec.
        spec_type = module.argument_spec[key].get('type', 'str')
        module._CHECK_ARGUMENT_TYPES_DISPATCHER[spec_type](item[key])
        value = item[key]
    else:
        # Fall back to the module-level parameter of the same name.
        value = module.params[key]
    validator = globals().get('validate_%s' % key)
    if value and validator:
        validator(value, module)
    return value
def map_params_to_obj(module):
    """Build the list of desired user objects from module parameters.

    Accepts either the single C(name) parameter or the C(users) list
    (whose items may be bare name strings or dicts); every entry is
    normalised to a dict, then password/full_name/level/state are
    resolved from the entry itself or from module defaults.
    """
    users = module.params['users']
    if not users:
        if not module.params['name'] and module.params['purge']:
            # Purge-only run: an empty desired set is intentional.
            return list()
        elif not module.params['name']:
            module.fail_json(msg='username is required')
        else:
            aggregate = [{'name': module.params['name']}]
    else:
        aggregate = list()
        for item in users:
            if not isinstance(item, dict):
                # Bare string entries are just usernames.
                aggregate.append({'name': item})
            elif 'name' not in item:
                module.fail_json(msg='name is required')
            else:
                aggregate.append(item)
    objects = list()
    for item in aggregate:
        # Resolve each attribute from the item or the module parameters.
        get_value = partial(get_param_value, item=item, module=module)
        item['password'] = get_value('password')
        item['full_name'] = get_value('full_name')
        item['level'] = get_value('level')
        item['state'] = get_value('state')
        objects.append(item)
    return objects
def update_objects(want, have):
    """Return (want, have) pairs for users that are new or need changes.

    Each wanted user appears at most once in the result: paired with an
    empty dict when it does not exist on the device yet, or with the
    current device entry when at least one requested attribute differs.

    Bug fix: the original appended the same (want, have) pair once per
    differing attribute, which made spec_to_commands() emit duplicate
    commands; a single differing attribute is enough to schedule the
    user once.
    """
    updates = list()
    for entry in want:
        item = next((i for i in have if i['name'] == entry['name']), None)
        if item is None:
            updates.append((entry, {}))
        elif item:
            for key, value in entry.items():
                if value and value != item[key]:
                    updates.append((entry, item))
                    break
    return updates
def main():
    """ main entry point for module execution
    """
    # Module argument specification; 'users' and 'name' are mutually
    # exclusive ways of naming the accounts to manage.
    argument_spec = dict(
        users=dict(type='list', aliases=['aggregate']),
        name=dict(),
        full_name=dict(),
        level=dict(aliases=['role']),
        password=dict(no_log=True),
        update_password=dict(default='always', choices=['on_create', 'always']),
        purge=dict(type='bool', default=False),
        state=dict(default='present', choices=['present', 'absent'])
    )
    argument_spec.update(vyos_argument_spec)
    mutually_exclusive = [('name', 'users')]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    result = {'changed': False}
    if warnings:
        result['warnings'] = warnings
    # Diff the desired users against the current device configuration
    # and translate the differences into configuration commands.
    want = map_params_to_obj(module)
    have = config_to_dict(module)
    commands = spec_to_commands(update_objects(want, have), module)
    if module.params['purge']:
        # Delete every configured user that is not in the desired set.
        want_users = [x['name'] for x in want]
        have_users = [x['name'] for x in have]
        for item in set(have_users).difference(want_users):
            commands.append('delete system login user %s' % item)
    result['commands'] = commands
    if commands:
        # Apply (and commit) only when not running in check mode.
        commit = not module.check_mode
        load_config(module, commands, commit=commit)
        result['changed'] = True
    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
pnorman/mapnik | scons/scons-local-2.4.1/SCons/Tool/javac.py | 6 | 8631 | """SCons.Tool.javac
Tool-specific initialization for javac.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2015 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/javac.py rel_2.4.1:3453:73fefd3ea0b0 2015/11/09 03:25:05 bdbaddog"
import os
import os.path
import SCons.Action
import SCons.Builder
from SCons.Node.FS import _my_normcase
from SCons.Tool.JavaCommon import parse_java_file
import SCons.Util
def classname(path):
    """Convert a filesystem path into a dotted Java class name."""
    normalized = os.path.normpath(path)
    return normalized.replace(os.sep, '.')
def emit_java_classes(target, source, env):
    """Create and return lists of source java files
    and their corresponding target class files.

    Emitter for the Java class builders: target[0] is the class
    directory; sources may be .java File nodes or Dir nodes that are
    scanned recursively for .java files.
    """
    java_suffix = env.get('JAVASUFFIX', '.java')
    class_suffix = env.get('JAVACLASSSUFFIX', '.class')
    target[0].must_be_same(SCons.Node.FS.Dir)
    classdir = target[0]
    # The first source determines the source directory recorded on the
    # generated targets.
    s = source[0].rentry().disambiguate()
    if isinstance(s, SCons.Node.FS.File):
        sourcedir = s.dir.rdir()
    elif isinstance(s, SCons.Node.FS.Dir):
        sourcedir = s.rdir()
    else:
        raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % s.__class__)
    slist = []
    js = _my_normcase(java_suffix)
    # Expand every source entry into a flat list of .java File nodes.
    for entry in source:
        entry = entry.rentry().disambiguate()
        if isinstance(entry, SCons.Node.FS.File):
            slist.append(entry)
        elif isinstance(entry, SCons.Node.FS.Dir):
            # Ordered dict used as an ordered set of discovered files.
            result = SCons.Util.OrderedDict()
            dirnode = entry.rdir()
            def find_java_files(arg, dirpath, filenames):
                java_files = sorted([n for n in filenames
                                     if _my_normcase(n).endswith(js)])
                mydir = dirnode.Dir(dirpath)
                java_paths = [mydir.File(f) for f in java_files]
                for jp in java_paths:
                    arg[jp] = True
            # Scan both the on-disk tree and the in-memory node tree so
            # files known only to one of them are still found.
            for dirpath, dirnames, filenames in os.walk(dirnode.get_abspath()):
                find_java_files(result, dirpath, filenames)
            entry.walk(find_java_files, result)
            slist.extend(list(result.keys()))
        else:
            raise SCons.Errors.UserError("Java source must be File or Dir, not '%s'" % entry.__class__)
    version = env.get('JAVAVERSION', '1.4')
    full_tlist = []
    for f in slist:
        tlist = []
        source_file_based = True
        pkg_dir = None
        if not f.is_derived():
            # Parse the .java file to discover its package and the
            # classes (including inner/anonymous) it defines.
            pkg_dir, classes = parse_java_file(f.rfile().get_abspath(), version)
            if classes:
                source_file_based = False
            if pkg_dir:
                d = target[0].Dir(pkg_dir)
                p = pkg_dir + os.sep
            else:
                d = target[0]
                p = ''
            for c in classes:
                t = d.File(c + class_suffix)
                t.attributes.java_classdir = classdir
                t.attributes.java_sourcedir = sourcedir
                t.attributes.java_classname = classname(p + c)
                tlist.append(t)
        if source_file_based:
            # Derived or unparsable sources: assume one class named
            # after the file itself.
            base = f.name[:-len(java_suffix)]
            if pkg_dir:
                t = target[0].Dir(pkg_dir).File(base + class_suffix)
            else:
                t = target[0].File(base + class_suffix)
            t.attributes.java_classdir = classdir
            t.attributes.java_sourcedir = f.dir
            t.attributes.java_classname = classname(base)
            tlist.append(t)
        for t in tlist:
            t.set_specific_source([f])
        full_tlist.extend(tlist)
    return full_tlist, slist
# Action/Builder pair underlying the Java class builders; the emitter maps
# each .java source to the .class targets javac is expected to produce.
JavaAction = SCons.Action.Action('$JAVACCOM', '$JAVACCOMSTR')
JavaBuilder = SCons.Builder.Builder(action = JavaAction,
                    emitter = emit_java_classes,
                    target_factory = SCons.Node.FS.Entry,
                    source_factory = SCons.Node.FS.Entry)
class pathopt(object):
    """
    Callable object for generating javac-style path options from
    a construction variable (e.g. -classpath, -sourcepath).
    """
    def __init__(self, opt, var, default=None):
        # opt: command-line option name, e.g. '-classpath'.
        # var: construction variable holding the path element(s).
        # default: optional variable whose value is appended as fallback.
        self.opt = opt
        self.var = var
        self.default = default
    def __call__(self, target, source, env, for_signature):
        # Normalise the variable's value to a list of path elements.
        path = env[self.var]
        if path and not SCons.Util.is_List(path):
            path = [path]
        if self.default:
            extra = env[self.default]
            if extra:
                if not SCons.Util.is_List(extra):
                    extra = [extra]
                path = path + extra
        if not path:
            return []
        return [self.opt, os.pathsep.join(map(str, path))]
def Java(env, target, source, *args, **kw):
    """
    A pseudo-Builder wrapper around the separate JavaClass{File,Dir}
    Builders.

    Dispatches every (target, source) pair to either the file or the
    directory builder depending on what the source turns out to be.
    """
    if not SCons.Util.is_List(target):
        target = [target]
    if not SCons.Util.is_List(source):
        source = [source]
    # Pad the target list with repetitions of the last element in the
    # list so we have a target for every source element.
    target = target + ([target[-1]] * (len(source) - len(target)))
    java_suffix = env.subst('$JAVASUFFIX')
    result = []
    for t, s in zip(target, source):
        if isinstance(s, SCons.Node.FS.Base):
            # Node objects: dispatch on File vs. Dir directly.
            if isinstance(s, SCons.Node.FS.File):
                b = env.JavaClassFile
            else:
                b = env.JavaClassDir
        else:
            # Strings: inspect the filesystem first, then fall back to
            # the configured .java suffix to decide.
            if os.path.isfile(s):
                b = env.JavaClassFile
            elif os.path.isdir(s):
                b = env.JavaClassDir
            elif s[-len(java_suffix):] == java_suffix:
                b = env.JavaClassFile
            else:
                b = env.JavaClassDir
        result.extend(b(t, s, *args, **kw))
    return result
def generate(env):
    """Add Builders and construction variables for javac to an Environment."""
    java_file = SCons.Tool.CreateJavaFileBuilder(env)
    java_class = SCons.Tool.CreateJavaClassFileBuilder(env)
    java_class_dir = SCons.Tool.CreateJavaClassDirBuilder(env)
    # Attach the class-file emitter for both unsuffixed and .java sources.
    java_class.add_emitter(None, emit_java_classes)
    java_class.add_emitter(env.subst('$JAVASUFFIX'), emit_java_classes)
    java_class_dir.emitter = emit_java_classes
    env.AddMethod(Java)
    # Default construction variables controlling the javac command line.
    env['JAVAC'] = 'javac'
    env['JAVACFLAGS'] = SCons.Util.CLVar('')
    env['JAVABOOTCLASSPATH'] = []
    env['JAVACLASSPATH'] = []
    env['JAVASOURCEPATH'] = []
    # pathopt instances expand the path variables into -foo options.
    env['_javapathopt'] = pathopt
    env['_JAVABOOTCLASSPATH'] = '${_javapathopt("-bootclasspath", "JAVABOOTCLASSPATH")} '
    env['_JAVACLASSPATH'] = '${_javapathopt("-classpath", "JAVACLASSPATH")} '
    env['_JAVASOURCEPATH'] = '${_javapathopt("-sourcepath", "JAVASOURCEPATH", "_JAVASOURCEPATHDEFAULT")} '
    env['_JAVASOURCEPATHDEFAULT'] = '${TARGET.attributes.java_sourcedir}'
    env['_JAVACCOM'] = '$JAVAC $JAVACFLAGS $_JAVABOOTCLASSPATH $_JAVACLASSPATH -d ${TARGET.attributes.java_classdir} $_JAVASOURCEPATH $SOURCES'
    env['JAVACCOM'] = "${TEMPFILE('$_JAVACCOM','$JAVACCOMSTR')}"
    env['JAVACLASSSUFFIX'] = '.class'
    env['JAVASUFFIX'] = '.java'
def exists(env):
    """Report the javac tool as always available."""
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| lgpl-2.1 |
WeblateOrg/weblate | weblate/trans/models/change.py | 2 | 22952 | #
# Copyright © 2012 - 2021 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django.conf import settings
from django.db import models, transaction
from django.db.models import Count, Q
from django.utils import timezone
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy, ngettext_lazy, pgettext
from jellyfish import damerau_levenshtein_distance
from weblate.lang.models import Language
from weblate.trans.mixins import UserDisplayMixin
from weblate.trans.models.alert import ALERTS
from weblate.trans.models.project import Project
from weblate.utils.fields import JSONField
class ChangeQuerySet(models.QuerySet):
    # pylint: disable=no-init
    def content(self, prefetch=False):
        """Return queryset with content changes."""
        base = self
        if prefetch:
            base = base.prefetch()
        return base.filter(action__in=Change.ACTIONS_CONTENT)
    @staticmethod
    def count_stats(days, step, dtstart, base):
        """Count number of changes in given dataset and period grouped by step days.

        Returns a list of (interval start, change count) tuples covering
        ``days`` days from ``dtstart`` in ``step``-day buckets.
        """
        # Count number of changes
        result = []
        for _unused in range(0, days, step):
            # Calculate interval
            int_start = dtstart
            int_end = int_start + timezone.timedelta(days=step)
            # Count changes
            int_base = base.filter(timestamp__range=(int_start, int_end))
            count = int_base.aggregate(Count("id"))
            # Append to result
            result.append((int_start, count["id__count"]))
            # Advance to next interval
            dtstart = int_end
        return result
    def base_stats(
        self,
        days,
        step,
        project=None,
        component=None,
        translation=None,
        language=None,
        user=None,
    ):
        """Core of daily/weekly/monthly stats calculation."""
        # Get range (actually start)
        dtstart = timezone.now() - timezone.timedelta(days=days + 1)
        # Base for filtering
        base = self.all()
        # Filter by translation/component/project; the most specific
        # scope wins and only one of these filters is applied.
        if translation is not None:
            base = base.filter(translation=translation)
        elif component is not None:
            base = base.filter(component=component)
        elif project is not None:
            base = base.filter(project=project)
        # Filter by language
        if language is not None:
            base = base.filter(language=language)
        # Filter by user
        if user is not None:
            base = base.filter(user=user)
        return self.count_stats(days, step, dtstart, base)
    def prefetch(self):
        """Fetch related fields in big chunks to avoid loading them individually."""
        return self.prefetch_related(
            "user",
            "translation",
            "component",
            "project",
            "unit",
            "translation__language",
            "translation__component",
            "translation__component__project",
            "unit__translation",
            "unit__translation__language",
            "unit__translation__plural",
            "unit__translation__component",
            "unit__translation__component__project",
            "component__project",
        )
    def last_changes(self, user):
        """Return last changes for an user.
        Prefilter Changes by ACL for users and fetches related fields for last changes
        display.
        """
        if user.is_superuser:
            return self.prefetch().order()
        # Non-superusers only see changes in projects they may access,
        # and within those, only unrestricted components or components
        # they hold explicit permissions on.
        return (
            self.prefetch()
            .filter(
                Q(project_id__in=user.allowed_project_ids)
                & (
                    Q(component__isnull=True)
                    | Q(component__restricted=False)
                    | Q(component_id__in=user.component_permissions)
                )
            )
            .order()
        )
    def authors_list(self, date_range=None):
        """Return list of authors.

        Yields (email, full name, change count) tuples for every author
        of a content change, optionally limited to a timestamp range.
        """
        authors = self.content()
        if date_range is not None:
            authors = authors.filter(timestamp__range=date_range)
        return (
            authors.exclude(author__isnull=True)
            .values("author")
            .annotate(change_count=Count("id"))
            .values_list("author__email", "author__full_name", "change_count")
        )
    def order(self):
        # Newest changes first.
        return self.order_by("-timestamp")
class ChangeManager(models.Manager):
    def create(self, user=None, **kwargs):
        """Create a Change, never attributing it to an anonymous user.

        Unauthenticated requests carry an AnonymousUser object; such
        changes are stored with a NULL user instead.
        """
        authenticated = user is not None and user.is_authenticated
        return super().create(user=user if authenticated else None, **kwargs)
class Change(models.Model, UserDisplayMixin):
    """History event describing a change performed in Weblate.

    Covers translation edits as well as repository operations, user and
    access management, screenshots and alerts.
    """

    ACTION_UPDATE = 0
    ACTION_COMPLETE = 1
    ACTION_CHANGE = 2
    ACTION_COMMENT = 3
    ACTION_SUGGESTION = 4
    ACTION_NEW = 5
    ACTION_AUTO = 6
    ACTION_ACCEPT = 7
    ACTION_REVERT = 8
    ACTION_UPLOAD = 9
    ACTION_NEW_SOURCE = 13
    ACTION_LOCK = 14
    ACTION_UNLOCK = 15
    ACTION_DUPLICATE_STRING = 16
    ACTION_COMMIT = 17
    ACTION_PUSH = 18
    ACTION_RESET = 19
    ACTION_MERGE = 20
    ACTION_REBASE = 21
    ACTION_FAILED_MERGE = 22
    ACTION_FAILED_REBASE = 23
    ACTION_PARSE_ERROR = 24
    ACTION_REMOVE_TRANSLATION = 25
    ACTION_SUGGESTION_DELETE = 26
    ACTION_REPLACE = 27
    ACTION_FAILED_PUSH = 28
    ACTION_SUGGESTION_CLEANUP = 29
    ACTION_SOURCE_CHANGE = 30
    ACTION_NEW_UNIT = 31
    ACTION_BULK_EDIT = 32
    ACTION_ACCESS_EDIT = 33
    ACTION_ADD_USER = 34
    ACTION_REMOVE_USER = 35
    ACTION_APPROVE = 36
    ACTION_MARKED_EDIT = 37
    ACTION_REMOVE_COMPONENT = 38
    ACTION_REMOVE_PROJECT = 39
    ACTION_DUPLICATE_LANGUAGE = 40
    ACTION_RENAME_PROJECT = 41
    ACTION_RENAME_COMPONENT = 42
    ACTION_MOVE_COMPONENT = 43
    ACTION_NEW_STRING = 44
    ACTION_NEW_CONTRIBUTOR = 45
    ACTION_ANNOUNCEMENT = 46
    ACTION_ALERT = 47
    ACTION_ADDED_LANGUAGE = 48
    ACTION_REQUESTED_LANGUAGE = 49
    ACTION_CREATE_PROJECT = 50
    ACTION_CREATE_COMPONENT = 51
    ACTION_INVITE_USER = 52
    ACTION_HOOK = 53
    ACTION_REPLACE_UPLOAD = 54
    ACTION_LICENSE_CHANGE = 55
    ACTION_AGREEMENT_CHANGE = 56
    ACTION_SCREENSHOT_ADDED = 57
    ACTION_SCREENSHOT_UPLOADED = 58

    ACTION_CHOICES = (
        # Translators: Name of event in the history
        (ACTION_UPDATE, gettext_lazy("Resource update")),
        # Translators: Name of event in the history
        (ACTION_COMPLETE, gettext_lazy("Translation completed")),
        # Translators: Name of event in the history
        (ACTION_CHANGE, gettext_lazy("Translation changed")),
        # Translators: Name of event in the history
        (ACTION_NEW, gettext_lazy("New translation")),
        # Translators: Name of event in the history
        (ACTION_COMMENT, gettext_lazy("Comment added")),
        # Translators: Name of event in the history
        (ACTION_SUGGESTION, gettext_lazy("Suggestion added")),
        # Translators: Name of event in the history
        (ACTION_AUTO, gettext_lazy("Automatic translation")),
        # Translators: Name of event in the history
        (ACTION_ACCEPT, gettext_lazy("Suggestion accepted")),
        # Translators: Name of event in the history
        (ACTION_REVERT, gettext_lazy("Translation reverted")),
        # Translators: Name of event in the history
        (ACTION_UPLOAD, gettext_lazy("Translation uploaded")),
        # Translators: Name of event in the history
        (ACTION_NEW_SOURCE, gettext_lazy("New source string")),
        # Translators: Name of event in the history
        (ACTION_LOCK, gettext_lazy("Component locked")),
        # Translators: Name of event in the history
        (ACTION_UNLOCK, gettext_lazy("Component unlocked")),
        # Translators: Name of event in the history
        (ACTION_DUPLICATE_STRING, gettext_lazy("Found duplicated string")),
        # Translators: Name of event in the history
        (ACTION_COMMIT, gettext_lazy("Committed changes")),
        # Translators: Name of event in the history
        (ACTION_PUSH, gettext_lazy("Pushed changes")),
        # Translators: Name of event in the history
        (ACTION_RESET, gettext_lazy("Reset repository")),
        # Translators: Name of event in the history
        (ACTION_MERGE, gettext_lazy("Merged repository")),
        # Translators: Name of event in the history
        (ACTION_REBASE, gettext_lazy("Rebased repository")),
        # Translators: Name of event in the history
        (ACTION_FAILED_MERGE, gettext_lazy("Failed merge on repository")),
        # Translators: Name of event in the history
        (ACTION_FAILED_REBASE, gettext_lazy("Failed rebase on repository")),
        # Translators: Name of event in the history
        (ACTION_FAILED_PUSH, gettext_lazy("Failed push on repository")),
        # Translators: Name of event in the history
        (ACTION_PARSE_ERROR, gettext_lazy("Parse error")),
        # Translators: Name of event in the history
        (ACTION_REMOVE_TRANSLATION, gettext_lazy("Removed translation")),
        # Translators: Name of event in the history
        (ACTION_SUGGESTION_DELETE, gettext_lazy("Suggestion removed")),
        # Translators: Name of event in the history
        (ACTION_REPLACE, gettext_lazy("Search and replace")),
        # Translators: Name of event in the history
        (ACTION_SUGGESTION_CLEANUP, gettext_lazy("Suggestion removed during cleanup")),
        # Translators: Name of event in the history
        (ACTION_SOURCE_CHANGE, gettext_lazy("Source string changed")),
        # Translators: Name of event in the history
        (ACTION_NEW_UNIT, gettext_lazy("New string added")),
        # Translators: Name of event in the history
        (ACTION_BULK_EDIT, gettext_lazy("Bulk status change")),
        # Translators: Name of event in the history
        (ACTION_ACCESS_EDIT, gettext_lazy("Changed visibility")),
        # Translators: Name of event in the history
        (ACTION_ADD_USER, gettext_lazy("Added user")),
        # Translators: Name of event in the history
        (ACTION_REMOVE_USER, gettext_lazy("Removed user")),
        # Translators: Name of event in the history
        (ACTION_APPROVE, gettext_lazy("Translation approved")),
        # Translators: Name of event in the history
        (ACTION_MARKED_EDIT, gettext_lazy("Marked for edit")),
        # Translators: Name of event in the history
        (ACTION_REMOVE_COMPONENT, gettext_lazy("Removed component")),
        # Translators: Name of event in the history
        (ACTION_REMOVE_PROJECT, gettext_lazy("Removed project")),
        # Translators: Name of event in the history
        (ACTION_DUPLICATE_LANGUAGE, gettext_lazy("Found duplicated language")),
        # Translators: Name of event in the history
        (ACTION_RENAME_PROJECT, gettext_lazy("Renamed project")),
        # Translators: Name of event in the history
        (ACTION_RENAME_COMPONENT, gettext_lazy("Renamed component")),
        # Translators: Name of event in the history
        (ACTION_MOVE_COMPONENT, gettext_lazy("Moved component")),
        # Not translated, used plural instead
        (ACTION_NEW_STRING, "New string to translate"),
        # Translators: Name of event in the history
        (ACTION_NEW_CONTRIBUTOR, gettext_lazy("New contributor")),
        # Translators: Name of event in the history
        (ACTION_ANNOUNCEMENT, gettext_lazy("New announcement")),
        # Translators: Name of event in the history
        (ACTION_ALERT, gettext_lazy("New alert")),
        # Translators: Name of event in the history
        (ACTION_ADDED_LANGUAGE, gettext_lazy("Added new language")),
        # Translators: Name of event in the history
        (ACTION_REQUESTED_LANGUAGE, gettext_lazy("Requested new language")),
        # Translators: Name of event in the history
        (ACTION_CREATE_PROJECT, gettext_lazy("Created project")),
        # Translators: Name of event in the history
        (ACTION_CREATE_COMPONENT, gettext_lazy("Created component")),
        # Translators: Name of event in the history
        (ACTION_INVITE_USER, gettext_lazy("Invited user")),
        # Translators: Name of event in the history
        (ACTION_HOOK, gettext_lazy("Received repository notification")),
        # Translators: Name of event in the history
        (ACTION_REPLACE_UPLOAD, gettext_lazy("Replaced file by upload")),
        # Translators: Name of event in the history
        (ACTION_LICENSE_CHANGE, gettext_lazy("License changed")),
        # Translators: Name of event in the history
        (ACTION_AGREEMENT_CHANGE, gettext_lazy("Contributor agreement changed")),
        # Translators: Name of event in the history (typo fix: was "Screnshot")
        (ACTION_SCREENSHOT_ADDED, gettext_lazy("Screenshot added")),
        # Translators: Name of event in the history (typo fix: was "Screnshot")
        (ACTION_SCREENSHOT_UPLOADED, gettext_lazy("Screenshot uploaded")),
    )
    ACTIONS_DICT = dict(ACTION_CHOICES)
    # Map "new-translation"-style slugs and display names back to action IDs.
    ACTION_STRINGS = {
        name.lower().replace(" ", "-"): value for value, name in ACTION_CHOICES
    }
    ACTION_NAMES = {str(name): value for value, name in ACTION_CHOICES}

    # Actions which can be reverted
    ACTIONS_REVERTABLE = {
        ACTION_ACCEPT,
        ACTION_REVERT,
        ACTION_CHANGE,
        ACTION_UPLOAD,
        ACTION_NEW,
        ACTION_REPLACE,
        ACTION_AUTO,
        ACTION_APPROVE,
        ACTION_MARKED_EDIT,
    }

    # Content changes considered when looking for last author
    ACTIONS_CONTENT = {
        ACTION_CHANGE,
        ACTION_NEW,
        ACTION_AUTO,
        ACTION_ACCEPT,
        ACTION_REVERT,
        ACTION_UPLOAD,
        ACTION_REPLACE,
        ACTION_BULK_EDIT,
        ACTION_APPROVE,
        ACTION_MARKED_EDIT,
    }

    # Actions shown on the repository management page
    ACTIONS_REPOSITORY = {
        ACTION_COMMIT,
        ACTION_PUSH,
        ACTION_RESET,
        ACTION_MERGE,
        ACTION_REBASE,
        ACTION_FAILED_MERGE,
        ACTION_FAILED_REBASE,
        ACTION_FAILED_PUSH,
        ACTION_LOCK,
        ACTION_UNLOCK,
        ACTION_DUPLICATE_LANGUAGE,
    }

    # Actions where target is rendered as translation string
    ACTIONS_SHOW_CONTENT = {
        ACTION_SUGGESTION,
        ACTION_SUGGESTION_DELETE,
        ACTION_SUGGESTION_CLEANUP,
        ACTION_BULK_EDIT,
        ACTION_NEW_UNIT,
    }

    # Actions indicating a repository merge failure
    ACTIONS_MERGE_FAILURE = {
        ACTION_FAILED_MERGE,
        ACTION_FAILED_REBASE,
        ACTION_FAILED_PUSH,
    }

    PLURAL_ACTIONS = {
        ACTION_NEW_STRING: ngettext_lazy(
            "New string to translate", "New strings to translate"
        ),
    }
    AUTO_ACTIONS = {
        # Translators: Name of event in the history
        ACTION_LOCK: gettext_lazy("Component automatically locked"),
        # Translators: Name of event in the history
        ACTION_UNLOCK: gettext_lazy("Component automatically unlocked"),
    }

    unit = models.ForeignKey("Unit", null=True, on_delete=models.deletion.CASCADE)
    language = models.ForeignKey(
        "lang.Language", null=True, on_delete=models.deletion.CASCADE
    )
    project = models.ForeignKey("Project", null=True, on_delete=models.deletion.CASCADE)
    component = models.ForeignKey(
        "Component", null=True, on_delete=models.deletion.CASCADE
    )
    translation = models.ForeignKey(
        "Translation", null=True, on_delete=models.deletion.CASCADE
    )
    comment = models.ForeignKey(
        "Comment", null=True, on_delete=models.deletion.SET_NULL
    )
    suggestion = models.ForeignKey(
        "Suggestion", null=True, on_delete=models.deletion.SET_NULL
    )
    announcement = models.ForeignKey(
        "Announcement", null=True, on_delete=models.deletion.SET_NULL
    )
    screenshot = models.ForeignKey(
        "screenshots.Screenshot", null=True, on_delete=models.deletion.SET_NULL
    )
    alert = models.ForeignKey("Alert", null=True, on_delete=models.deletion.SET_NULL)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL, null=True, on_delete=models.deletion.CASCADE
    )
    author = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        null=True,
        related_name="author_set",
        on_delete=models.deletion.CASCADE,
    )
    timestamp = models.DateTimeField(auto_now_add=True, db_index=True)
    action = models.IntegerField(
        choices=ACTION_CHOICES, default=ACTION_CHANGE, db_index=True
    )
    target = models.TextField(default="", blank=True)
    old = models.TextField(default="", blank=True)
    details = JSONField()

    objects = ChangeManager.from_queryset(ChangeQuerySet)()

    class Meta:
        app_label = "trans"
        index_together = [
            ("translation", "action", "timestamp"),
        ]
        verbose_name = "history event"
        verbose_name_plural = "history events"

    def __str__(self):
        return _("%(action)s at %(time)s on %(translation)s by %(user)s") % {
            "action": self.get_action_display(),
            "time": self.timestamp,
            "translation": self.translation,
            "user": self.get_user_display(False),
        }

    def save(self, *args, **kwargs):
        from weblate.accounts.tasks import notify_change

        # Fill in the related-object hierarchy from the most specific
        # object that was provided (unit/screenshot -> translation ->
        # component -> project).
        if self.unit:
            self.translation = self.unit.translation
        if self.screenshot:
            self.translation = self.screenshot.translation
        if self.translation:
            self.component = self.translation.component
            self.language = self.translation.language
        if self.component:
            self.project = self.component.project
        super().save(*args, **kwargs)
        # Notifications are dispatched only after the surrounding
        # transaction commits, so the task can fetch the saved row.
        transaction.on_commit(lambda: notify_change.delay(self.pk))

    def get_absolute_url(self):
        """Return link either to unit or translation."""
        if self.unit is not None:
            return self.unit.get_absolute_url()
        if self.screenshot is not None:
            return self.screenshot.get_absolute_url()
        if self.translation is not None:
            if self.action == self.ACTION_NEW_STRING:
                return self.translation.get_translate_url() + "?q=is:untranslated"
            return self.translation.get_absolute_url()
        if self.component is not None:
            return self.component.get_absolute_url()
        if self.project is not None:
            return self.project.get_absolute_url()
        return None

    def __init__(self, *args, **kwargs):
        # Scratch space used while deciding which notifications to send.
        self.notify_state = {}
        super().__init__(*args, **kwargs)

    @property
    def plural_count(self):
        return self.details.get("count", 1)

    @property
    def auto_status(self):
        return self.details.get("auto", False)

    def get_action_display(self):
        if self.action in self.PLURAL_ACTIONS:
            return self.PLURAL_ACTIONS[self.action] % self.plural_count
        if self.action in self.AUTO_ACTIONS and self.auto_status:
            return str(self.AUTO_ACTIONS[self.action])
        return str(self.ACTIONS_DICT.get(self.action, self.action))

    def is_merge_failure(self):
        return self.action in self.ACTIONS_MERGE_FAILURE

    def can_revert(self):
        return (
            self.unit is not None
            and self.old
            and self.action in self.ACTIONS_REVERTABLE
        )

    def show_source(self):
        """Whether to show content as source change."""
        return self.action == self.ACTION_SOURCE_CHANGE

    def show_content(self):
        """Whether to show content as translation."""
        return (
            self.action in self.ACTIONS_SHOW_CONTENT
            or self.action in self.ACTIONS_REVERTABLE
        )

    def get_details_display(self):  # noqa: C901
        from weblate.utils.markdown import render_markdown

        if self.action in (self.ACTION_ANNOUNCEMENT, self.ACTION_AGREEMENT_CHANGE):
            return render_markdown(self.target)
        if self.action == self.ACTION_LICENSE_CHANGE:
            not_available = pgettext("License information not available", "N/A")
            return _(
                "License for component %(component)s was changed "
                "from %(old)s to %(target)s."
            ) % {
                "component": self.component,
                "old": self.old or not_available,
                "target": self.target or not_available,
            }
        # Following rendering relies on details present
        if not self.details:
            return ""
        user_actions = {
            self.ACTION_ADD_USER,
            self.ACTION_INVITE_USER,
            self.ACTION_REMOVE_USER,
        }
        if self.action == self.ACTION_ACCESS_EDIT:
            for number, name in Project.ACCESS_CHOICES:
                if number == self.details["access_control"]:
                    return name
            # Typo fix: was "Unknonwn"
            return "Unknown {}".format(self.details["access_control"])
        if self.action in user_actions:
            if "group" in self.details:
                return "{username} ({group})".format(**self.details)
            return self.details["username"]
        if self.action in (
            self.ACTION_ADDED_LANGUAGE,
            self.ACTION_REQUESTED_LANGUAGE,
        ):
            try:
                return Language.objects.get(code=self.details["language"])
            except Language.DoesNotExist:
                return self.details["language"]
        if self.action == self.ACTION_ALERT:
            try:
                return ALERTS[self.details["alert"]].verbose
            except KeyError:
                return self.details["alert"]
        if self.action == self.ACTION_PARSE_ERROR:
            return "(unknown): {error_message}".format(**self.details)
        if self.action == self.ACTION_HOOK:
            return "{service_long_name}: {repo_url}, {branch}".format(**self.details)
        if self.action == self.ACTION_COMMENT and "comment" in self.details:
            return render_markdown(self.details["comment"])
        return ""

    def get_distance(self):
        try:
            return damerau_levenshtein_distance(self.old, self.target)
        except MemoryError:
            # Too long strings
            return abs(len(self.old) - len(self.target))
| gpl-3.0 |
Qalthos/ansible | lib/ansible/modules/network/fortios/fortios_firewall_internet_service_group.py | 24 | 8318 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_internet_service_group
short_description: Configure group of Internet Service in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure firewall feature and internet_service_group category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
firewall_internet_service_group:
description:
- Configure group of Internet Service.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
comment:
description:
- Comment.
member:
description:
- Internet Service group member.
suboptions:
id:
description:
- Internet Service ID. Source firewall.internet-service.id.
required: true
name:
description:
- Internet Service group name.
required: true
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure group of Internet Service.
fortios_firewall_internet_service_group:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
firewall_internet_service_group:
state: "present"
comment: "Comment."
member:
-
id: "5 (source firewall.internet-service.id)"
name: "default_name_6"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Open a session to the FortiGate/FortiOS device described in data."""
    fos.debug('on')
    # HTTPS is the default; only an explicit https=False turns it off.
    fos.https('on' if data.get('https', True) else 'off')
    fos.login(data['host'], data['username'], data['password'])
def filter_firewall_internet_service_group_data(json):
    """Return only the options relevant to this endpoint, dropping unset values."""
    option_list = ['comment', 'member', 'name']
    return {
        option: json[option]
        for option in option_list
        if option in json and json[option] is not None
    }
def firewall_internet_service_group(data, fos):
    """Create/update or delete the internet-service-group on the device.

    Chooses set() or delete() based on the requested state.
    """
    vdom = data['vdom']
    payload = data['firewall_internet_service_group']
    filtered_data = filter_firewall_internet_service_group_data(payload)
    state = payload['state']
    if state == "present":
        return fos.set('firewall',
                       'internet-service-group',
                       data=filtered_data,
                       vdom=vdom)
    if state == "absent":
        return fos.delete('firewall',
                          'internet-service-group',
                          mkey=filtered_data['name'],
                          vdom=vdom)
def fortios_firewall(data, fos):
    """Log in, dispatch to the configured firewall sub-resource handler
    and log out.

    Returns a (is_error, has_changed, result) tuple as expected by main().
    """
    login(data)

    # Map option names to handler functions instead of eval()-ing the name:
    # eval on a string is fragile and a needless code-injection hazard.
    methods = {
        'firewall_internet_service_group': firewall_internet_service_group,
    }
    resp = None
    for name, method in methods.items():
        if data[name]:
            resp = method(data, fos)
            break
    fos.logout()
    # NOTE(review): if no sub-resource was supplied, resp stays None and the
    # expression below raises; presumably argument validation prevents that.
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Module entry point: parse arguments, connect and apply configuration."""
    # Argument spec for the nested resource options.
    member_spec = {"id": {"required": True, "type": "int"}}
    resource_spec = {
        "required": False, "type": "dict",
        "options": {
            "state": {"required": True, "type": "str",
                      "choices": ["present", "absent"]},
            "comment": {"required": False, "type": "str"},
            "member": {"required": False, "type": "list",
                       "options": member_spec},
            "name": {"required": True, "type": "str"},
        },
    }
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": "False"},
        "firewall_internet_service_group": resource_spec,
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_firewall(module.params, fos)

    if is_error:
        module.fail_json(msg="Error in repo", meta=result)
    else:
        module.exit_json(changed=has_changed, meta=result)
| gpl-3.0 |
IEEERobotics/high-level | qwe/localizer/map.py | 1 | 2010 | #!/usr/bin/python
from numpy import array, zeros
import csv
# loads map into 2d list:
# [y][x] are map coordinates
# [0][0] is bottom left corner
class Map():
  """Occupancy-grid map.

  ``data`` is a 2D array indexed ``[y][x]`` in map coordinates with
  ``[0][0]`` at the bottom-left corner (CSV rows are stored top-down in the
  file and reversed on load).  ``scale`` is inches per grid element.
  """
  def __init__(self, filename = None, scale = 1, logger = None):
    self.logger = logger
    if filename:
      # Read the grid via a context manager; the file used to be leaked.
      with open(filename, 'r') as csv_file:
        data = list(csv.reader(csv_file))
      data = [ [int(x) for x in y] for y in data ]  # convert string to ints
      data.reverse()  # flip so row 0 is the bottom of the map
      self.data = array(data)
      self.scale = scale  # inches per element
      self.map_obj = None
      self.logger.debug("Map initialized from file: %s" % filename)
      self.logger.debug("Map dimensions %s" % self)

  def xy(self):
    """ Converts from matrix of 0s and 1s to an array of xy pairs.
        New coordinates are offset by 0.5 to represent center of wall (for plotting) """
    xy = []
    for y in range(len(self.data)):
      for x in range(len(self.data[0])):
        if self.data[y][x] == 1:
          xy.append([x+0.5,y+0.5])
    return array(xy)

  @classmethod
  def from_map_class(self, map_obj, logger = None):
    """Alternate constructor: build a Map from a map_class-style object."""
    logger.debug("Map initialized from map_class object")
    m = Map(logger = logger)
    m.map_obj = map_obj
    m.update()
    return m

  def update(self):
    """Repopulate grid data and scale from the attached map_class object."""
    self.logger.debug("Repopulating map data from class")
    # Translate cell descriptors to wall flags: descriptor value 8 is a wall.
    desc_to_walls = zeros(10,dtype=int)
    desc_to_walls[8] = 1
    self.data = array([desc_to_walls[i] for i in self.map_obj.grid[:][:]['desc']])
    self.scale = 1.0 / self.map_obj.scale
    self.logger.debug("Map dimensions %s" % self)

  @property
  def xdim(self):
    # Number of grid columns.
    return len(self.data[0])

  @property
  def ydim(self):
    # Number of grid rows.
    return len(self.data)

  @property
  def x_inches(self):
    return len(self.data[0]) * self.scale

  @property
  def y_inches(self):
    return len(self.data) * self.scale

  def __str__(self):
    return "Map: (%d, %d) = (%0.2f, %0.2f) inches" % (self.xdim, self.ydim, self.xdim * self.scale, self.ydim * self.scale)
| bsd-2-clause |
Anik1199/Kernel_taoshan | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
# Called by perf once before any events are processed.
def trace_begin():
	print "trace_begin"
	pass
# Called by perf after the last event; report events that had no handler.
def trace_end():
	print_unhandled()
# Handler for the irq:softirq_entry tracepoint: prints the common fields,
# the uncommon (context) fields, then the softirq vector as a symbol string.
def irq__softirq_entry(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	vec):
	print_header(event_name, common_cpu, common_secs, common_nsecs,
	common_pid, common_comm)
	print_uncommon(context)
	print "vec=%s\n" % \
	(symbol_str("irq__softirq_entry", "vec", vec)),
# Handler for the kmem:kmalloc tracepoint: prints the common fields plus the
# allocation details, rendering gfp_flags as a symbolic flag string.
def kmem__kmalloc(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	call_site, ptr, bytes_req, bytes_alloc,
	gfp_flags):
	print_header(event_name, common_cpu, common_secs, common_nsecs,
	common_pid, common_comm)
	print_uncommon(context)
	print "call_site=%u, ptr=%u, bytes_req=%u, " \
	"bytes_alloc=%u, gfp_flags=%s\n" % \
	(call_site, ptr, bytes_req, bytes_alloc,
	flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
# Fallback handler: count events that have no dedicated handler above.
def trace_unhandled(event_name, context, event_fields_dict):
    try:
        unhandled[event_name] += 1
    except TypeError:
        # presumably autodict yields a non-int for a missing key, so the
        # increment fails the first time and we seed the counter -- TODO confirm
        unhandled[event_name] = 1
# Print the fields common to all events as fixed-width columns (no newline).
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    print "%-20s %5u %05u.%09u %8u %-20s " % \
    (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
    % (common_pc(context), trace_flag_str(common_flags(context)), \
    common_lock_depth(context))
# Summarize the events that were not handled, as an event/count table.
def print_unhandled():
    keys = unhandled.keys()
    if not keys:
        return
    print "\nunhandled events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
                                 "-----------"),
    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
Habbie/pdns | regression-tests.recursor-dnssec/test_RootNXTrust.py | 4 | 3474 | import dns
import requests
import socket
from recursortests import RecursorTest
class RootNXTrustRecursorTest(RecursorTest):
    """Shared base for the root-nx-trust tests; exposes the recursor's
    outgoing-query counter via its web service API."""

    def getOutgoingQueriesCount(self):
        """Return the 'all-outqueries' statistic, or 0 if it is absent."""
        headers = {'x-api-key': self._apiKey}
        url = 'http://127.0.0.1:' + str(self._wsPort) + '/api/v1/servers/localhost/statistics'
        r = requests.get(url, headers=headers, timeout=self._wsTimeout)
        self.assertTrue(r)
        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(r.status_code, 200)
        self.assertTrue(r.json())
        content = r.json()
        for entry in content:
            if entry['name'] == 'all-outqueries':
                return int(entry['value'])
        return 0
class testRootNXTrustDisabled(RootNXTrustRecursorTest):
    # Recursor with root-nx-trust disabled; the web server is enabled so the
    # test can read the 'all-outqueries' statistic between queries.
    _confdir = 'RootNXTrustDisabled'
    _wsPort = 8042
    _wsTimeout = 2
    _wsPassword = 'secretpassword'
    _apiKey = 'secretapikey'
    _config_template = """
    root-nx-trust=no
    qname-minimization=no
    webserver=yes
    webserver-port=%d
    webserver-address=127.0.0.1
    webserver-password=%s
    api-key=%s
    """ % (_wsPort, _wsPassword, _apiKey)
    def testRootNXTrust(self):
        """
        Check that, with root-nx-trust disabled, we still query the root for www2.nx-example.
        after receiving a NXD from "." for nx-example. as an answer for www.nx-example.
        """
        # first query nx.example.
        before = self.getOutgoingQueriesCount()
        query = dns.message.make_query('www.nx-example.', 'A')
        res = self.sendUDPQuery(query)
        self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
        print(res)
        self.assertAuthorityHasSOA(res)
        # check that we sent one query to the root
        after = self.getOutgoingQueriesCount()
        self.assertEqual(after, before + 1)
        # then query nx2.example.
        before = after
        query = dns.message.make_query('www2.nx-example.', 'A')
        res = self.sendUDPQuery(query)
        self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
        self.assertAuthorityHasSOA(res)
        # without root-nx-trust the NXDOMAIN from "." is not generalized,
        # so a second query to the root is expected here
        after = self.getOutgoingQueriesCount()
        self.assertEqual(after, before + 1)
class testRootNXTrustEnabled(RootNXTrustRecursorTest):
    # Recursor with root-nx-trust enabled; the web server is enabled so the
    # test can read the 'all-outqueries' statistic between queries.
    _confdir = 'RootNXTrustEnabled'
    _wsPort = 8042
    _wsTimeout = 2
    _wsPassword = 'secretpassword'
    _apiKey = 'secretapikey'
    _config_template = """
    root-nx-trust=yes
    webserver=yes
    webserver-port=%d
    webserver-address=127.0.0.1
    webserver-password=%s
    api-key=%s
    """ % (_wsPort, _wsPassword, _apiKey)
    def testRootNXTrust(self):
        """
        Check that, with root-nx-trust enabled, we don't query the root for www2.nx-example.
        after receiving a NXD from "." for nx-example. as an answer for www.nx-example.
        """
        # first query nx.example.
        before = self.getOutgoingQueriesCount()
        query = dns.message.make_query('www.nx-example.', 'A')
        res = self.sendUDPQuery(query)
        self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
        print(res)
        self.assertAuthorityHasSOA(res)
        # check that we sent one query to the root
        after = self.getOutgoingQueriesCount()
        self.assertEqual(after, before + 1)
        # then query nx2.example.
        before = after
        query = dns.message.make_query('www2.nx-example.', 'A')
        res = self.sendUDPQuery(query)
        self.assertRcodeEqual(res, dns.rcode.NXDOMAIN)
        self.assertAuthorityHasSOA(res)
        # the earlier NXDOMAIN from "." covers this name too, so no
        # additional outgoing query is expected
        after = self.getOutgoingQueriesCount()
        self.assertEqual(after, before)
| gpl-2.0 |
nitish116/pystache | pystache/parser.py | 28 | 11623 | # coding: utf-8
"""
Exposes a parse() function to parse template strings.
"""
import re
from pystache import defaults
from pystache.parsed import ParsedTemplate
END_OF_LINE_CHARACTERS = [u'\r', u'\n']
NON_BLANK_RE = re.compile(ur'^(.)', re.M)
# TODO: add some unit tests for this.
# TODO: add a test case that checks for spurious spaces.
# TODO: add test cases for delimiters.
def parse(template, delimiters=None):
    """
    Parse a unicode template string and return a ParsedTemplate instance.

    Arguments:

      template: a unicode template string.

      delimiters: a 2-tuple of delimiters.  Defaults to the package default.

    Examples:

    >>> parsed = parse(u"Hey {{#who}}{{name}}!{{/who}}")
    >>> print str(parsed).replace('u', '')  # This is a hack to get the test to pass both in Python 2 and 3.
    ['Hey ', _SectionNode(key='who', index_begin=12, index_end=21, parsed=[_EscapeNode(key='name'), '!'])]

    """
    # The parser operates on unicode only; callers must decode byte strings
    # before passing them in.
    if type(template) is not unicode:
        raise Exception("Template is not unicode: %s" % type(template))
    parser = _Parser(delimiters)
    return parser.parse(template)
def _compile_template_re(delimiters):
"""
Return a regular expresssion object (re.RegexObject) instance.
"""
# The possible tag type characters following the opening tag,
# excluding "=" and "{".
tag_types = "!>&/#^"
# TODO: are we following this in the spec?
#
# The tag's content MUST be a non-whitespace character sequence
# NOT containing the current closing delimiter.
#
tag = r"""
(?P<whitespace>[\ \t]*)
%(otag)s \s*
(?:
(?P<change>=) \s* (?P<delims>.+?) \s* = |
(?P<raw>{) \s* (?P<raw_name>.+?) \s* } |
(?P<tag>[%(tag_types)s]?) \s* (?P<tag_key>[\s\S]+?)
)
\s* %(ctag)s
""" % {'tag_types': tag_types, 'otag': re.escape(delimiters[0]), 'ctag': re.escape(delimiters[1])}
return re.compile(tag, re.VERBOSE)
class ParsingError(Exception):
    """Raised when a template cannot be parsed."""
    pass
## Node types
def _format(obj, exclude=None):
if exclude is None:
exclude = []
exclude.append('key')
attrs = obj.__dict__
names = list(set(attrs.keys()) - set(exclude))
names.sort()
names.insert(0, 'key')
args = ["%s=%s" % (name, repr(attrs[name])) for name in names]
return "%s(%s)" % (obj.__class__.__name__, ", ".join(args))
class _CommentNode(object):
    """A parse-tree node for a comment; contributes nothing when rendered."""
    def __repr__(self):
        return _format(self)
    def render(self, engine, context):
        return u''
class _ChangeNode(object):
    """A parse-tree node for a delimiter change; stores the new delimiters
    and contributes nothing when rendered."""
    def __init__(self, delimiters):
        self.delimiters = delimiters
    def __repr__(self):
        return _format(self)
    def render(self, engine, context):
        return u''
class _EscapeNode(object):
def __init__(self, key):
self.key = key
def __repr__(self):
return _format(self)
def render(self, engine, context):
s = engine.fetch_string(context, self.key)
return engine.escape(s)
class _LiteralNode(object):
def __init__(self, key):
self.key = key
def __repr__(self):
return _format(self)
def render(self, engine, context):
s = engine.fetch_string(context, self.key)
return engine.literal(s)
class _PartialNode(object):
    """A parse-tree node for a partial tag: resolves the named partial
    template and renders it, prefixing each non-blank line with the tag's
    indentation."""
    def __init__(self, key, indent):
        self.key = key
        self.indent = indent
    def __repr__(self):
        return _format(self)
    def render(self, engine, context):
        template = engine.resolve_partial(self.key)
        # Indent before rendering.
        template = re.sub(NON_BLANK_RE, self.indent + ur'\1', template)
        return engine.render(template, context)
class _InvertedNode(object):
def __init__(self, key, parsed_section):
self.key = key
self.parsed_section = parsed_section
def __repr__(self):
return _format(self)
def render(self, engine, context):
# TODO: is there a bug because we are not using the same
# logic as in fetch_string()?
data = engine.resolve_context(context, self.key)
# Note that lambdas are considered truthy for inverted sections
# per the spec.
if data:
return u''
return self.parsed_section.render(engine, context)
class _SectionNode(object):
# TODO: the template_ and parsed_template_ arguments don't both seem
# to be necessary. Can we remove one of them? For example, if
# callable(data) is True, then the initial parsed_template isn't used.
def __init__(self, key, parsed, delimiters, template, index_begin, index_end):
self.delimiters = delimiters
self.key = key
self.parsed = parsed
self.template = template
self.index_begin = index_begin
self.index_end = index_end
def __repr__(self):
return _format(self, exclude=['delimiters', 'template'])
def render(self, engine, context):
values = engine.fetch_section_data(context, self.key)
parts = []
for val in values:
if callable(val):
# Lambdas special case section rendering and bypass pushing
# the data value onto the context stack. From the spec--
#
# When used as the data value for a Section tag, the
# lambda MUST be treatable as an arity 1 function, and
# invoked as such (passing a String containing the
# unprocessed section contents). The returned value
# MUST be rendered against the current delimiters, then
# interpolated in place of the section.
#
# Also see--
#
# https://github.com/defunkt/pystache/issues/113
#
# TODO: should we check the arity?
val = val(self.template[self.index_begin:self.index_end])
val = engine._render_value(val, context, delimiters=self.delimiters)
parts.append(val)
continue
context.push(val)
parts.append(self.parsed.render(engine, context))
context.pop()
return unicode(''.join(parts))
class _Parser(object):
    """Compiles a template string into a tree of renderable nodes.

    The parser is stateful: delimiter-change tags recompile the tag regex
    mid-parse, and open sections are tracked on an explicit stack.
    """
    # Current delimiter pair, e.g. ('{{', '}}').
    _delimiters = None
    # Compiled regex matching a single tag with the current delimiters.
    _template_re = None
    def __init__(self, delimiters=None):
        if delimiters is None:
            delimiters = defaults.DELIMITERS
        self._delimiters = delimiters
    def _compile_delimiters(self):
        self._template_re = _compile_template_re(self._delimiters)
    def _change_delimiters(self, delimiters):
        self._delimiters = delimiters
        self._compile_delimiters()
    def parse(self, template):
        """
        Parse a template string and return a ParsedTemplate instance.

        This method uses the current tag delimiters.

        Arguments:

          template: a unicode string that is the template to parse.

        """
        self._compile_delimiters()
        start_index = 0
        content_end_index, parsed_section, section_key = None, None, None
        parsed_template = ParsedTemplate()
        # Stack of (tag_type, start_index, section_key, parsed_template)
        # tuples, one per currently-open section.
        states = []
        while True:
            match = self._template_re.search(template, start_index)
            if match is None:
                break
            match_index = match.start()
            end_index = match.end()
            matches = match.groupdict()
            # Normalize the matches dictionary.
            if matches['change'] is not None:
                matches.update(tag='=', tag_key=matches['delims'])
            elif matches['raw'] is not None:
                matches.update(tag='&', tag_key=matches['raw_name'])
            tag_type = matches['tag']
            tag_key = matches['tag_key']
            leading_whitespace = matches['whitespace']
            # Standalone (non-interpolation) tags consume the entire line,
            # both leading whitespace and trailing newline.
            did_tag_begin_line = match_index == 0 or template[match_index - 1] in END_OF_LINE_CHARACTERS
            did_tag_end_line = end_index == len(template) or template[end_index] in END_OF_LINE_CHARACTERS
            is_tag_interpolating = tag_type in ['', '&']
            if did_tag_begin_line and did_tag_end_line and not is_tag_interpolating:
                # Swallow a trailing newline ('\r', '\n', or '\r\n').
                if end_index < len(template):
                    end_index += template[end_index] == '\r' and 1 or 0
                if end_index < len(template):
                    end_index += template[end_index] == '\n' and 1 or 0
            elif leading_whitespace:
                # Interpolating tag: keep the whitespace as literal text.
                match_index += len(leading_whitespace)
                leading_whitespace = ''
            # Avoid adding spurious empty strings to the parse tree.
            if start_index != match_index:
                parsed_template.add(template[start_index:match_index])
            start_index = end_index
            if tag_type in ('#', '^'):
                # Cache current state.
                state = (tag_type, end_index, section_key, parsed_template)
                states.append(state)
                # Initialize new state
                section_key, parsed_template = tag_key, ParsedTemplate()
                continue
            if tag_type == '/':
                if tag_key != section_key:
                    raise ParsingError("Section end tag mismatch: %s != %s" % (tag_key, section_key))
                # Restore previous state with newly found section data.
                parsed_section = parsed_template
                (tag_type, section_start_index, section_key, parsed_template) = states.pop()
                node = self._make_section_node(template, tag_type, tag_key, parsed_section,
                                               section_start_index, match_index)
            else:
                node = self._make_interpolation_node(tag_type, tag_key, leading_whitespace)
            parsed_template.add(node)
        # Avoid adding spurious empty strings to the parse tree.
        if start_index != len(template):
            parsed_template.add(template[start_index:])
        return parsed_template
    def _make_interpolation_node(self, tag_type, tag_key, leading_whitespace):
        """
        Create and return a non-section node for the parse tree.
        """
        # TODO: switch to using a dictionary instead of a bunch of ifs and elifs.
        if tag_type == '!':
            return _CommentNode()
        if tag_type == '=':
            # Delimiter change takes effect immediately for the rest of
            # the parse.
            delimiters = tag_key.split()
            self._change_delimiters(delimiters)
            return _ChangeNode(delimiters)
        if tag_type == '':
            return _EscapeNode(tag_key)
        if tag_type == '&':
            return _LiteralNode(tag_key)
        if tag_type == '>':
            return _PartialNode(tag_key, leading_whitespace)
        raise Exception("Invalid symbol for interpolation tag: %s" % repr(tag_type))
    def _make_section_node(self, template, tag_type, tag_key, parsed_section,
                           section_start_index, section_end_index):
        """
        Create and return a section node for the parse tree.
        """
        if tag_type == '#':
            return _SectionNode(tag_key, parsed_section, self._delimiters,
                                template, section_start_index, section_end_index)
        if tag_type == '^':
            return _InvertedNode(tag_key, parsed_section)
        raise Exception("Invalid symbol for section tag: %s" % repr(tag_type))
| mit |
eduardocasarin/Arduino | arduino-core/src/processing/app/i18n/python/requests/status_codes.py | 252 | 3043 | # -*- coding: utf-8 -*-
from .structures import LookupDict
# Mapping of HTTP status code -> tuple of attribute aliases.  Each alias
# becomes an attribute on the ``codes`` LookupDict below, e.g.
# ``codes.ok == 200``.
_codes = {

    # Informational.
    100: ('continue',),
    101: ('switching_protocols',),
    102: ('processing',),
    103: ('checkpoint',),
    122: ('uri_too_long', 'request_uri_too_long'),
    200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
    201: ('created',),
    202: ('accepted',),
    203: ('non_authoritative_info', 'non_authoritative_information'),
    204: ('no_content',),
    205: ('reset_content', 'reset'),
    206: ('partial_content', 'partial'),
    207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
    208: ('im_used',),

    # Redirection.
    300: ('multiple_choices',),
    301: ('moved_permanently', 'moved', '\\o-'),
    302: ('found',),
    303: ('see_other', 'other'),
    304: ('not_modified',),
    305: ('use_proxy',),
    306: ('switch_proxy',),
    307: ('temporary_redirect', 'temporary_moved', 'temporary'),
    308: ('resume_incomplete', 'resume'),

    # Client Error.
    400: ('bad_request', 'bad'),
    401: ('unauthorized',),
    402: ('payment_required', 'payment'),
    403: ('forbidden',),
    404: ('not_found', '-o-'),
    405: ('method_not_allowed', 'not_allowed'),
    406: ('not_acceptable',),
    407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
    408: ('request_timeout', 'timeout'),
    409: ('conflict',),
    410: ('gone',),
    411: ('length_required',),
    412: ('precondition_failed', 'precondition'),
    413: ('request_entity_too_large',),
    414: ('request_uri_too_large',),
    415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
    416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
    417: ('expectation_failed',),
    418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
    422: ('unprocessable_entity', 'unprocessable'),
    423: ('locked',),
    424: ('failed_dependency', 'dependency'),
    425: ('unordered_collection', 'unordered'),
    426: ('upgrade_required', 'upgrade'),
    # NOTE(review): 'precondition' is also an alias of 412 above; since dict
    # iteration order decides which setattr wins, the winner is unspecified
    # on older Pythons — verify intent.
    428: ('precondition_required', 'precondition'),
    429: ('too_many_requests', 'too_many'),
    431: ('header_fields_too_large', 'fields_too_large'),
    444: ('no_response', 'none'),
    449: ('retry_with', 'retry'),
    450: ('blocked_by_windows_parental_controls', 'parental_controls'),
    499: ('client_closed_request',),

    # Server Error.
    500: ('internal_server_error', 'server_error', '/o\\', '✗'),
    501: ('not_implemented',),
    502: ('bad_gateway',),
    503: ('service_unavailable', 'unavailable'),
    504: ('gateway_timeout',),
    505: ('http_version_not_supported', 'http_version'),
    506: ('variant_also_negotiates',),
    507: ('insufficient_storage',),
    509: ('bandwidth_limit_exceeded', 'bandwidth'),
    510: ('not_extended',),
}

# Register every alias as an attribute on the lookup object; also register
# an upper-case variant, except for the backslash-art aliases (e.g. '\\o/'),
# which have no meaningful upper-case form.
codes = LookupDict(name='status_codes')
for (code, titles) in list(_codes.items()):
    for title in titles:
        setattr(codes, title, code)
        if not title.startswith('\\'):
            setattr(codes, title.upper(), code)
| lgpl-2.1 |
yd0str/infernal-twin | build/pillow/Tests/test_image_getpixel.py | 12 | 1416 | from helper import unittest, PillowTestCase
from PIL import Image
# Disable the CFFI-based pixel access path for these tests.
Image.USE_CFFI_ACCESS = False
def color(mode):
    """Return a test color for *mode*.

    Single-band modes get the scalar 1; multi-band modes get a tuple
    (1, 2, ..., n) with one distinct component per band.
    """
    bands = Image.getmodebands(mode)
    if bands == 1:
        return 1
    return tuple(range(1, bands + 1))
class TestImageGetPixel(PillowTestCase):
    """Exercises Image.putpixel/getpixel round-trips across pixel modes."""

    def check(self, mode, c=None):
        """Check both putpixel round-trip and constructor fill for *mode*."""
        if not c:
            c = color(mode)

        # check putpixel
        im = Image.new(mode, (1, 1), None)
        im.putpixel((0, 0), c)
        self.assertEqual(
            im.getpixel((0, 0)), c,
            "put/getpixel roundtrip failed for mode %s, color %s" % (mode, c))

        # check initial color
        im = Image.new(mode, (1, 1), c)
        self.assertEqual(
            im.getpixel((0, 0)), c,
            # Bug fix: the message previously interpolated the module-level
            # ``color`` function object instead of the color value ``c``.
            "initial color failed for mode %s, color %s " % (mode, c))

    def test_basic(self):
        for mode in ("1", "L", "LA", "I", "I;16", "I;16B", "F",
                     "P", "PA", "RGB", "RGBA", "RGBX", "CMYK", "YCbCr"):
            self.check(mode)

    def test_signedness(self):
        # see https://github.com/python-pillow/Pillow/issues/452
        # pixelaccess is using signed int* instead of uint*
        for mode in ("I;16", "I;16B"):
            self.check(mode, 2**15-1)
            self.check(mode, 2**15)
            self.check(mode, 2**15+1)
            self.check(mode, 2**16-1)
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
    unittest.main()
# End of file
| gpl-3.0 |
AnishWalawalkar/weekdays | weekdays/weekdays.py | 1 | 2474 | '''python module to work with weekdays'''
import time
from datetime import timedelta
from datetime import datetime
# Public API of the module.  Note that the internal helper
# ``weekend_offset`` is not exported.
__all__ = [
    'is_weekday', 'weekdays_passed', 'weekdays_until',
    'date_range', 'prev_weekday', 'next_weekday', 'time_to_datetime'
]
def time_to_datetime(time_struct=None, time_in_seconds=None):
    """Helper to convert a ``time.struct_time`` or a POSIX timestamp to
    a ``datetime``.

    Exactly one of the keyword arguments should be supplied; when both
    are given, *time_struct* takes precedence.

    Raises:
        Exception: if neither argument is provided, or if *time_struct*
            is not a valid time tuple.
    """
    # Use explicit None checks so a timestamp of 0 (the Unix epoch) is
    # accepted; the original truthiness test wrongly rejected it.
    if time_struct is None and time_in_seconds is None:
        raise Exception(
            'One of "time_struct" or "time_in_seconds" keyword arguments must be provided')
    if time_struct is not None:
        try:
            time_in_seconds = time.mktime(time_struct)
        except TypeError as e:
            raise Exception(e)
    return datetime.fromtimestamp(time_in_seconds)
def is_weekday(date):
    """Return True when *date* falls on Monday through Friday."""
    # Monday..Friday are weekday() values 0..4.
    return date.weekday() < 5
def weekdays_passed(start, end):
    """Return the number of weekdays between *start* and *end*.

    Mirror of :func:`weekdays_until` for the case start > end; it simply
    swaps the arguments.
    """
    return weekdays_until(end, start)
def weekdays_until(start, end):
    """Return the number of weekdays between *start* and *end*.

    Expects end > start; returns 0 when *end* precedes *start*.
    """
    if end < start:
        return 0
    daydiff = end.weekday() - start.weekday()
    # Whole weeks contribute 5 weekdays each; the remainder corrects for
    # spans that start or end inside a weekend.  Floor division (//) keeps
    # the result an int on Python 3, where the original / produced a float.
    return ((end - start).days - daydiff) // 7 * 5 + min(daydiff, 5) - (max(end.weekday() - 4, 0) % 5)
def date_range(start, end):
    """Yield each weekday in the half-open range [start, end).

    Yields nothing when *start* is after *end*.
    """
    if start > end:
        return
    current = start
    one_day = timedelta(days=1)
    while current != end:
        if is_weekday(current):
            yield current
        current += one_day
def weekend_offset(date, flag=False):
    """Internal helper: day offset for a weekend date.

    Saturday maps to 1 (or 2 when *flag* is set); any other day maps to
    2 (or 1 when *flag* is set).
    """
    is_saturday = date.weekday() == 5
    if flag:
        return 2 if is_saturday else 1
    return 1 if is_saturday else 2
def prev_weekday(date, num_days=1):
    """Return the date *num_days* weekdays before *date*.

    A negative *num_days* returns *date* unchanged.
    """
    if num_days < 0:
        return date
    step = timedelta(days=1)
    current = date
    remaining = num_days
    # Walk backwards one calendar day at a time, counting only weekdays.
    while remaining > 0:
        current -= step
        if is_weekday(current):
            remaining -= 1
    return current
def next_weekday(date, num_days=1):
    """Return the date *num_days* weekdays after *date*.

    A negative *num_days* returns *date* unchanged.
    """
    if num_days < 0:
        return date
    step = timedelta(days=1)
    current = date
    remaining = num_days
    # Walk forwards one calendar day at a time, counting only weekdays.
    while remaining > 0:
        current += step
        if is_weekday(current):
            remaining -= 1
    return current
| mit |
suneeth51/neutron | neutron/extensions/quotasv2.py | 25 | 5305 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import importutils
import webob
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import constants as const
from neutron.common import exceptions as n_exc
from neutron import manager
from neutron import quota
from neutron.quota import resource_registry
from neutron import wsgi
# Resource naming shared across the quota extension.
RESOURCE_NAME = 'quota'
RESOURCE_COLLECTION = RESOURCE_NAME + "s"
QUOTAS = quota.QUOTAS
# Driver path used by Quotasv2.get_description() to decide whether quotas
# are tracked per tenant.
DB_QUOTA_DRIVER = 'neutron.db.quota.driver.DbQuotaDriver'
# Attribute map for the 'quotas' collection; populated lazily by
# QuotaSetsController._update_attributes().
EXTENDED_ATTRIBUTES_2_0 = {
    RESOURCE_COLLECTION: {}
}
class QuotaSetsController(wsgi.Controller):
    """WSGI controller exposing per-tenant quota CRUD operations."""

    def __init__(self, plugin):
        self._resource_name = RESOURCE_NAME
        self._plugin = plugin
        self._driver = importutils.import_class(
            cfg.CONF.QUOTAS.quota_driver
        )
        # Lazily populate EXTENDED_ATTRIBUTES_2_0 on first update() call.
        self._update_extended_attributes = True

    def _update_attributes(self):
        # Fill the shared attribute map with one integer attribute per
        # registered quota resource; -1 means unlimited.
        for quota_resource in resource_registry.get_all_resources().keys():
            attr_dict = EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION]
            attr_dict[quota_resource] = {
                'allow_post': False,
                'allow_put': True,
                'convert_to': attributes.convert_to_int,
                'validate': {'type:range': [-1, const.DB_INTEGER_MAX_VALUE]},
                'is_visible': True}
        self._update_extended_attributes = False

    def _get_quotas(self, request, tenant_id):
        # Delegate to the configured quota driver.
        return self._driver.get_tenant_quotas(
            request.context,
            resource_registry.get_all_resources(),
            tenant_id)

    def create(self, request, body=None):
        # Quotas are created implicitly; only PUT/GET/DELETE make sense.
        msg = _('POST requests are not supported on this resource.')
        raise webob.exc.HTTPNotImplemented(msg)

    def index(self, request):
        # Admin-only: list quotas for all tenants.
        context = request.context
        self._check_admin(context)
        return {self._resource_name + "s":
                self._driver.get_all_quotas(
                    context, resource_registry.get_all_resources())}

    def tenant(self, request):
        """Retrieve the tenant info in context."""
        context = request.context
        if not context.tenant_id:
            raise n_exc.QuotaMissingTenant()
        return {'tenant': {'tenant_id': context.tenant_id}}

    def show(self, request, id):
        # A tenant may view its own quotas; admin may view any tenant's.
        if id != request.context.tenant_id:
            self._check_admin(request.context,
                              reason=_("Only admin is authorized "
                                       "to access quotas for another tenant"))
        return {self._resource_name: self._get_quotas(request, id)}

    def _check_admin(self, context,
                     reason=_("Only admin can view or configure quota")):
        if not context.is_admin:
            raise n_exc.AdminRequired(reason=reason)

    def delete(self, request, id):
        # Admin-only: reset the tenant's quotas to the defaults.
        self._check_admin(request.context)
        self._driver.delete_tenant_quota(request.context, id)

    def update(self, request, id, body=None):
        # Admin-only: set one or more quota limits for the tenant.
        self._check_admin(request.context)
        if self._update_extended_attributes:
            self._update_attributes()
        body = base.Controller.prepare_request_body(
            request.context, body, False, self._resource_name,
            EXTENDED_ATTRIBUTES_2_0[RESOURCE_COLLECTION])
        for key, value in body[self._resource_name].items():
            self._driver.update_quota_limit(request.context, id, key, value)
        return {self._resource_name: self._get_quotas(request, id)}
class Quotasv2(extensions.ExtensionDescriptor):
    """Quotas management support."""

    @classmethod
    def get_name(cls):
        return "Quota management support"

    @classmethod
    def get_alias(cls):
        return RESOURCE_COLLECTION

    @classmethod
    def get_description(cls):
        # The DB driver tracks quotas per tenant; other drivers may not.
        description = 'Expose functions for quotas management'
        if cfg.CONF.QUOTAS.quota_driver == DB_QUOTA_DRIVER:
            description += ' per tenant'
        return description

    @classmethod
    def get_updated(cls):
        return "2012-07-29T10:00:00-00:00"

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        # Wire the controller to the current core plugin and expose the
        # extra GET /quotas/tenant collection action.
        controller = resource.Resource(
            QuotaSetsController(manager.NeutronManager.get_plugin()),
            faults=base.FAULT_MAP)
        return [extensions.ResourceExtension(
            Quotasv2.get_alias(),
            controller,
            collection_actions={'tenant': 'GET'})]

    def get_extended_resources(self, version):
        if version == "2.0":
            return EXTENDED_ATTRIBUTES_2_0
        else:
            return {}
| apache-2.0 |
thedrow/django | django/db/models/lookups.py | 194 | 16328 | import inspect
from copy import copy
from django.utils.functional import cached_property
from django.utils.six.moves import range
from .query_utils import QueryWrapper
class RegisterLookupMixin(object):
    """Mixin giving a class a per-class registry of lookups/transforms."""

    def _get_lookup(self, lookup_name):
        # Look in this class's registry first, then walk the MRO so
        # subclasses inherit their parents' registered lookups.
        try:
            return self.class_lookups[lookup_name]
        except KeyError:
            # To allow for inheritance, check parent class' class_lookups.
            for parent in inspect.getmro(self.__class__):
                if 'class_lookups' not in parent.__dict__:
                    continue
                if lookup_name in parent.class_lookups:
                    return parent.class_lookups[lookup_name]
        except AttributeError:
            # This class didn't have any class_lookups
            pass
        return None

    def get_lookup(self, lookup_name):
        # Fall back to the output field's lookups; only Lookup subclasses
        # are valid results here.
        found = self._get_lookup(lookup_name)
        if found is None and hasattr(self, 'output_field'):
            return self.output_field.get_lookup(lookup_name)
        if found is not None and not issubclass(found, Lookup):
            return None
        return found

    def get_transform(self, lookup_name):
        # Same as get_lookup(), but only Transform subclasses qualify.
        found = self._get_lookup(lookup_name)
        if found is None and hasattr(self, 'output_field'):
            return self.output_field.get_transform(lookup_name)
        if found is not None and not issubclass(found, Transform):
            return None
        return found

    @classmethod
    def register_lookup(cls, lookup):
        # The registry lives in the class's own __dict__ so subclasses do
        # not accidentally mutate their parent's registry.
        if 'class_lookups' not in cls.__dict__:
            cls.class_lookups = {}
        cls.class_lookups[lookup.lookup_name] = lookup
        return lookup

    @classmethod
    def _unregister_lookup(cls, lookup):
        """
        Removes given lookup from cls lookups. Meant to be used in
        tests only.
        """
        del cls.class_lookups[lookup.lookup_name]
class Transform(RegisterLookupMixin):
    """Base class for value transformations applied to a lookup's lhs."""

    # When True, the transform is also applied to the rhs of a lookup.
    bilateral = False

    def __init__(self, lhs, lookups):
        self.lhs = lhs
        # Keep a private copy of the remaining lookup path.
        self.init_lookups = lookups[:]

    def as_sql(self, compiler, connection):
        raise NotImplementedError

    @cached_property
    def output_field(self):
        return self.lhs.output_field

    def copy(self):
        return copy(self)

    def relabeled_clone(self, relabels):
        # NOTE: the local name shadows the module-level copy() import.
        copy = self.copy()
        copy.lhs = self.lhs.relabeled_clone(relabels)
        return copy

    def get_group_by_cols(self):
        return self.lhs.get_group_by_cols()

    def get_bilateral_transforms(self):
        # Collect bilateral transforms from the whole lhs chain, appending
        # this one if it is itself bilateral.
        if hasattr(self.lhs, 'get_bilateral_transforms'):
            bilateral_transforms = self.lhs.get_bilateral_transforms()
        else:
            bilateral_transforms = []
        if self.bilateral:
            bilateral_transforms.append((self.__class__, self.init_lookups))
        return bilateral_transforms

    @cached_property
    def contains_aggregate(self):
        return self.lhs.contains_aggregate
class Lookup(RegisterLookupMixin):
    """Base class for query lookups comparing an lhs expression to an rhs."""

    lookup_name = None

    def __init__(self, lhs, rhs):
        self.lhs, self.rhs = lhs, rhs
        self.rhs = self.get_prep_lookup()
        if hasattr(self.lhs, 'get_bilateral_transforms'):
            bilateral_transforms = self.lhs.get_bilateral_transforms()
        else:
            bilateral_transforms = []
        if bilateral_transforms:
            # We should warn the user as soon as possible if he is trying to apply
            # a bilateral transformation on a nested QuerySet: that won't work.
            # We need to import QuerySet here so as to avoid circular
            from django.db.models.query import QuerySet
            if isinstance(rhs, QuerySet):
                raise NotImplementedError("Bilateral transformations on nested querysets are not supported.")
        self.bilateral_transforms = bilateral_transforms

    def apply_bilateral_transforms(self, value):
        # Wrap the value in every bilateral transform of the lhs chain.
        for transform, lookups in self.bilateral_transforms:
            value = transform(value, lookups)
        return value

    def batch_process_rhs(self, compiler, connection, rhs=None):
        """Prepare an iterable rhs, returning (sql_placeholders, params)."""
        if rhs is None:
            rhs = self.rhs
        if self.bilateral_transforms:
            sqls, sqls_params = [], []
            for p in rhs:
                value = QueryWrapper('%s',
                    [self.lhs.output_field.get_db_prep_value(p, connection)])
                value = self.apply_bilateral_transforms(value)
                sql, sql_params = compiler.compile(value)
                sqls.append(sql)
                sqls_params.extend(sql_params)
        else:
            params = self.lhs.output_field.get_db_prep_lookup(
                self.lookup_name, rhs, connection, prepared=True)
            sqls, sqls_params = ['%s'] * len(params), params
        return sqls, sqls_params

    def get_prep_lookup(self):
        return self.lhs.output_field.get_prep_lookup(self.lookup_name, self.rhs)

    def get_db_prep_lookup(self, value, connection):
        return (
            '%s', self.lhs.output_field.get_db_prep_lookup(
                self.lookup_name, value, connection, prepared=True))

    def process_lhs(self, compiler, connection, lhs=None):
        lhs = lhs or self.lhs
        return compiler.compile(lhs)

    def process_rhs(self, compiler, connection):
        """Compile the rhs into (sql, params)."""
        value = self.rhs
        if self.bilateral_transforms:
            if self.rhs_is_direct_value():
                # Do not call get_db_prep_lookup here as the value will be
                # transformed before being used for lookup
                value = QueryWrapper("%s",
                    [self.lhs.output_field.get_db_prep_value(value, connection)])
            value = self.apply_bilateral_transforms(value)
        # Due to historical reasons there are a couple of different
        # ways to produce sql here. get_compiler is likely a Query
        # instance, _as_sql QuerySet and as_sql just something with
        # as_sql. Finally the value can of course be just plain
        # Python value.
        if hasattr(value, 'get_compiler'):
            value = value.get_compiler(connection=connection)
        if hasattr(value, 'as_sql'):
            sql, params = compiler.compile(value)
            return '(' + sql + ')', params
        if hasattr(value, '_as_sql'):
            sql, params = value._as_sql(connection=connection)
            return '(' + sql + ')', params
        else:
            return self.get_db_prep_lookup(value, connection)

    def rhs_is_direct_value(self):
        # True when rhs is a plain Python value rather than a SQL-capable
        # object (queryset, expression, query).
        return not(
            hasattr(self.rhs, 'as_sql') or
            hasattr(self.rhs, '_as_sql') or
            hasattr(self.rhs, 'get_compiler'))

    def relabeled_clone(self, relabels):
        new = copy(self)
        new.lhs = new.lhs.relabeled_clone(relabels)
        if hasattr(new.rhs, 'relabeled_clone'):
            new.rhs = new.rhs.relabeled_clone(relabels)
        return new

    def get_group_by_cols(self):
        cols = self.lhs.get_group_by_cols()
        if hasattr(self.rhs, 'get_group_by_cols'):
            cols.extend(self.rhs.get_group_by_cols())
        return cols

    def as_sql(self, compiler, connection):
        raise NotImplementedError

    @cached_property
    def contains_aggregate(self):
        return self.lhs.contains_aggregate or getattr(self.rhs, 'contains_aggregate', False)
class BuiltinLookup(Lookup):
    """Lookup rendered with a backend operator from connection.operators."""

    def process_lhs(self, compiler, connection, lhs=None):
        # Apply backend-specific casts around the compiled lhs SQL.
        lhs_sql, params = super(BuiltinLookup, self).process_lhs(
            compiler, connection, lhs)
        field_internal_type = self.lhs.output_field.get_internal_type()
        db_type = self.lhs.output_field.db_type(connection=connection)
        lhs_sql = connection.ops.field_cast_sql(
            db_type, field_internal_type) % lhs_sql
        lhs_sql = connection.ops.lookup_cast(self.lookup_name, field_internal_type) % lhs_sql
        return lhs_sql, params

    def as_sql(self, compiler, connection):
        lhs_sql, params = self.process_lhs(compiler, connection)
        rhs_sql, rhs_params = self.process_rhs(compiler, connection)
        params.extend(rhs_params)
        rhs_sql = self.get_rhs_op(connection, rhs_sql)
        return '%s %s' % (lhs_sql, rhs_sql), params

    def get_rhs_op(self, connection, rhs):
        # e.g. connection.operators['exact'] == '= %s'.
        return connection.operators[self.lookup_name] % rhs
# Registry of lookup_name -> lookup class used as the built-in defaults.
default_lookups = {}


class Exact(BuiltinLookup):
    lookup_name = 'exact'
default_lookups['exact'] = Exact


class IExact(BuiltinLookup):
    """Case-insensitive exact match."""
    lookup_name = 'iexact'

    def process_rhs(self, qn, connection):
        rhs, params = super(IExact, self).process_rhs(qn, connection)
        if params:
            # Let the backend normalize the parameter for case-insensitive
            # comparison.
            params[0] = connection.ops.prep_for_iexact_query(params[0])
        return rhs, params

default_lookups['iexact'] = IExact


class GreaterThan(BuiltinLookup):
    lookup_name = 'gt'
default_lookups['gt'] = GreaterThan


class GreaterThanOrEqual(BuiltinLookup):
    lookup_name = 'gte'
default_lookups['gte'] = GreaterThanOrEqual


class LessThan(BuiltinLookup):
    lookup_name = 'lt'
default_lookups['lt'] = LessThan


class LessThanOrEqual(BuiltinLookup):
    lookup_name = 'lte'
default_lookups['lte'] = LessThanOrEqual


class In(BuiltinLookup):
    """SQL IN lookup; handles both direct values and subqueries."""
    lookup_name = 'in'

    def process_rhs(self, compiler, connection):
        if self.rhs_is_direct_value():
            # rhs should be an iterable, we use batch_process_rhs
            # to prepare/transform those values
            rhs = list(self.rhs)
            if not rhs:
                # An empty IN list can never match anything.
                from django.db.models.sql.datastructures import EmptyResultSet
                raise EmptyResultSet
            sqls, sqls_params = self.batch_process_rhs(compiler, connection, rhs)
            placeholder = '(' + ', '.join(sqls) + ')'
            return (placeholder, sqls_params)
        else:
            return super(In, self).process_rhs(compiler, connection)

    def get_rhs_op(self, connection, rhs):
        return 'IN %s' % rhs

    def as_sql(self, compiler, connection):
        max_in_list_size = connection.ops.max_in_list_size()
        if self.rhs_is_direct_value() and (max_in_list_size and
                                           len(self.rhs) > max_in_list_size):
            # This is a special case for Oracle which limits the number of elements
            # which can appear in an 'IN' clause.  Split the values into
            # several OR-ed IN (...) groups of at most max_in_list_size.
            lhs, lhs_params = self.process_lhs(compiler, connection)
            rhs, rhs_params = self.batch_process_rhs(compiler, connection)
            in_clause_elements = ['(']
            params = []
            for offset in range(0, len(rhs_params), max_in_list_size):
                if offset > 0:
                    in_clause_elements.append(' OR ')
                in_clause_elements.append('%s IN (' % lhs)
                params.extend(lhs_params)
                sqls = rhs[offset: offset + max_in_list_size]
                sqls_params = rhs_params[offset: offset + max_in_list_size]
                param_group = ', '.join(sqls)
                in_clause_elements.append(param_group)
                in_clause_elements.append(')')
                params.extend(sqls_params)
            in_clause_elements.append(')')
            return ''.join(in_clause_elements), params
        else:
            return super(In, self).as_sql(compiler, connection)

default_lookups['in'] = In
class PatternLookup(BuiltinLookup):
    """Base for LIKE-style lookups (contains/startswith/endswith...)."""

    def get_rhs_op(self, connection, rhs):
        # Assume we are in startswith. We need to produce SQL like:
        #     col LIKE %s, ['thevalue%']
        # For python values we can (and should) do that directly in Python,
        # but if the value is for example reference to other column, then
        # we need to add the % pattern match to the lookup by something like
        #     col LIKE othercol || '%%'
        # So, for Python values we don't need any special pattern, but for
        # SQL reference values or SQL transformations we need the correct
        # pattern added.
        if (hasattr(self.rhs, 'get_compiler') or hasattr(self.rhs, 'as_sql')
                or hasattr(self.rhs, '_as_sql') or self.bilateral_transforms):
            pattern = connection.pattern_ops[self.lookup_name].format(connection.pattern_esc)
            return pattern.format(rhs)
        else:
            return super(PatternLookup, self).get_rhs_op(connection, rhs)


class Contains(PatternLookup):
    lookup_name = 'contains'

    def process_rhs(self, qn, connection):
        rhs, params = super(Contains, self).process_rhs(qn, connection)
        if params and not self.bilateral_transforms:
            # Wrap the escaped value in % wildcards on both sides.
            params[0] = "%%%s%%" % connection.ops.prep_for_like_query(params[0])
        return rhs, params

default_lookups['contains'] = Contains


class IContains(Contains):
    lookup_name = 'icontains'
default_lookups['icontains'] = IContains


class StartsWith(PatternLookup):
    lookup_name = 'startswith'

    def process_rhs(self, qn, connection):
        rhs, params = super(StartsWith, self).process_rhs(qn, connection)
        if params and not self.bilateral_transforms:
            # Trailing % wildcard only.
            params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
        return rhs, params

default_lookups['startswith'] = StartsWith


class IStartsWith(PatternLookup):
    lookup_name = 'istartswith'

    def process_rhs(self, qn, connection):
        rhs, params = super(IStartsWith, self).process_rhs(qn, connection)
        if params and not self.bilateral_transforms:
            params[0] = "%s%%" % connection.ops.prep_for_like_query(params[0])
        return rhs, params

default_lookups['istartswith'] = IStartsWith


class EndsWith(PatternLookup):
    lookup_name = 'endswith'

    def process_rhs(self, qn, connection):
        rhs, params = super(EndsWith, self).process_rhs(qn, connection)
        if params and not self.bilateral_transforms:
            # Leading % wildcard only.
            params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
        return rhs, params

default_lookups['endswith'] = EndsWith


class IEndsWith(PatternLookup):
    lookup_name = 'iendswith'

    def process_rhs(self, qn, connection):
        rhs, params = super(IEndsWith, self).process_rhs(qn, connection)
        if params and not self.bilateral_transforms:
            params[0] = "%%%s" % connection.ops.prep_for_like_query(params[0])
        return rhs, params

default_lookups['iendswith'] = IEndsWith
class Between(BuiltinLookup):
    # NOTE(review): this class is not registered in default_lookups and its
    # template interpolates the same placeholder twice — it appears unused;
    # verify before relying on it.
    def get_rhs_op(self, connection, rhs):
        return "BETWEEN %s AND %s" % (rhs, rhs)


class Range(BuiltinLookup):
    """Inclusive range lookup rendered as BETWEEN low AND high."""
    lookup_name = 'range'

    def get_rhs_op(self, connection, rhs):
        return "BETWEEN %s AND %s" % (rhs[0], rhs[1])

    def process_rhs(self, compiler, connection):
        if self.rhs_is_direct_value():
            # rhs should be an iterable of 2 values, we use batch_process_rhs
            # to prepare/transform those values
            return self.batch_process_rhs(compiler, connection)
        else:
            return super(Range, self).process_rhs(compiler, connection)

default_lookups['range'] = Range


class IsNull(BuiltinLookup):
    """IS NULL / IS NOT NULL depending on the boolean rhs."""
    lookup_name = 'isnull'

    def as_sql(self, compiler, connection):
        sql, params = compiler.compile(self.lhs)
        if self.rhs:
            return "%s IS NULL" % sql, params
        else:
            return "%s IS NOT NULL" % sql, params

default_lookups['isnull'] = IsNull


class Search(BuiltinLookup):
    """Backend-provided full-text search lookup."""
    lookup_name = 'search'

    def as_sql(self, compiler, connection):
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        sql_template = connection.ops.fulltext_search_sql(field_name=lhs)
        return sql_template, lhs_params + rhs_params

default_lookups['search'] = Search


class Regex(BuiltinLookup):
    lookup_name = 'regex'

    def as_sql(self, compiler, connection):
        # Prefer a native operator when the backend defines one; otherwise
        # fall back to the backend's regex_lookup() SQL template.
        if self.lookup_name in connection.operators:
            return super(Regex, self).as_sql(compiler, connection)
        else:
            lhs, lhs_params = self.process_lhs(compiler, connection)
            rhs, rhs_params = self.process_rhs(compiler, connection)
            sql_template = connection.ops.regex_lookup(self.lookup_name)
            return sql_template % (lhs, rhs), lhs_params + rhs_params

default_lookups['regex'] = Regex


class IRegex(Regex):
    lookup_name = 'iregex'
default_lookups['iregex'] = IRegex
| bsd-3-clause |
kaiyuanl/gem5 | src/arch/x86/isa/insts/general_purpose/no_operation.py | 91 | 2323 | # Copyright (c) 2007-2008 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop NOP
{
fault "NoFault"
};
def macroop HINT_NOP
{
fault "NoFault"
};
'''
| bsd-3-clause |
serpilliere/miasm | test/core/sembuilder.py | 2 | 1414 | from __future__ import print_function
import inspect
from pdb import pm
from miasm.core.sembuilder import SemBuilder
from miasm.core.locationdb import LocationDB
import miasm.expression.expression as m2_expr
# Test classes
class IR(object):
    # Minimal stand-in for a miasm IR-architecture object: exposes just the
    # surface SemBuilder touches (loc_db, IRDst, next-location helpers).
    def __init__(self, loc_db):
        self.loc_db = loc_db

    # Destination register of the intermediate representation (32-bit).
    IRDst = m2_expr.ExprId("IRDst", 32)

    def get_next_instr(self, _):
        # The instruction argument is ignored; always report location 0.
        return m2_expr.LocKey(0)

    def get_next_loc_key(self, _):
        # Same fixed location as get_next_instr: enough for this test.
        return m2_expr.LocKey(0)
class Instr(object):
    # Stub instruction: SemBuilder only needs the architecture mode (32-bit).
    mode = 32
# Test
sb = SemBuilder(m2_expr.__dict__)
@sb.parse
def test(Arg1, Arg2, Arg3):
    "Test docstring"
    # NOTE: this body is never executed as plain Python -- SemBuilder.parse
    # walks its AST and translates each statement into miasm IR (that is why
    # constructs such as calling a string literal are legal here). Comments
    # are not part of the AST, so they are safe to add.
    Arg1 = Arg2
    value1 = Arg2
    value2 = Arg3 + i32(4) - ExprMem(Arg1, 32)
    # Conditional assignment becomes an IR ternary expression.
    Arg3 = Arg3 if Arg2 + value1 else i32(0) + value2
    # 'myop'(...) denotes an IR operation named "myop", not a Python call.
    tmpvar = 'myop'(i32(2))
    Arg2 = ('myopsize%d' % Arg1.size)(tmpvar, Arg1)
    # Slicing extracts bits [0:24) of Arg1.
    alias = Arg1[:24]

    if not Arg1:
        Arg2 = Arg3
    else:
        # {a, b} denotes bit composition of the two immediates.
        alias = {i16(4), i8(5)}
a = m2_expr.ExprId('A', 32)
b = m2_expr.ExprId('B', 32)
c = m2_expr.ExprId('C', 32)
loc_db = LocationDB()
ir = IR(loc_db)
instr = Instr()
res = test(ir, instr, a, b, c)
print("[+] Returned:")
print(res)
print("[+] DocString:", test.__doc__)
print("[+] Cur instr:")
for statement in res[0]:
print(statement)
print("[+] Blocks:")
for irb in res[1]:
print(irb.loc_key)
for assignblk in irb:
for expr in assignblk:
print(expr)
print()
| gpl-2.0 |
ocadotechnology/appengine-pipelines | python/demo/pipeline/util.py | 14 | 6994 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for use with the Google App Engine Pipeline API."""
__all__ = ["for_name",
"JsonEncoder",
"JsonDecoder"]
#pylint: disable=g-bad-name
import datetime
import inspect
import logging
import os
try:
import json
except ImportError:
import simplejson as json
# pylint: disable=protected-access
def _get_task_target():
  """Get the default target for a pipeline task.

  Current version id format is: user_defined_version.minor_version_number
  Current module id is just the module's name. It could be "default"

  Returns:
    A complete target name is of format version.module. If module is the
    default module, just version. None if target can not be determined.
  """
  # Break circular dependency.
  # pylint: disable=g-import-not-at-top
  import pipeline
  if pipeline._TEST_MODE:
    return None

  # Further protect against test cases that don't set env vars properly.
  if ("CURRENT_VERSION_ID" not in os.environ or
      "CURRENT_MODULE_ID" not in os.environ):
    logging.warning("Running Pipeline in non TEST_MODE but important "
                    "env vars are not set.")
    return None

  # Keep only the user-defined major version; drop the minor build number.
  version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
  module = os.environ["CURRENT_MODULE_ID"]
  if module == "default":
    return version
  return "%s.%s" % (version, module)
def for_name(fq_name, recursive=False):
  """Find class/function/method specified by its fully qualified name.

  Fully qualified can be specified as:
    * <module_name>.<class_name>
    * <module_name>.<function_name>
    * <module_name>.<class_name>.<method_name> (an unbound method will be
      returned in this case).

  for_name works by doing __import__ for <module_name>, and looks for
  <class_name>/<function_name> in module's __dict__/attrs. If fully qualified
  name doesn't contain '.', the current module will be used.

  Args:
    fq_name: fully qualified name of something to find.
    recursive: internal flag; True when for_name calls itself to resolve the
        module part of a dotted name, changing which exception is raised.

  Returns:
    class object, function, or (for the three-part form) a method.

  Raises:
    ImportError: when specified module could not be loaded or the class
    was not found in the module.
  """
  fq_name = str(fq_name)
  module_name = __name__
  short_name = fq_name

  if fq_name.rfind(".") >= 0:
    (module_name, short_name) = (fq_name[:fq_name.rfind(".")],
                                 fq_name[fq_name.rfind(".") + 1:])

  try:
    result = __import__(module_name, None, None, [short_name])
    return result.__dict__[short_name]
  except KeyError:
    # If we're recursively inside a for_name() chain, then we want to raise
    # this error as a key error so we can report the actual source of the
    # problem. If we're *not* recursively being called, that means the
    # module was found and the specific item could not be loaded, and thus
    # we want to raise an ImportError directly.
    if recursive:
      raise
    else:
      raise ImportError("Could not find '%s' on path '%s'" % (
          short_name, module_name))
  except ImportError:
    # Fixed: this was "except ImportError, e" -- Python-2-only syntax whose
    # bound name was never used; the bare form works on Python 2 and 3.
    # module_name is not actually a module. Try for_name for it to figure
    # out what's this.
    try:
      module = for_name(module_name, recursive=True)
      if hasattr(module, short_name):
        return getattr(module, short_name)
      else:
        # The module was found, but the function component is missing.
        raise KeyError()
    except KeyError:
      raise ImportError("Could not find '%s' on path '%s'" % (
          short_name, module_name))
    except ImportError:
      # This means recursive import attempts failed, thus we will raise the
      # first ImportError we encountered, since it's likely the most accurate.
      pass
    # Raise the original import error that caused all of this, since it is
    # likely the real cause of the overall problem.
    raise
def is_generator_function(obj):
  """Return true if the object is a user-defined generator function.

  Generator function objects provides same attributes as functions.
  See isfunction.__doc__ for attributes listing.

  Adapted from Python 2.6.

  Args:
    obj: an object to test.

  Returns:
    true if the object is generator function.
  """
  CO_GENERATOR = 0x20
  # __code__ (available since Python 2.6) replaces the Python-2-only
  # func_code attribute, so this works on both Python 2.6+ and 3.x.
  # Non-functions short-circuit before the attribute access.
  return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
               obj.__code__.co_flags & CO_GENERATOR))
class JsonEncoder(json.JSONEncoder):
  """Pipeline customized json encoder."""

  # Marker key injected into the serialized dict to record the original
  # Python type name; JsonDecoder reads it to pick the matching decoder.
  TYPE_ID = "__pipeline_json_type"

  def default(self, o):
    """Inherit docs."""
    if type(o) in _TYPE_TO_ENCODER:
      encoder = _TYPE_TO_ENCODER[type(o)]
      json_struct = encoder(o)
      # Tag the payload with the type name so it can be round-tripped.
      json_struct[self.TYPE_ID] = type(o).__name__
      return json_struct
    # Unregistered types fall back to the base class, which raises TypeError.
    return super(JsonEncoder, self).default(o)
class JsonDecoder(json.JSONDecoder):
  """Pipeline customized json decoder.

  Inverse of JsonEncoder: dicts carrying the JsonEncoder.TYPE_ID marker are
  turned back into Python objects via the registered decoders; plain dicts
  pass through unchanged.
  """

  def __init__(self, **kwargs):
    # Install our hook only if the caller did not supply one.
    if "object_hook" not in kwargs:
      kwargs["object_hook"] = self._dict_to_obj
    super(JsonDecoder, self).__init__(**kwargs)

  def _dict_to_obj(self, d):
    """Converts a dictionary of json object to a Python object."""
    if JsonEncoder.TYPE_ID not in d:
      return d

    # Remove the marker before handing the payload to the decoder.
    type_name = d.pop(JsonEncoder.TYPE_ID)
    if type_name in _TYPE_NAME_TO_DECODER:
      decoder = _TYPE_NAME_TO_DECODER[type_name]
      return decoder(d)
    else:
      # Fixed: this was TypeError("Invalid type %s.", type_name), which never
      # applied the %-format and raised with a 2-tuple of args instead of a
      # readable message.
      raise TypeError("Invalid type %s." % type_name)
_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
def _json_encode_datetime(o):
"""Json encode a datetime object.
Args:
o: a datetime object.
Returns:
A dict of json primitives.
"""
return {"isostr": o.strftime(_DATETIME_FORMAT)}
def _json_decode_datetime(d):
"""Converts a dict of json primitives to a datetime object."""
return datetime.datetime.strptime(d["isostr"], _DATETIME_FORMAT)
def _register_json_primitive(object_type, encoder, decoder):
  """Extend what Pipeline can serialize.

  Args:
    object_type: type of the object.
    encoder: a function that takes in an object and returns
      a dict of json primitives.
    decoder: inverse function of encoder.
  """
  global _TYPE_TO_ENCODER
  global _TYPE_NAME_TO_DECODER
  # First registration wins: re-registering a type is silently ignored, so a
  # later import cannot override an established serialization.
  if object_type not in _TYPE_TO_ENCODER:
    _TYPE_TO_ENCODER[object_type] = encoder
    # The decoder is keyed by the type's *name*, because that is the string
    # JsonEncoder stores in the serialized dict under TYPE_ID.
    _TYPE_NAME_TO_DECODER[object_type.__name__] = decoder


# Registries: type -> encoder, and type name -> decoder. Populated via
# _register_json_primitive(); datetime support is registered built-in below.
_TYPE_TO_ENCODER = {}
_TYPE_NAME_TO_DECODER = {}
_register_json_primitive(datetime.datetime,
                         _json_encode_datetime,
                         _json_decode_datetime)
| apache-2.0 |
ccnmtl/lettuce | lettuce/languages.py | 11 | 7581 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
LANGUAGES = {
'en': {
'examples': u'Examples|Scenarios',
'feature': u'Feature',
'name': u'English',
'native': u'English',
'scenario': u'Scenario',
'scenario_outline': u'Scenario Outline',
'scenario_separator': u'(Scenario Outline|Scenario)',
'background': u'(?:Background)',
},
'pt-br': {
'examples': u'Exemplos|Cenários',
'feature': u'Funcionalidade',
'name': u'Portuguese',
'native': u'Português',
'scenario': u'Cenário|Cenario',
'scenario_outline': u'Esquema do Cenário|Esquema do Cenario',
'scenario_separator': u'(Esquema do Cenário|Esquema do Cenario|Cenario|Cenário)',
'background': u'(?:Contexto|Considerações)',
},
'pl': {
'examples': u'Przykład',
'feature': u'Właściwość',
'name': u'Polish',
'native': u'Polski',
'scenario': u'Scenariusz',
'scenario_outline': u'Zarys Scenariusza',
'scenario_separator': u'(Zarys Scenariusza|Scenariusz)',
'background': u'(?:Background)',
},
'ca': {
'examples': u'Exemples',
'feature': u'Funcionalitat',
'name': u'Catalan',
'native': u'Català',
'scenario': u'Escenari',
'scenario_outline': u"Esquema d'Escenari",
'scenario_separator': u"(Esquema d'Escenari|Escenari)",
'background': u'(?:Background)',
},
'es': {
'examples': u'Ejemplos',
'feature': u'Funcionalidad',
'name': u'Spanish',
'native': u'Español',
'scenario': u'Escenario',
'scenario_outline': u'Esquema de Escenario',
'scenario_separator': u'(Esquema de Escenario|Escenario)',
'background': u'(?:Contexto|Consideraciones)',
},
'hu': {
'examples': u'Példák',
'feature': u'Jellemző',
'name': u'Hungarian',
'native': u'Magyar',
'scenario': u'Forgatókönyv',
'scenario_outline': u'Forgatókönyv vázlat',
'scenario_separator': u'(Forgatókönyv|Forgatókönyv vázlat)',
'background': u'(?:Háttér)',
},
'fr': {
'examples': u'Exemples|Scénarios',
'feature': u'Fonctionnalité|Fonction',
'name': u'French',
'native': u'Français',
'scenario': u'Scénario',
'scenario_outline': u'Plan de Scénario|Plan du Scénario',
'scenario_separator': u'(Plan de Scénario|Plan du Scénario|Scénario)',
'background': u'(?:Background|Contexte)',
},
'de': {
'examples': u'Beispiele|Szenarios',
'feature': u'Funktionalität|Funktion',
'name': u'German',
'native': u'Deutsch',
'scenario': u'Szenario',
'scenario_outline': u'Szenario-Zusammenfassung|Zusammenfassung',
'scenario_separator': u'(Szenario-Zusammenfassung|Zusammenfassung)',
'background': u'(?:Background)',
},
'ja': {
'examples': u'例',
'feature': u'フィーチャ|機能',
'name': u'Japanese',
'native': u'日本語',
'scenario': u'シナリオ',
'scenario_outline': u'シナリオアウトライン|シナリオテンプレート|テンプレ|シナリオテンプレ',
'scenario_separator': u'(シナリオ|シナリオアウトライン|シナリオテンプレート|テンプレ|シナリオテンプレ)',
'background': u'背景',
},
'tr': {
'examples': u'Örnekler',
'feature': u'Özellik',
'name': u'Turkish',
'native': u'Türkçe',
'scenario': u'Senaryo',
'scenario_outline': u'Senaryo taslağı|Senaryo Taslağı',
'scenario_separator': u'(Senaryo taslağı|Senaryo Taslağı|Senaryo)',
'background': u'(?:Background)',
},
'zh-CN': {
'examples': u'例如|场景集',
'feature': u'特性',
'name': u'Simplified Chinese',
'native': u'简体中文',
'scenario': u'场景',
'scenario_outline': u'场景模板',
'scenario_separator': u'(场景模板|场景)',
'background': u'(?:背景)',
},
'zh-TW': {
'examples': u'例如|場景集',
'feature': u'特性',
'name': u'Traditional Chinese',
'native': u'繁體中文',
'scenario': u'場景',
'scenario_outline': u'場景模板',
'scenario_separator': u'(場景模板|場景)',
'background': u'(?:背景)',
},
'ru': {
'examples': u'Примеры|Сценарии',
'feature': u'Функционал',
'name': u'Russian',
'native': u'Русский',
'scenario': u'Сценарий',
'scenario_outline': u'Структура сценария',
'scenario_separator': u'(Структура сценария|Сценарий)',
'background': u'(?:Background|Предыстория)',
},
'uk': {
'examples': u'Приклади|Сценарії',
'feature': u'Функціонал',
'name': u'Ukrainian',
'native': u'Українська',
'scenario': u'Сценарій',
'scenario_outline': u'Структура сценарію',
'scenario_separator': u'(Структура сценарію|Сценарій)',
'background': u'(?:Background)',
},
'it': {
'examples': u'Esempi|Scenari|Scenarii',
'feature': u'Funzionalità|Funzione',
'name': u'Italian',
'native': u'Italiano',
'scenario': u'Scenario',
'scenario_outline': u'Schema di Scenario|Piano di Scenario',
'scenario_separator': u'(Schema di Scenario|Piano di Scenario|Scenario)',
'background': u'(?:Background)',
},
'no': {
'examples': u'Eksempler',
'feature': u'Egenskaper',
'name': u'Norwegian',
'native': u'Norsk',
'scenario': u'Situasjon',
'scenario_outline': u'Situasjon Oversikt',
'scenario_separator': u'(Situasjon Oversikt|Situasjon)',
'background': u'(?:Bakgrunn)',
},
'sv': {
'examples': u'Exempel|Scenarion',
'feature': u'Egenskaper',
'name': u'Swedish',
'native': u'Svenska',
'scenario': u'Scenario',
'scenario_outline': u'Scenarioöversikt',
'scenario_separator': u'(Scenarioöversikt|Scenario)',
'background': u'(?:Context)',
},
'cz': {
'examples': u'Příklady',
'feature': u'Požadavek',
'name': u'Czech',
'native': u'Čeština',
'scenario': u'Scénář|Požadavek',
'scenario_outline': u'Náčrt scénáře',
'scenario_separator': u'(Náčrt scénáře|Scénář)',
'background': u'(?:Background)',
},
}
| gpl-3.0 |
thanhacun/odoo | addons/mass_mailing/models/mail_mail.py | 114 | 5292 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import urlparse
import werkzeug.urls
from openerp import tools
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
class MailMail(osv.Model):
    """Add the mass mailing campaign data to mail"""
    _name = 'mail.mail'
    _inherit = ['mail.mail']

    _columns = {
        # Campaign this outgoing email belongs to (empty for ordinary mails).
        'mailing_id': fields.many2one('mail.mass_mailing', 'Mass Mailing'),
        # Per-recipient tracking records (sent/open/bounce timestamps).
        'statistics_ids': fields.one2many(
            'mail.mail.statistics', 'mail_mail_id',
            string='Statistics',
        ),
    }

    def create(self, cr, uid, values, context=None):
        """ Override mail_mail creation to create an entry in mail.mail.statistics """
        # TDE note: should be after 'all values computed', to have values (FIXME after merging other branch holding create refactoring)
        mail_id = super(MailMail, self).create(cr, uid, values, context=context)
        if values.get('statistics_ids'):
            mail = self.browse(cr, SUPERUSER_ID, mail_id, context=context)
            # Copy the generated Message-Id onto each statistics record so
            # later opens/replies can be matched back to this mail.
            for stat in mail.statistics_ids:
                self.pool['mail.mail.statistics'].write(cr, uid, [stat.id], {'message_id': mail.message_id}, context=context)
        return mail_id

    def _get_tracking_url(self, cr, uid, mail, partner=None, context=None):
        """Return an <img> tag pointing at the 1x1 open-tracking pixel."""
        base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        track_url = urlparse.urljoin(
            base_url, 'mail/track/%(mail_id)s/blank.gif?%(params)s' % {
                'mail_id': mail.id,
                'params': werkzeug.url_encode({'db': cr.dbname})
            }
        )
        return '<img src="%s" alt=""/>' % track_url

    def _get_unsubscribe_url(self, cr, uid, mail, email_to, msg=None, context=None):
        """Return an HTML link letting `email_to` opt out of the mailing."""
        base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
        url = urlparse.urljoin(
            base_url, 'mail/mailing/%(mailing_id)s/unsubscribe?%(params)s' % {
                'mailing_id': mail.mailing_id.id,
                'params': werkzeug.url_encode({'db': cr.dbname, 'res_id': mail.res_id, 'email': email_to})
            }
        )
        return '<small><a href="%s">%s</a></small>' % (url, msg or 'Click to unsubscribe')

    def send_get_mail_body(self, cr, uid, mail, partner=None, context=None):
        """ Override to add the tracking URL to the body. """
        body = super(MailMail, self).send_get_mail_body(cr, uid, mail, partner=partner, context=context)

        # prepend <base> tag for images using absolute urls
        domain = self.pool.get("ir.config_parameter").get_param(cr, uid, "web.base.url", context=context)
        base = "<base href='%s'>" % domain
        body = tools.append_content_to_html(base, body, plaintext=False, container_tag='div')

        # generate tracking URL
        # Only mass-mailing mails carry statistics; regular mails are untouched.
        if mail.statistics_ids:
            tracking_url = self._get_tracking_url(cr, uid, mail, partner, context=context)
            if tracking_url:
                body = tools.append_content_to_html(body, tracking_url, plaintext=False, container_tag='div')
        return body

    def send_get_email_dict(self, cr, uid, mail, partner=None, context=None):
        # Append the unsubscribe link to mass-mailing emails only.
        res = super(MailMail, self).send_get_email_dict(cr, uid, mail, partner, context=context)
        if mail.mailing_id and res.get('body') and res.get('email_to'):
            emails = tools.email_split(res.get('email_to')[0])
            email_to = emails and emails[0] or False
            unsubscribe_url = self._get_unsubscribe_url(cr, uid, mail, email_to, context=context)
            if unsubscribe_url:
                res['body'] = tools.append_content_to_html(res['body'], unsubscribe_url, plaintext=False, container_tag='p')
        return res

    def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True):
        # Stamp the statistics with the send (or failure) time before the
        # standard post-processing runs.
        if mail_sent is True and mail.statistics_ids:
            self.pool['mail.mail.statistics'].write(cr, uid, [s.id for s in mail.statistics_ids], {'sent': fields.datetime.now()}, context=context)
        elif mail_sent is False and mail.statistics_ids:
            self.pool['mail.mail.statistics'].write(cr, uid, [s.id for s in mail.statistics_ids], {'exception': fields.datetime.now()}, context=context)
        return super(MailMail, self)._postprocess_sent_message(cr, uid, mail, context=context, mail_sent=mail_sent)
| agpl-3.0 |
GoogleCloudPlatform/training-data-analyst | courses/developingapps/python/appengine/start/frontend/quiz/gcp/pubsub.py | 20 | 1995 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
project_id = os.getenv('GCLOUD_PROJECT')
# TODO: Load the Cloud Pub/Sub module
from google.cloud import pubsub_v1
# END TODO
from flask import current_app
# TODO: Create a Pub/Sub Publisher Client
publisher = pubsub_v1.PublisherClient()
# END TODO
# TODO: Create a Pub/Sub Subscriber Client
sub_client = pubsub_v1.SubscriberClient()
# END TODO
# TODO: Create a Topic Object to reference the feedback topic
topic_path = publisher.topic_path(project_id, 'feedback')
# END TODO
# TODO: Create a Subscription object named worker-subscription
sub_path = sub_client.subscription_path(project_id, 'worker-subscription')
# END TODO
"""
Publishes feedback info
- jsonify feedback object
- encode as bytestring
- publish message
- return result
"""
def publish_feedback(feedback):
    """Serialize one feedback object to JSON, publish it on the feedback
    topic, and block until Pub/Sub returns the message id."""
    # TODO: Publish the feedback object to the feedback topic
    message_bytes = json.dumps(feedback, indent=2,
                               sort_keys=True).encode('utf-8')
    return publisher.publish(topic_path, data=message_bytes).result()
    # END TODO
"""pull_feedback
Starts pulling messages from subscription
- receive callback function from calling module
- initiate the pull providing the callback function
"""
def pull_feedback(callback):
    # TODO: Subscriber to the worker-subscription,
    # invoking the callback
    # Starts an asynchronous pull on worker-subscription; the Pub/Sub client
    # invokes callback(message) on a background thread for each message.
    sub_client.subscribe(sub_path, callback=callback)
    # END TODO
| apache-2.0 |
dknlght/dkodi | src/script.module.turtle/lib/TurtleContainer.py | 1 | 6944 | '''
Created on Oct 17, 2011
'''
from common import DataObjects, XBMCInterfaceUtils, AddonUtils
from common.Singleton import SingletonClass
from common.XBMCInterfaceUtils import ProgressDisplayer
from definition.Turtle import Action, Move, Service
import sys
import xbmcaddon #@UnresolvedImport
__author__ = "ajju"
__version__ = "1.0.0"
class AddonContext(SingletonClass):
'''
AddonContext will provide a way for container to access the route
'''
def __initialize__(self, addon_id):
#Addon information
self.addon = xbmcaddon.Addon(id=addon_id)
self.addonPath = self.addon.getAddonInfo('path')
self.addonProfile = self.addon.getAddonInfo('profile')
self.turtle_addon = xbmcaddon.Addon(id='script.module.turtle')
self.turtle_addonPath = self.turtle_addon.getAddonInfo('path')
self.turtle_addonProfile = self.turtle_addon.getAddonInfo('profile')
turtle_filepath = AddonUtils.getCompleteFilePath(self.addonPath, 'config', 'turtle.xml')
if not AddonUtils.doesFileExist(turtle_filepath):
turtle_filepath = AddonUtils.getCompleteFilePath(self.turtle_addonPath, 'lib/config', 'turtle.xml')
self.turtle_map = AddonUtils.getBeautifulSoupObj(turtle_filepath)
def getTurtleRoute(self, actionId):
actionTag = self.turtle_map.find(name='action', attrs={'id':actionId})
actionObj = Action(actionTag['id'])
if actionTag.has_key('pmessage'):
ProgressDisplayer().displayMessage(5, pmessage=actionTag['pmessage'])
for moveTag in actionTag.findAll('move'):
modulePath = moveTag['module']
functionName = moveTag['function']
pmessage = None
if moveTag.has_key('pmessage'):
pmessage = moveTag['pmessage']
actionObj.addMove(Move(modulePath, functionName, pmessage))
for nextActionTag in actionTag.findAll('next-action'):
actionName = nextActionTag['name']
actionId = nextActionTag['id']
actionObj.addNextAction(actionName, actionId)
for redirectActionTag in actionTag.findAll('redirect-action'):
actionName = redirectActionTag['name']
actionId = redirectActionTag['id']
actionObj.addRedirectAction(actionName, actionId)
return actionObj
def isNextActionFolder(self, actionId, nextActionName):
actionTag = self.turtle_map.find(name='action', attrs={'id':actionId})
nextActionTag = actionTag.find(name='next-action', attrs={'name':nextActionName})
return (nextActionTag['isfolder'] == 'true')
def getTurtleServices(self):
services = []
serviceTags = self.turtle_map.findAll(name='service')
for serviceTag in serviceTags:
services.append(Service(serviceTag['name'], serviceTag['action-id']))
return services
'''CONTAINER FUNCTIONS START FROM HERE'''
#INITIALIZE CONTAINER
class Container(SingletonClass):
def __initialize__(self, addon_id):
self.addon_context = AddonContext(addon_id=addon_id)
def getAddonContext(self):
return self.addon_context
def getTurtleRequest(self):
params = None
if len(sys.argv) >= 3:
params = str(sys.argv[2])
self.request_obj = DataObjects.Request(params=params)
self.response_obj = DataObjects.Response()
return self.request_obj
def getTurtleResponse(self):
return self.response_obj
def reloadTurtleRequest(self, params):
self.request_obj = DataObjects.Request(params=params)
self.request_obj.__initialize__(params)
self.response_obj = DataObjects.Response()
self.response_obj.reset_item_list()
def getTurtleRoute(self, actionId):
return self.addon_context.getTurtleRoute(actionId)
def moveTurtle(self, moveObj):
if moveObj.get_pmessage() is not None:
ProgressDisplayer().displayMessage(50, pmessage=moveObj.get_pmessage())
components = moveObj.module_path.split('.')
module = __import__(moveObj.module_path)
if components is not None and isinstance(components, list):
for index in range(1, len(components)):
module = getattr(module, components[index])
function = getattr(module, moveObj.function_name)
function(self.request_obj, self.response_obj)
def judgeTurtleNextAction(self, actionObj):
ProgressDisplayer().displayMessage(80, line1='Preparing items for display or play', line2='Total items: ' + str(len(self.response_obj.get_item_list())))
if self.response_obj.get_redirect_action_name() is None:
isAnyVideoItem = False
for item in self.response_obj.get_item_list():
nextActionId = actionObj.get_next_action_map()[item.get_next_action_name()]
if nextActionId == '__play__':
if not isAnyVideoItem:
XBMCInterfaceUtils.clearPlayList() #Clear playlist item only when at least one video item is found.
XBMCInterfaceUtils.addPlayListItem(item)
isAnyVideoItem = True
elif nextActionId == '__service_response__':
#Do Nothing , get response object from container for parameters to be returned
pass
else:
is_Folder = self.addon_context.isNextActionFolder(actionObj.get_action_id(), item.get_next_action_name())
XBMCInterfaceUtils.addFolderItem(item, nextActionId, is_Folder)
if isAnyVideoItem == True:
ProgressDisplayer().end()
XBMCInterfaceUtils.play()
else:
if self.response_obj.get_xbmc_sort_method() is not None:
XBMCInterfaceUtils.sortItems(self.response_obj.get_xbmc_sort_method())
if self.response_obj.get_xbmc_content_type() is not None:
XBMCInterfaceUtils.setContentType(self.response_obj.get_xbmc_content_type())
else:
redirectActionId = actionObj.get_redirect_action_map()[self.response_obj.get_redirect_action_name()]
self.response_obj.set_redirect_action_name(None)
return redirectActionId
def performAction(self, actionId):
ProgressDisplayer().start('Processing request...')
while actionId is not None:
print 'Action to be performed ::' + actionId
turtle_route = self.getTurtleRoute(actionId)
for move in turtle_route.moves:
self.moveTurtle(move)
actionId = self.judgeTurtleNextAction(turtle_route)
ProgressDisplayer().end()
| gpl-2.0 |
ksmaheshkumar/mycli | mycli/clibuffer.py | 10 | 1242 | from prompt_toolkit.buffer import Buffer
from prompt_toolkit.filters import Condition
class CLIBuffer(Buffer):
    """Prompt-toolkit Buffer whose multiline mode is decided per keystroke.

    With ``always_multiline`` set, input stays multiline until the current
    text looks like a complete statement (see ``_multiline_exception``).
    """

    def __init__(self, always_multiline, *args, **kwargs):
        self.always_multiline = always_multiline

        @Condition
        def is_multiline():
            doc = self.document
            return self.always_multiline and not _multiline_exception(doc.text)

        # Fixed: this was super(self.__class__, self), which recurses
        # infinitely as soon as CLIBuffer is subclassed, because
        # self.__class__ is then the subclass, not the defining class.
        super(CLIBuffer, self).__init__(
            *args, is_multiline=is_multiline, **kwargs)
def _multiline_exception(text):
orig = text
text = text.strip()
# Multi-statement favorite query is a special case. Because there will
# be a semicolon separating statements, we can't consider semicolon an
# EOL. Let's consider an empty line an EOL instead.
if text.startswith('\\fs'):
return orig.endswith('\n')
return (text.startswith('\\') or # Special Command
text.endswith(';') or # Ended with a semi-colon
(text == 'exit') or # Exit doesn't need semi-colon
(text == 'quit') or # Quit doesn't need semi-colon
(text == ':q') or # To all the vim fans out there
(text == '') # Just a plain enter without any text
)
| bsd-3-clause |
ivanlyon/exercises | kattis/k_pebblesolitaire2.py | 1 | 1050 | '''
Optimal result for pebble solitaire state
Status: Accepted
'''
# Memoization table; seeded with every single-pebble 23-cell board, which is
# already optimal (score 1).
PRECOMPUTED = {}
for _i in range(23):
    PRECOMPUTED['-' * _i + 'o' + '-' * (22 - _i)] = 1


###############################################################################
def dfs(state):
    """Return the fewest pebbles reachable from *state*.

    A board is a string of 'o' (pebble) and '-' (empty). A pebble may jump an
    adjacent pebble into the empty cell beyond it ('oo-' -> '--o' or
    '-oo' -> 'o--'), removing the jumped pebble. Results are memoized in
    PRECOMPUTED. Generalized to any board length (the original hard-coded
    range(21) for the 23-cell contest board; len(state) - 2 is identical
    there).
    """
    if state in PRECOMPUTED:
        return PRECOMPUTED[state]
    # Making no move at all is always an option.
    score = state.count('o')
    for i in range(len(state) - 2):
        # The two jump patterns are mutually exclusive, so elif is safe.
        window = state[i:i + 3]
        if window == 'oo-':
            score = min(score, dfs(state[:i] + '--o' + state[i + 3:]))
        elif window == '-oo':
            score = min(score, dfs(state[:i] + 'o--' + state[i + 3:]))
    PRECOMPUTED[state] = score
    return score
###############################################################################
def main():
    """Read input and print output"""
    # First input line: number of test cases; each following line is a board.
    for _ in range(int(input())):
        print(dfs(input()))

###############################################################################
if __name__ == '__main__':
    main()
| mit |
SanchayanMaity/gem5 | configs/example/garnet_synth_traffic.py | 10 | 5950 | # Copyright (c) 2016 Georgia Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Author: Tushar Krishna
import m5
from m5.objects import *
from m5.defines import buildEnv
from m5.util import addToPath
import os, optparse, sys
addToPath('../')
from common import Options
from ruby import Ruby
# Get paths we might need. It's expected this file is in m5/configs/example.
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
m5_root = os.path.dirname(config_root)
parser = optparse.OptionParser()
Options.addNoISAOptions(parser)
parser.add_option("--synthetic", type="choice", default="uniform_random",
choices=['uniform_random', 'tornado', 'bit_complement', \
'bit_reverse', 'bit_rotation', 'neighbor', \
'shuffle', 'transpose'])
parser.add_option("-i", "--injectionrate", type="float", default=0.1,
metavar="I",
help="Injection rate in packets per cycle per node. \
Takes decimal value between 0 to 1 (eg. 0.225). \
Number of digits after 0 depends upon --precision.")
parser.add_option("--precision", type="int", default=3,
help="Number of digits of precision after decimal point\
for injection rate")
parser.add_option("--sim-cycles", type="int", default=1000,
help="Number of simulation cycles")
parser.add_option("--num-packets-max", type="int", default=-1,
help="Stop injecting after --num-packets-max.\
Set to -1 to disable.")
parser.add_option("--single-sender-id", type="int", default=-1,
help="Only inject from this sender.\
Set to -1 to disable.")
parser.add_option("--single-dest-id", type="int", default=-1,
help="Only send to this destination.\
Set to -1 to disable.")
parser.add_option("--inj-vnet", type="int", default=-1,
help="Only inject in this vnet (0, 1 or 2).\
0 and 1 are 1-flit, 2 is 5-flit.\
Set to -1 to inject randomly in all vnets.")
#
# Add the ruby specific and protocol specific options
#
Ruby.define_options(parser)
execfile(os.path.join(config_root, "common", "Options.py"))
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
if options.inj_vnet > 2:
print "Error: Injection vnet %d should be 0 (1-flit), 1 (1-flit) \
or 2 (5-flit) or -1 (random)"\
% (options.inj_vnet)
sys.exit(1)
cpus = [ GarnetSyntheticTraffic(
num_packets_max=options.num_packets_max,
single_sender=options.single_sender_id,
single_dest=options.single_dest_id,
sim_cycles=options.sim_cycles,
traffic_type=options.synthetic,
inj_rate=options.injectionrate,
inj_vnet=options.inj_vnet,
precision=options.precision,
num_dest=options.num_dirs) \
for i in xrange(options.num_cpus) ]
# create the desired simulated system
system = System(cpu = cpus, mem_ranges = [AddrRange(options.mem_size)])
# Create a top-level voltage domain and clock domain
system.voltage_domain = VoltageDomain(voltage = options.sys_voltage)
system.clk_domain = SrcClockDomain(clock = options.sys_clock,
voltage_domain = system.voltage_domain)
Ruby.create_system(options, False, system)
# Create a seperate clock domain for Ruby
system.ruby.clk_domain = SrcClockDomain(clock = options.ruby_clock,
voltage_domain = system.voltage_domain)
i = 0
for ruby_port in system.ruby._cpu_ports:
#
# Tie the cpu test ports to the ruby cpu port
#
cpus[i].test = ruby_port.slave
i += 1
# -----------------------
# run simulation
# -----------------------
root = Root(full_system = False, system = system)
root.system.mem_mode = 'timing'
# Not much point in this being higher than the L1 latency
m5.ticks.setGlobalFrequency('1ns')
# instantiate configuration
m5.instantiate()
# simulate until program terminates
exit_event = m5.simulate(options.abs_max_tick)
print 'Exiting @ tick', m5.curTick(), 'because', exit_event.getCause()
| bsd-3-clause |
hfp/libxsmm | samples/deeplearning/sparse_training/fairseq/fairseq/models/nat/iterative_nonautoregressive_transformer.py | 1 | 8426 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel
def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1):
    """Corrupt token batch `s` in place for the denoising auto-encoder loss.

    Each position is independently assigned one of four fates based on a
    uniform draw: replace with a random vocab word (< beta/3), repeat the
    current word over the next slot (beta/3..2beta/3), swap with the next
    word (2beta/3..beta), or keep as-is (>= beta).  Special tokens are
    forced into the "keep" bucket.  Returns the mutated `s`.
    """
    # s: input batch
    # V: vocabulary size
    rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device)
    choices = torch.rand(size=s.size(), device=s.device)
    # Force pad/bos/eos positions to 1 (>= beta), i.e. never corrupted.
    choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1)
    replace = choices < beta / 3
    repeat = (choices >= beta / 3) & (choices < beta * 2 / 3)
    swap = (choices >= beta * 2 / 3) & (choices < beta)
    safe = choices >= beta
    # Left-to-right sweep over adjacent pairs (i, i+1).
    # NOTE(review): the comparisons below use the literal 3 rather than the
    # `eos` parameter -- confirm this is intentional for non-default eos ids.
    for i in range(s.size(1) - 1):
        rand_word = rand_words[:, i]
        next_word = s[:, i + 1]
        self_word = s[:, i]
        replace_i = replace[:, i]
        # Swap/repeat are suppressed when the next token is eos.
        swap_i = swap[:, i] & (next_word != 3)
        repeat_i = repeat[:, i] & (next_word != 3)
        safe_i = safe[:, i] | ((next_word == 3) & (~replace_i))
        # Position i: keep, take the next word (swap), or a random word.
        s[:, i] = (
            self_word * (safe_i | repeat_i).long()
            + next_word * swap_i.long()
            + rand_word * replace_i.long()
        )
        # Position i+1: keep, or receive the current word (swap/repeat).
        s[:, i + 1] = (
            next_word * (safe_i | replace_i).long()
            + self_word * (swap_i | repeat_i).long()
        )
    return s
def gumbel_noise(input, TINY=1e-8):
    """Draw Gumbel(0, 1) noise with the shape/dtype/device of `input`.

    Uses the standard inverse-CDF construction -log(-log(u)) on uniform
    samples u, adding TINY before each log for numerical safety.
    """
    uniform = input.new_zeros(*input.size()).uniform_()
    neg_log = -torch.log(uniform + TINY)
    return -torch.log(neg_log + TINY)
@register_model("iterative_nonautoregressive_transformer")
class IterNATransformerModel(NATransformerModel):
    """Non-autoregressive transformer trained with iterative refinement.

    Runs the decoder `train_step` times per batch; each pass (after the
    first) is fed the previous pass's predictions, optionally perturbed by
    Gumbel noise and/or replaced with corrupted targets (denoising loss).
    """
    @staticmethod
    def add_args(parser):
        """Register the iterative-refinement CLI options on top of NAT's."""
        NATransformerModel.add_args(parser)
        parser.add_argument("--train-step", type=int,
                            help="number of refinement iterations during training")
        parser.add_argument("--dae-ratio", type=float,
                            help="the probability of switching to the denoising auto-encoder loss")
        parser.add_argument("--stochastic-approx", action="store_true",
                            help="sampling from the decoder as the inputs for next iteration")
    @classmethod
    def build_model(cls, args, task):
        """Build the base NAT model, then attach the refinement settings."""
        model = super().build_model(args, task)
        model.train_step = getattr(args, "train_step", 4)
        model.dae_ratio = getattr(args, "dae_ratio", 0.5)
        model.stochastic_approx = getattr(args, "stochastic_approx", False)
        return model
    def forward(
        self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
    ):
        """Compute word-insertion and length losses over all refinement steps.

        Returns a dict with "word_ins" (outputs/targets/masks concatenated
        across the `train_step` passes) and "length" loss components.
        """
        B, T = prev_output_tokens.size()
        # encoding
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
        # length prediction
        length_out = self.decoder.forward_length(normalize=False, encoder_out=encoder_out)
        length_tgt = self.decoder.forward_length_prediction(length_out, encoder_out, tgt_tokens)
        # decoding: collect one (out, tgt, mask) triple per refinement pass
        word_ins_outs, word_ins_tgts, word_ins_masks = [], [], []
        for t in range(self.train_step):
            word_ins_out = self.decoder(
                normalize=False,
                prev_output_tokens=prev_output_tokens,
                encoder_out=encoder_out,
                step=t,
            )
            word_ins_tgt = tgt_tokens
            # Loss is only computed at non-padding target positions.
            word_ins_mask = word_ins_tgt.ne(self.pad)
            word_ins_outs.append(word_ins_out)
            word_ins_tgts.append(word_ins_tgt)
            word_ins_masks.append(word_ins_mask)
            if t < (self.train_step - 1):
                # prediction for next iteration
                if self.stochastic_approx:
                    # Sample via Gumbel-max instead of a plain argmax.
                    word_ins_prediction = (
                        word_ins_out + gumbel_noise(word_ins_out)
                    ).max(-1)[1]
                else:
                    word_ins_prediction = word_ins_out.max(-1)[1]
                # Feed this pass's predictions (at unmasked positions) into
                # the next decoding pass.
                prev_output_tokens = prev_output_tokens.masked_scatter(
                    word_ins_mask, word_ins_prediction[word_ins_mask]
                )
                if self.dae_ratio > 0:
                    # we do not perform denoising for the first iteration
                    # NOTE(review): "corrputed" is a typo for "corrupted"
                    # (local name only; behaviour unaffected).
                    corrputed = (
                        torch.rand(size=(B,), device=prev_output_tokens.device)
                        < self.dae_ratio
                    )
                    # Selected sentences get corrupted *targets* as input
                    # instead of the model's own predictions.
                    corrputed_tokens = _sequential_poisoning(
                        tgt_tokens[corrputed],
                        len(self.tgt_dict),
                        0.33,
                        self.bos,
                        self.eos,
                        self.pad,
                    )
                    prev_output_tokens[corrputed] = corrputed_tokens
        # concat everything along the batch dimension
        word_ins_out = torch.cat(word_ins_outs, 0)
        word_ins_tgt = torch.cat(word_ins_tgts, 0)
        word_ins_mask = torch.cat(word_ins_masks, 0)
        return {
            "word_ins": {
                "out": word_ins_out, "tgt": word_ins_tgt,
                "mask": word_ins_mask, "ls": self.args.label_smoothing,
                "nll_loss": True
            },
            "length": {
                "out": length_out, "tgt": length_tgt,
                "factor": self.decoder.length_loss_factor
            }
        }
@register_model_architecture(
    "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer"
)
def inat_base_architecture(args):
    """Fill in every hyperparameter that was not supplied on the command line.

    Assignments run in order because several decoder defaults are derived
    from encoder attributes assigned earlier in this function.
    """
    def _default(name, value):
        # Equivalent to ``args.name = getattr(args, "name", value)``: the
        # existing value wins when the attribute is already set.
        setattr(args, name, getattr(args, name, value))

    # --- encoder ---
    _default("encoder_embed_path", None)
    _default("encoder_embed_dim", 512)
    _default("encoder_ffn_embed_dim", 2048)
    _default("encoder_layers", 6)
    _default("encoder_attention_heads", 8)
    _default("encoder_normalize_before", False)
    _default("encoder_learned_pos", False)
    # --- decoder (embedding sizes default to the encoder's) ---
    _default("decoder_embed_path", None)
    _default("decoder_embed_dim", args.encoder_embed_dim)
    _default("decoder_ffn_embed_dim", args.encoder_ffn_embed_dim)
    _default("decoder_layers", 6)
    _default("decoder_attention_heads", 8)
    _default("decoder_normalize_before", False)
    _default("decoder_learned_pos", False)
    # --- dropout / activations / adaptive softmax ---
    _default("attention_dropout", 0.0)
    _default("activation_dropout", 0.0)
    _default("activation_fn", "relu")
    _default("dropout", 0.1)
    _default("adaptive_softmax_cutoff", None)
    _default("adaptive_softmax_dropout", 0)
    # --- embedding sharing and initialisation ---
    _default("share_decoder_input_output_embed", False)
    _default("share_all_embeddings", False)
    _default("no_token_positional_embeddings", False)
    _default("adaptive_input", False)
    _default("apply_bert_init", False)
    _default("decoder_output_dim", args.decoder_embed_dim)
    _default("decoder_input_dim", args.decoder_embed_dim)
    # --- special arguments ---
    _default("sg_length_pred", False)
    _default("pred_length_offset", False)
    _default("length_loss_factor", 0.1)
    _default("ngram_predictor", 1)
    _default("src_embedding_copy", False)
    _default("train_step", 4)
    _default("dae_ratio", 0.5)
    _default("stochastic_approx", False)
@register_model_architecture(
    "iterative_nonautoregressive_transformer",
    "iterative_nonautoregressive_transformer_wmt_en_de",
)
def iter_nat_wmt_en_de(args):
    """WMT En-De preset: identical to the base iterative NAT architecture."""
    inat_base_architecture(args)
Jesus-21/addic7ed | addic7ed/logger.py | 1 | 1269 | from os import makedirs
from os.path import expanduser, exists
from logging import getLogger, Formatter, StreamHandler, DEBUG, WARN
from logging.handlers import RotatingFileHandler
from termcolor import colored
# termcolor colour name used for each logging level by ColoredFormatter.
LOG_COLORS = {
    "DEBUG": "grey",
    "INFO": "cyan",
    "WARNING": "yellow",
    "ERROR": "magenta",
    "CRITICAL": "red"
}
def init_logger():
    """Configure the 'addic7ed' logger.

    Everything at DEBUG and above goes to a rotating log file under
    ~/.config/addic7ed/; WARNING and above is also echoed, colourised,
    to the console.
    """
    logger = getLogger("addic7ed")
    logger.setLevel(DEBUG)
    config_dir = "%s/.config/addic7ed/" % expanduser("~")
    if not exists(config_dir):
        makedirs(config_dir)
    file_handler = RotatingFileHandler(config_dir + "addic7ed.log")
    file_handler.setLevel(DEBUG)
    console_handler = StreamHandler()
    console_handler.setLevel(WARN)
    base_format = "%(levelname)7s - %(name)s - %(message)s"
    colour_format = "%s - %s" % (colored("%(asctime)s", "green"), base_format)
    plain_format = "%(asctime)s - " + base_format
    file_handler.setFormatter(Formatter(plain_format))
    console_handler.setFormatter(ColoredFormatter(colour_format))
    logger.addHandler(file_handler)
    logger.addHandler(console_handler)
class ColoredFormatter(Formatter):
    """Formatter that colours each message according to its log level."""
    def format(self, record):
        # Mutates record.msg in place, then defers to the stock Formatter.
        record.msg = colored(record.msg, LOG_COLORS[record.levelname])
        return super().format(record)
| mit |
undoware/neutron-drive | neutron-drive/oauth2client/gce.py | 45 | 3056 | # Copyright (C) 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google Compute Engine
Utilities for making it easier to use OAuth 2.0 on Google Compute Engine.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
import httplib2
import logging
import uritemplate
from oauth2client import util
from oauth2client.anyjson import simplejson
from oauth2client.client import AccessTokenRefreshError
from oauth2client.client import AssertionCredentials
# Module-level logger for this file.
logger = logging.getLogger(__name__)
# URI Template for the endpoint that returns access_tokens.
# The {?scope} form is expanded with the requested scope(s) via uritemplate.
META = ('http://metadata.google.internal/0.1/meta-data/service-accounts/'
        'default/acquire{?scope}')
class AppAssertionCredentials(AssertionCredentials):
  """Credentials object for Compute Engine Assertion Grants

  This object will allow a Compute Engine instance to identify itself to
  Google and other OAuth 2.0 servers that can verify assertions. It can be used
  for the purpose of accessing data stored under an account assigned to the
  Compute Engine instance itself.

  This credential does not require a flow to instantiate because it represents
  a two legged flow, and therefore has all of the required information to
  generate and refresh its own access tokens.
  """
  @util.positional(2)
  def __init__(self, scope, **kwargs):
    """Constructor for AppAssertionCredentials

    Args:
      scope: string or list of strings, scope(s) of the credentials being
        requested.
    """
    # A list of scopes is collapsed to the space-delimited string form.
    if type(scope) is list:
      scope = ' '.join(scope)
    self.scope = scope
    super(AppAssertionCredentials, self).__init__(
        'ignored' # assertion_type is ignore in this subclass.
    )
  @classmethod
  def from_json(cls, json):
    # Rebuild credentials from their JSON serialisation; only the scope
    # is needed, since tokens are re-fetched from the metadata server.
    data = simplejson.loads(json)
    return AppAssertionCredentials(data['scope'])
  def _refresh(self, http_request):
    """Refreshes the access_token.

    Skip all the storage hoops and just refresh using the API.

    Args:
      http_request: callable, a callable that matches the method signature of
        httplib2.Http.request, used to make the refresh request.

    Raises:
      AccessTokenRefreshError: When the refresh fails.
    """
    # Ask the GCE metadata server for a token scoped to self.scope.
    uri = uritemplate.expand(META, {'scope': self.scope})
    response, content = http_request(uri)
    if response.status == 200:
      try:
        d = simplejson.loads(content)
      except StandardError, e:
        # Any parse failure is surfaced as a refresh error (Python 2 syntax).
        raise AccessTokenRefreshError(str(e))
      self.access_token = d['accessToken']
    else:
      # Non-200: propagate the metadata server's response body as the error.
      raise AccessTokenRefreshError(content)
| bsd-3-clause |
pmonta/GNSS-DSP-tools | acquire-galileo-e1c.py | 1 | 3250 | #!/usr/bin/env python
import optparse
import numpy as np
import scipy.signal
import scipy.fftpack as fft
import gnsstools.galileo.e1c as e1c
import gnsstools.nco as nco
import gnsstools.io as io
import gnsstools.util as util
#
# Acquisition search
#
def search(x,prn,doppler_search,ms):
    """Acquisition search for Galileo E1C PRN `prn` in baseband samples `x`.

    Arguments:
        x: complex baseband samples at 8.192 Msps (resampled by the caller)
        prn: PRN number whose E1-C spreading code is correlated against
        doppler_search: (min_hz, max_hz, increment_hz) Doppler grid
        ms: total integration time in milliseconds

    Returns:
        (metric, code_offset, doppler) of the strongest correlation peak,
        with code_offset wrapped into [0, e1c.code_length).
    """
    blocks = ms//4 - 1
    fs = 8192000.0
    n = 32768 # 4 ms coherent integration
    doppler_min, doppler_max, doppler_incr = doppler_search
    incr = float(e1c.code_length)/n
    c = e1c.code(prn,0,0,incr,n) # obtain samples of the E1-C code
    boc = nco.boc11(0,0,incr,n)
    # Zero-pad the BOC(1,1)-modulated code to 2n so the circular correlation
    # below covers every code phase without wrap-around ambiguity.
    c = fft.fft(np.concatenate((c*boc,np.zeros(n))))
    m_metric,m_code,m_doppler = 0,0,0
    for doppler in np.arange(doppler_min,doppler_max,doppler_incr): # doppler bins
        q = np.zeros(2*n)
        # Carrier wipe-off for this Doppler hypothesis.
        w = nco.nco(-doppler/fs,0,2*n)
        for block in range(blocks): # incoherent sums
            b = x[(block*n):((block+2)*n)]
            b = b*w
            # FFT-based circular correlation of samples against the code.
            r = fft.ifft(c*np.conj(fft.fft(b)))
            q = q + np.absolute(r)
        idx = np.argmax(q)
        if q[idx]>m_metric:
            m_metric = q[idx]
            # Convert the sample lag of the peak into code chips.
            m_code = e1c.code_length*(float(idx)/n)
            m_doppler = doppler
    m_code = m_code%e1c.code_length
    return m_metric,m_code,m_doppler
#
# main program
#
# Bug fix below: the usage example previously invoked "acquire-gps-e1c.py",
# the wrong script name; it now matches this tool.
parser = optparse.OptionParser(usage="""acquire-galileo-e1c.py [options] input_filename sample_rate carrier_offset
Acquire Galileo E1C signals
Examples:
Acquire all Galileo PRNs using standard input with sample rate 69.984 MHz and carrier offset -9.334875 MHz:
  acquire-galileo-e1c.py /dev/stdin 69984000 -9334875
Arguments:
  input_filename input data file, i/q interleaved, 8 bit signed
  sample_rate    sampling rate in Hz
  carrier_offset offset to E1 carrier in Hz (positive or negative)""")
parser.disable_interspersed_args()
parser.add_option("--prn", default="1-50", help="PRNs to search, e.g. 1,3,7-14,31 (default %default)")
parser.add_option("--doppler-search", metavar="MIN,MAX,INCR", default="-9000,9000,50", help="Doppler search grid: min,max,increment (default %default)")
parser.add_option("--time", type="int", default=80, help="integration time in milliseconds (default %default)")
(options, args) = parser.parse_args()
filename = args[0]
fs = float(args[1])
coffset = float(args[2])
prns = util.parse_list_ranges(options.prn)
doppler_search = util.parse_list_floats(options.doppler_search)
ms = options.time
# read first portion of file (a few ms of padding beyond the integration time)
ms_pad = ms + 5
n = int(fs*0.001*ms_pad)
fp = open(filename,"rb")
x = io.get_samples_complex(fp,n)
# resample to 8.192 MHz: mix the carrier to baseband, low-pass filter,
# then linearly interpolate I and Q onto the new sample grid
fsr = 8192000.0/fs
nco.mix(x,-coffset/fs,0)
h = scipy.signal.firwin(161,4e6/(fs/2),window='hanning')
x = scipy.signal.filtfilt(h,[1],x)
xr = np.interp((1/fsr)*np.arange(ms_pad*8192),np.arange(len(x)),np.real(x))
xi = np.interp((1/fsr)*np.arange(ms_pad*8192),np.arange(len(x)),np.imag(x))
x = xr+(1j)*xi
# iterate (in parallel) over PRNs of interest
def worker(p):
    """Search one PRN; `p` is an (x, prn) tuple. Returns a result line."""
    x,prn = p
    metric,code,doppler = search(x,prn,doppler_search,ms)
    return 'prn %2d doppler % 7.1f metric % 7.1f code_offset %6.1f' % (prn,doppler,metric,code)
import multiprocessing as mp
cpus = mp.cpu_count()
# One task per PRN; each task carries the (shared) sample buffer.
results = mp.Pool(cpus).map(worker, map(lambda prn: (x,prn),prns))
for r in results:
    print(r)
| mit |
maxcountryman/flask-simpleoauth | flask_simpleoauth/frontend.py | 1 | 2309 | from flask import (Blueprint, flash, g, redirect, render_template, request,
session, url_for)
from .form import ConsumerForm, LoginForm, UserForm
from .model import Consumer, User
from .utils import login_required
# Blueprint grouping the web-frontend routes registered below.
frontend = Blueprint('frontend', __name__)
@frontend.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')
@frontend.route('/user/new/', methods=['GET', 'POST'])
def new_user():
    """Register a new user account from the signup form."""
    form = UserForm(request.form)
    if request.method == 'POST' and form.validate():
        user = User(login=form.login.data)
        # set_password presumably hashes before storing -- see the User model.
        user.set_password(form.password.data)
        user.save()
        flash('User created')
        return redirect(url_for('.index'))
    # GET, or failed validation: re-render the form.
    return render_template('new_user.html', form=form)
@frontend.route('/login/', methods=['GET', 'POST'])
def login():
    """Authenticate a user and record their id in the session."""
    # NOTE(review): next_url comes straight from the query string; consider
    # validating it against local routes to avoid an open redirect.
    next_url = request.args.get('next_url', url_for('.index'))
    form = LoginForm(request.form)
    if request.method == 'POST' and form.validate():
        user = User.objects(login=form.login.data).first()
        if user is not None and user.check_password(form.password.data):
            # A successful login is recorded under the 'user_id' session key.
            session['user_id'] = user.id
            return redirect(next_url)
    return render_template('login.html', form=form)
@frontend.route('/logout/')
def logout():
    """Log the current user out and redirect to the login page.

    Bug fix: this previously popped the session key 'login', which is never
    set anywhere (login() stores 'user_id'), so every logout raised KeyError
    and the session was never actually cleared.
    """
    next_url = request.args.get('next_url', url_for('.login'))
    # Pop with a default so logging out twice (or without a session) is a no-op.
    session.pop('user_id', None)
    return redirect(next_url)
@frontend.route('/apps/new/', methods=['GET', 'POST'])
@login_required
def apps_new():
    '''Registers a new OAuth consumer application with the provider.'''
    form = ConsumerForm(request.form)
    if request.method == 'POST' and form.validate():
        consumer = Consumer(name=form.name.data,
                            callback_uri=form.callback_uri.data)
        consumer.save()
        # g.user is presumably populated by the login_required decorator.
        g.user.owned_consumers.append(consumer.id)
        g.user.save()
        args = {'key': consumer.key, 'secret': consumer.secret}
        # NOTE(review): flashing the consumer secret shows it in the UI and
        # stores it in the session cookie -- confirm this is intended.
        flash('Consumer created. Key {key} Secret {secret}'.format(**args))
    # List every consumer app owned by the current user.
    owned_consumers = []
    for consumer_id in g.user.owned_consumers:
        consumer = Consumer.objects.with_id(consumer_id)
        owned_consumers.append(consumer)
    return render_template('new_app.html',
                           form=form,
                           owned_consumers=owned_consumers)
| bsd-3-clause |
NullInfinity/socman | socman.py | 2 | 17375 | """
socman is a society membership and event management library.
It tracks members in a sqlite database to enable membership tracking
and verification. It can also track event data such as name and date,
along with event attendance statistics.
The MIT License (MIT)
Copyright (c) 2016 Alexander Thorne
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import collections
import csv
from datetime import date, datetime
import sqlite3
class Error(Exception):
    """Base class for all exceptions raised by socman."""

    def __init__(self, *args):
        """Initialise the exception, forwarding all arguments to Exception."""
        super().__init__(*args)
class BadMemberError(Error):
    """Raised when a bad (typically None) member is passed.

    Attributes:
        member: the bad member object
    """

    def __init__(self, member, *args):
        """Record the offending *member*; remaining args go to Error."""
        super().__init__(*args)
        self.member = member
class IncompleteMemberError(Error):
    """Raised when an incomplete member is passed.

    A member counts as incomplete unless it has both barcode and name.

    Attributes:
        member: the incomplete member object
    """

    def __init__(self, member, *args):
        """Record the incomplete *member*; remaining args go to Error."""
        super().__init__(*args)
        self.member = member
class MemberNotFoundError(Error):
    """Raised when a member cannot be found in the database.

    Attributes:
        member: the member object that was searched for
    """

    def __init__(self, member, *args):
        """Record the missing *member*; remaining args go to Error."""
        super().__init__(*args)
        self.member = member
class Name:
    """A person's name.

    Attributes:
        names: a list of name strings (None entries act as place holders)
        sep: the separator string used when concatenating names
    """

    def __init__(self, *names, sep=' '):
        """Create a name from a tuple of strings (passed as variable arguments).

        Arguments:
            names: A list of names which should be strings (or None)
            sep: The separator used when concatenating parts of the name.

        To create a name with just a first name, pass None as the last name:
            Name('Ted', None)   # Ted is first name
            Name('Ted')         # Ted is last name
        The full name string will be 'Ted' in both cases above.

        Similarly, to create a name with just a middle name, pass None as
        both the first and the last name: Name(None, 'Ted', None).
        Any number of None names may be passed, as they are ignored when
        building name strings (except for place holding first/last names).
        """
        # None names in the list have semantic (positional) value, but a list
        # containing only Nones must behave like the empty Name().
        if not [name for name in names if name is not None]:
            names = ()
        self.names = list(names)
        self.sep = sep

    def __bool__(self):
        """A Name is falsy when it holds no name parts at all."""
        return bool(self.names)

    def __repr__(self):
        """Return an unambiguous, eval-able representation."""
        parts = [repr(name) for name in self.names]
        parts.append('sep={!r}'.format(self.sep))
        return '{}({})'.format(type(self).__name__, ', '.join(parts))

    def __eq__(self, other):
        """Names are equal when their parts and separator match."""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        # Bug fix: return NotImplemented (not False) for foreign types so
        # the other operand's reflected comparison gets a chance; `==` and
        # `!=` still fall back to identity for unrelated types.
        return NotImplemented

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __makestr(self, names):
        """Return arguments concatenated together separated by `self.sep`.

        Arguments that equal `None` or are entirely whitespace are omitted.
        """
        return self.sep.join([name for name in names if name and name.strip()])

    def first(self):
        """Return first name as a string."""
        return self.__makestr(self.names[:-1][:1])

    def middle(self):
        """Return middle names concatenated as a string."""
        return self.__makestr(self.names[1:-1])

    def given(self):
        """Return first and any middle names concatenated as a string."""
        return self.__makestr(self.names[:-1])

    def last(self):
        """Return last name as a string."""
        return self.__makestr(self.names[-1:])

    def full(self):
        """Return full name as a string."""
        return self.__makestr(self.names)
Member = collections.namedtuple('Member', 'barcode name college')
Member.__new__.__defaults__ = (None, None)
Member.__doc__ = """
A society member.
`name` and `college` default to None but `barcode` must be
given explicitly.
"""
class MemberDatabase:
    """Interface to a SQLite3 database of members."""

    class BadSearchAuthorityError(Error):
        """Raised when a bad search authority string is passed.

        The only permissible strings are 'barcode' and 'name'.

        Attributes:
            authority_string: the bad authority string
        """

        def __init__(self, authority_string, *args):
            """Create a BadSearchAuthorityError for a given authority string.

            Arguments:
                authority_string: the bad authority string
            """
            Error.__init__(self, *args)
            self.authority_string = authority_string

    def __init__(self, db_file='members.db', safe=True):
        """Create a MemberDatabase.

        Arguments:
            db_file: Filename and path of a SQLite3 database file.
                     Passed directly to sqlite3.Connect().
            safe: Boolean that determines whether non essential
                  operations are committed immediately or not.
                  Note that important operations like adding a member
                  are always committed regardless of this setting.
        """
        self.__connection = sqlite3.connect(db_file)
        self.__safe = safe

    def __del__(self):
        # Flush any outstanding changes on teardown, regardless of `safe`.
        self.__connection.commit()
        self.__connection.close()

    def optional_commit(self):
        """Commit changes to database if `safe` is set to `True`.

        This means increased performance can be chosen over the highest level
        of write safety. For example, by default a timestamp is updated in the
        database every time a record is accessed. If a lot of members are
        checked, it may not be desirable to commit after every such change.
        """
        if self.__safe:
            self.__connection.commit()

    def __sql_build_name_value_pairs(self, member, sep):
        """Build 'column=?' fragments and values for the member's name parts.

        Returns (None, None) when the member's name has no usable parts.
        """
        columns = []
        values = ()
        if member.name.first():
            columns += ['firstName=?']
            values += (member.name.first(), )
        if member.name.last():
            columns += ['lastName=?']
            values += (member.name.last(), )
        if not columns:
            # TODO remove None return or replace with exception if necessary
            return None, None
        return sep.join(columns), values

    def __sql_search_phrase(self, member, authority='barcode'):
        """Return the WHERE-clause fragment for looking up `member`."""
        if authority == 'barcode':
            return self.__sql_search_barcode_phrase(member)
        elif authority == 'name':
            return self.__sql_search_name_phrase(member)
        # Bug fix: the exception class was previously raised bare, which
        # implicitly instantiated it with no arguments and crashed with
        # TypeError (authority_string is required). Pass the bad value.
        raise MemberDatabase.BadSearchAuthorityError(authority)

    def __sql_search_barcode_phrase(self, member):
        """WHERE fragment matching on the member's barcode."""
        return 'barcode=?', (member.barcode, )

    def __sql_search_name_phrase(self, member):
        """WHERE fragment matching on the member's first/last name."""
        return self.__sql_build_name_value_pairs(member, ' AND ')

    def __sql_update_phrase(self, member, authority):
        """SET-clause fragment updating the non-authoritative field(s)."""
        # authority='barcode' yields name update query and vice versa
        # if the barcode is authoritative, it is the name we should update
        if authority == 'barcode':
            update_phrase = self.__sql_build_name_value_pairs(member, sep=',')
        elif authority == 'name':
            update_phrase = ('barcode=?', (member.barcode,))
        else:
            # Bug fix: see __sql_search_phrase -- instantiate with the value.
            raise MemberDatabase.BadSearchAuthorityError(authority)
        return self.__join_sql_cmds(update_phrase,
                                    (',updated_at=?', (datetime.utcnow(), )))

    def __join_sql_cmds(self, *cmds):
        """Concatenate (phrase, values) pairs into a single (phrase, values)."""
        phrase_list, values_list = zip(*cmds)
        return (''.join(phrase_list),
                tuple(value for values in values_list for value in values))

    def __update_timestamp(self, member, authority='barcode'):
        """Update member last_attended date."""
        self.__connection.cursor().execute(*self.__join_sql_cmds(
            ('UPDATE users SET last_attended=? WHERE ', (date.today(),)),
            self.__sql_search_phrase(member, authority)
        ))

    def __autofix(self, member, authority='barcode'):
        """Overwrite the non-authoritative field(s) with `member`'s values."""
        if member.barcode and member.name:
            self.__connection.cursor().execute(*self.__join_sql_cmds(
                ('UPDATE users SET ', ()),
                self.__sql_update_phrase(member, authority),
                (' WHERE ', ()),
                self.__sql_search_phrase(member, authority)
            ))

    def get_member(self, member, update_timestamp=True, autofix=False):
        """Retrieve a member's names from the database.

        Arguments:
            member: a member object to search for, should contain either
                    barcode, name or both
            update_timestamp: determines whether to update the record's
                    timestamp in the database when retrieved
            autofix: determines whether to fix broken records (see below)

        Returns:
            A tuple containing the member's first and last names as strings.

        Raises:
            MemberNotFoundError: A member was not found in a database lookup
            BadMemberError: The member passed to `get_member` has neither name
                    nor barcode.

        The barcode always takes precedence where possible. If both a name
        and barcode are supplied then the lookup is done by barcode, and only
        if that fails is the name used. With autofix enabled, whichever field
        was NOT used for the successful lookup is rewritten from `member`.
        Duplicate records are not deduplicated at this time.
        """
        cursor = self.__connection.cursor()
        if not member or not (member.barcode or member.name):
            raise BadMemberError(member)
        search_authority = None
        # first try to find member by barcode, if possible
        if member.barcode:
            cursor.execute(*self.__join_sql_cmds(
                ('SELECT firstName,lastName FROM users WHERE ', ()),
                self.__sql_search_barcode_phrase(member)
            ))
            users = cursor.fetchall()
            if users:
                search_authority = 'barcode'
        # barcode lookup failed; now try finding by name
        if not search_authority and member.name:
            cursor.execute(*self.__join_sql_cmds(
                ('SELECT firstName,lastName FROM users WHERE ', ()),
                self.__sql_search_name_phrase(member)
            ))
            users = cursor.fetchall()
            if users:
                search_authority = 'name'
        if not search_authority:
            raise MemberNotFoundError(member)
        if update_timestamp:
            self.__update_timestamp(member, authority=search_authority)
        if autofix:
            self.__autofix(member, authority=search_authority)
        if autofix or update_timestamp:
            self.optional_commit()
        # TODO dedupe if necessary
        return users[0]

    def __sql_add_query(self, member):
        """Build the INSERT statement and values for a brand new member."""
        # Missing barcode/name/college fields are stored as empty strings.
        barcode = member.barcode
        if not barcode:
            barcode = ''
        name = member.name
        if not name:
            name = Name()
        college = member.college
        if not college:
            college = ''
        return ("""INSERT INTO users (barcode, firstName, """
                """lastName, college, """
                """datejoined, created_at, updated_at, last_attended) """
                """VALUES (?, ?, ?, ?, ?, ?, ?, ?)""",
                (barcode, name.first(), name.last(), college,
                 date.today(), datetime.utcnow(),
                 datetime.utcnow(), date.today()))

    def add_member(self, member):
        """Add a member to the database.

        First the member is looked up with `get_member`. If found, autofixing
        is applied and the method returns. If the member is not found, he/she
        is inserted into the database (and the insert is always committed).

        Arguments:
            member: a member object to add, should contain at least one of a
                    name and a barcode

        Raises:
            BadMemberError: The member has neither name nor barcode.
        """
        if not member or not (member.barcode or member.name):
            raise BadMemberError(member)
        try:
            self.get_member(member=member, update_timestamp=True, autofix=True)
        except MemberNotFoundError:
            pass
        else:
            return  # member already present so we are done
        # if member does not exist, add him/her
        cursor = self.__connection.cursor()
        cursor.execute(*self.__sql_add_query(member))
        # direct commit here: don't want to lose new member data
        self.__connection.commit()

    def update_member(self, member, authority='barcode', update_timestamp=True):
        """Update the record for a member already in the database.

        Arguments:
            member: a member object to update; must have name and barcode
            authority: 'name' or 'barcode': which field is assumed correct
                    (the other will be overwritten)
            update_timestamp: whether the last_attended date should be updated

        Raises:
            BadMemberError: `member` has neither name nor barcode or is None
            IncompleteMemberError: `member` lacks either name or barcode
            MemberNotFoundError: `member` is not in the database
        """
        if not member or not (member.barcode or member.name):
            raise BadMemberError(member)
        if not (member.barcode and member.name):
            raise IncompleteMemberError(member)
        # if member is not in database, get_member raises MemberNotFoundError;
        # this also updates the member timestamp for us if desired
        self.get_member(member, update_timestamp=update_timestamp)
        self.__autofix(member, authority=authority)

    def member_count(self):
        """Return the number of members (rows in the `users` table)."""
        cursor = self.__connection.cursor()
        cursor.execute('SELECT COUNT(*) FROM users')
        return int(cursor.fetchone()[0])

    def write_csv(self, csv_filename):
        """Write the entire database to a CSV file.

        Bug fix: the file handle was previously opened inline and never
        closed (leaked until interpreter exit); a context manager now
        guarantees flush+close. newline='' is the documented open() mode
        for csv writers (prevents blank rows on Windows).
        """
        with open(csv_filename, 'w', newline='') as csv_file:
            csv_writer = csv.writer(csv_file)
            cursor = self.__connection.cursor()
            cursor.execute('SELECT * FROM users')
            csv_writer.writerow([
                'id',
                'firstName',
                'lastName',
                'barcode',
                'datejoined',
                'created_at',
                'updated_at',
                'college',
                'last_attended',
                'unpaid'
            ])
            csv_writer.writerows(cursor.fetchall())
| mit |
wbond/subversion | tools/examples/dumpprops.py | 7 | 2292 | #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# USAGE: dumprops.py [-r REV] repos-path [file]
#
# dump out the properties on a given path (recursively if given a dir)
#
import sys
import os
import getopt
# Prefer GNU-style parsing (options may follow positional arguments); fall
# back to plain POSIX getopt on Pythons without gnu_getopt.
try:
  my_getopt = getopt.gnu_getopt
except AttributeError:
  my_getopt = getopt.getopt
import pprint
from svn import fs, core, repos
def dumpprops(path, filename='', rev=None):
  """Print versioned properties of `filename` in the repository at `path`.

  Uses revision `rev`, defaulting to the repository's youngest revision.
  Recurses into subdirectories when `filename` is a directory.
  """
  path = core.svn_path_canonicalize(path)
  repos_ptr = repos.open(path)
  fsob = repos.fs(repos_ptr)
  if rev is None:
    rev = fs.youngest_rev(fsob)
  root = fs.revision_root(fsob, rev)
  print_props(root, filename)
  if fs.is_dir(root, filename):
    walk_tree(root, filename)
def print_props(root, path):
  """Pretty-print every property attached to `path` under `root`."""
  raw_props = fs.node_proplist(root, path)
  # The bindings may hand back buffer objects; coerce values to str so they
  # print cleanly.
  props = {key: str(value) for key, value in raw_props.items()}
  print('--- %s' % path)
  pprint.pprint(props)
def walk_tree(root, path):
  """Depth-first traversal printing properties of everything below `path`."""
  for entry in fs.dir_entries(root, path).keys():
    child = path + '/' + entry
    print_props(root, child)
    if fs.is_dir(root, child):
      walk_tree(root, child)
def usage():
  """Print the command-line usage message and terminate with status 1."""
  print("USAGE: dumpprops.py [-r REV] repos-path [file]")
  raise SystemExit(1)
def main():
    """Entry point: parse [-r REV] plus positional arguments and dump."""
    opts, args = my_getopt(sys.argv[1:], 'r:')
    rev = None
    for flag, value in opts:
        if flag == '-r':
            rev = int(value)
    # exactly one or two positional args are accepted; usage() exits
    if not 1 <= len(args) <= 2:
        usage()
    target = args[1] if len(args) == 2 else ""
    dumpprops(args[0], target, rev)
# Run as a command-line tool when executed directly.
if __name__ == '__main__':
    main()
| apache-2.0 |
MangoMangoDevelopment/neptune | lib/ros_comm-1.12.0/utilities/message_filters/test/test_approxsync.py | 1 | 3888 | #!/usr/bin/env python
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Willow Garage nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import rostest
import rospy
import unittest
import random
import message_filters
from message_filters import ApproximateTimeSynchronizer
class MockHeader:
    """Empty stand-in for a ROS message header; tests attach ``stamp``."""
    pass


class MockMessage:
    """Minimal message double exposing ``header.stamp`` and ``data``."""

    def __init__(self, stamp, data):
        header = MockHeader()
        header.stamp = stamp
        self.header = header
        self.data = data
class MockFilter(message_filters.SimpleFilter):
    # Bare SimpleFilter; tests call signalMessage() on it directly to feed
    # messages into the synchronizer without a real ROS subscriber.
    pass
class TestApproxSync(unittest.TestCase):
    # Tests for message_filters.ApproximateTimeSynchronizer.

    def cb_collector_2msg(self, msg1, msg2):
        # Synchronizer callback: record each matched pair for inspection.
        self.collector.append((msg1, msg2))

    def test_approx(self):
        m0 = MockFilter()
        m1 = MockFilter()
        ts = ApproximateTimeSynchronizer([m0, m1], 1, 0.1)
        ts.registerCallback(self.cb_collector_2msg)

        # NOTE(review): the simple pairwise case below is disabled (if 0).
        if 0:
            # Simple case, pairs of messages, make sure that they get combined
            for t in range(10):
                self.collector = []
                msg0 = MockMessage(t, 33)
                msg1 = MockMessage(t, 34)
                m0.signalMessage(msg0)
                self.assertEqual(self.collector, [])
                m1.signalMessage(msg1)
                self.assertEqual(self.collector, [(msg0, msg1)])

        # Scramble sequences of length N. Make sure that TimeSequencer recombines them.
        # Seeding makes the random sampling below reproducible.
        random.seed(0)
        for N in range(1, 10):
            m0 = MockFilter()
            m1 = MockFilter()
            seq0 = [MockMessage(rospy.Time(t), random.random()) for t in range(N)]
            seq1 = [MockMessage(rospy.Time(t), random.random()) for t in range(N)]
            # random.shuffle(seq0)
            ts = ApproximateTimeSynchronizer([m0, m1], N, 0.1)
            ts.registerCallback(self.cb_collector_2msg)
            self.collector = []
            # feed stream 0 in random order; nothing can match yet
            for msg in random.sample(seq0, N):
                m0.signalMessage(msg)
            self.assertEqual(self.collector, [])
            # feed stream 1 in random order; every pair should now match
            for msg in random.sample(seq1, N):
                m1.signalMessage(msg)
            self.assertEqual(set(self.collector), set(zip(seq0, seq1)))
if __name__ == '__main__':
    # Toggle: 1 = run under rostest, 0 = run with plain unittest.
    if 1:
        rostest.unitrun('camera_calibration', 'testapproxsync', TestApproxSync)
    else:
        suite = unittest.TestSuite()
        suite.addTest(TestApproxSync('test_approx'))
        unittest.TextTestRunner(verbosity=2).run(suite)
| bsd-3-clause |
gfarnadi/FairPSL | problems/performance_review/data/hierachy_generation.py | 1 | 3985 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import os, random
import queue
# In[2]:
def hierachy_generator(k, size):
    """Grow a k-ary management tree in BFS order until it holds `size`
    employees named 'e0', 'e1', ...

    Returns (children_by_node, superiors_by_node, employees, next_index).
    """
    hierachy_dict = {}
    sub_ordinates_dict = {}
    employees = []
    index = 0
    root = 'e%d' % index
    index += 1
    employees.append(root)
    hierachy_dict[root] = []
    sub_ordinates_dict[root] = []
    todo = queue.Queue()
    todo.put(root)
    # expand nodes breadth-first until enough employees exist
    while index < size:
        node = todo.get()
        (hierachy_dict, sub_ordinates_dict, employees,
         todo, index) = generate_sub_ordinates(
            hierachy_dict, sub_ordinates_dict, employees,
            node, todo, k, index)
    return hierachy_dict, sub_ordinates_dict, employees, index
# In[3]:
def generate_sub_ordinates(hierachy_dict, sub_ordinates_dict, employees, node, return_nodes, k, index):
    """Give `node` children until it has k direct reports.

    Each new child is queued on `return_nodes` for later expansion and
    inherits `node` plus all of node's superiors as its own superiors.
    Returns the updated structures and the next free employee index.
    """
    while len(hierachy_dict[node]) < k:
        child = 'e%d' % index
        index += 1
        employees.append(child)
        return_nodes.put(child)
        hierachy_dict[node].append(child)
        hierachy_dict[child] = []
        sub_ordinates_dict[child] = [node]
        # propagate the parent's chain of superiors, skipping duplicates
        for boss in sub_ordinates_dict[node]:
            if boss in sub_ordinates_dict[child] or boss == child:
                continue
            sub_ordinates_dict[child].append(boss)
    return hierachy_dict, sub_ordinates_dict, employees, return_nodes, index
# In[4]:
def generate_label(hierachy_dict, sub_ordinates_dict, employees, manager_file, label_file, employees_file, delta):
    """Write fixture files using the 'glass ceiling' labelling.

    manager_file   : one "manager<TAB>employee" line per superior pair.
    label_file     : juniors (no direct reports) drawn into group A with
                     probability `delta`; everyone else is labelled B.
    employees_file : one employee id per line.
    Prints the number of A labels assigned.
    """
    with open(manager_file, 'w') as mf:
        for employee, managers in sub_ordinates_dict.items():
            for manager in managers:
                print('%s\t%s' % (manager, employee), file=mf)
    # juniors are the leaves of the hierarchy (no direct reports)
    junior_employees = [emp for emp, reports in hierachy_dict.items()
                        if len(reports) == 0]
    size_A = 0
    with open(label_file, 'w') as lf:
        for emp in junior_employees:
            if random.random() < delta:
                print('%s\tA' % emp, file=lf)
                size_A += 1
            else:
                print('%s\tB' % emp, file=lf)
        for emp in employees:
            if emp in junior_employees:
                continue
            print('%s\tB' % emp, file=lf)
    print(size_A)
    with open(employees_file, 'w') as ef:
        for emp in employees:
            print('%s' % emp, file=ef)
# In[66]:
def generate_label_2(hierachy_dict, sub_ordinates_dict, employees, manager_file, label_file, employees_file, delta):
    """Write fixture files using a uniform labelling.

    manager_file   : one "manager<TAB>employee" line per superior pair.
    label_file     : every employee labelled A or B with probability 0.5.
    employees_file : one employee id per line.
    Prints the number of A labels assigned.

    NOTE: `delta` and `hierachy_dict` are accepted only for signature
    parity with generate_label(); neither affects the uniform labelling.
    FIX: removed the dead computation of `junior_employees`, which was
    built but never used in this variant.
    """
    with open(manager_file, 'w') as mf:
        for employee, managers in sub_ordinates_dict.items():
            for manager in managers:
                print('%s\t%s' % (manager, employee), file=mf)
    size_A = 0
    with open(label_file, 'w') as lf:
        for emp in employees:
            if random.random() < 0.5:
                print('%s\tA' % emp, file=lf)
                size_A += 1
            else:
                print('%s\tB' % emp, file=lf)
    print(size_A)
    with open(employees_file, 'w') as ef:
        for emp in employees:
            print('%s' % emp, file=ef)
# In[102]:
def run_generator(k, size, manager_file, label_file, employees_file, delta):
    """Generate a k-ary hierarchy of `size` employees, report its sizes,
    and write the uniformly-random labelling to the given files."""
    hier, subs, emps, _index = hierachy_generator(k, size)
    for count in (len(emps), len(hier), len(subs)):
        print(count)
    # The glass-ceiling labelling (generate_label) is intentionally
    # disabled; the uniform variant is used instead.
    generate_label_2(hier, subs, emps, manager_file, label_file,
                     employees_file, delta)
# In[111]:
# Script parameters: build a 5-ary tree of 100 employees and write the
# test fixtures.  NOTE(review): this runs at import time and writes to
# relative paths — confirm the intended working directory.
k = 5
size = 100
manager_file = '../data/test/manager.txt'
label_file = '../data/test/label.txt'
employees_file = '../data/test/employee.txt'
delta = 0.7
run_generator(k, size, manager_file, label_file, employees_file, delta)
# In[ ]:
# In[ ]:
| mit |
Coelhon/MasterRepo.repository | plugin.video.specto/resources/lib/resolvers/thevideo.py | 10 | 1467 | # -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,ast
from resources.lib.libraries import client
def resolve(url):
    """Resolve a thevideo.me URL to a direct stream URL.

    This resolver is intentionally disabled and always returns None,
    which callers treat as "unresolvable".  FIX: the scraping logic that
    used to follow the bare ``return`` was unreachable dead code and has
    been removed; restore it from version control if the host is ever
    re-enabled.
    """
    return None
return
def check(url):
    """Best-effort availability probe: True when the hosted file looks
    present, False on any fetch failure or a 'File Deleted.' page."""
    try:
        result = client.request(url)
        if result is None:
            return False
        return 'File Deleted.' not in result
    except:
        # network/parse failures are treated as "not available"
        return False
| gpl-2.0 |
marshally/aubio | python/aubio/task/task.py | 13 | 1409 | from aubio.aubioclass import *
from params import taskparams
class task(taskparams):
    """ default template class to apply tasks on a stream """
    # Subclasses override __call__ to compute one hop of data and
    # eval()/plot() to post-process the collected results.

    def __init__(self, input, output=None, params=None):
        """ open the input file and initialize default argument
        parameters should be set *before* calling this method.
        """
        import time
        self.tic = time.time()  # wall-clock start, read back by self.time()
        if params == None: self.params = taskparams()
        else: self.params = params
        self.frameread = 0
        self.readsize = self.params.hopsize
        self.input = input
        self.filei = sndfile(self.input)
        self.srate = self.filei.samplerate()
        # step: duration of one hop in seconds
        self.params.step = float(self.params.hopsize)/float(self.srate)
        self.myvec = fvec(self.params.hopsize)
        self.output = output

    def __call__(self):
        # Read the next hop into self.myvec; a short read (readsize !=
        # hopsize) signals end of file to compute_all().
        self.readsize = self.filei.read(self.params.hopsize, self.myvec)
        self.frameread += 1

    def compute_all(self):
        """ Compute data """
        # Drive __call__ over the whole stream, collecting non-None results.
        mylist = []
        while(self.readsize == self.params.hopsize):
            tmp = self()
            if tmp:
                mylist.append(tmp)
                if self.params.verbose:
                    self.fprint(tmp)
        return mylist

    def fprint(self, foo):
        # Default printer; subclasses may override for custom formatting.
        print foo

    def eval(self, results):
        """ Eval data """
        pass

    def plot(self):
        """ Plot data """
        pass

    def time(self):
        # Return seconds elapsed since this task object was created.
        import time
        #print "CPU time is now %f seconds," % time.clock(),
        #print "task execution took %f seconds" % (time.time() - self.tic)
        return time.time() - self.tic
| gpl-3.0 |
Anonymouslemming/ansible | lib/ansible/module_utils/eos.py | 24 | 16409 | #
# This code is part of Ansible, but is an independent component.
#
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat, Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import time
from ansible.module_utils._text import to_text, to_native
from ansible.module_utils.basic import env_fallback, return_values
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network_common import to_list, ComplexList
from ansible.module_utils.six import iteritems
from ansible.module_utils.urls import fetch_url
# Lazily-created transport singleton (Cli or Eapi); see get_connection().
_DEVICE_CONNECTION = None

# Argument spec shared by all eos_* modules.  Each value may also come
# from the nested 'provider' dict or from ANSIBLE_NET_* environment
# variables (see the env_fallback entries).
eos_argument_spec = {
    'host': dict(),
    'port': dict(type='int'),
    'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME']), aliases=['name']),
    'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
    'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
    'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
    'auth_pass': dict(no_log=True, fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS'])),
    'use_ssl': dict(type='bool'),
    'validate_certs': dict(type='bool'),
    'timeout': dict(type='int'),
    'provider': dict(type='dict'),
    'transport': dict(choices=['cli', 'eapi'])
}

# Add argument's default value here
# (applied in check_args() when the user supplied no value)
ARGS_DEFAULT_VALUE = {
    'transport': 'cli',
    'use_ssl': True,
    'validate_certs': True
}
def get_argspec():
    """Return the shared EOS argument spec dict."""
    return eos_argument_spec
def check_args(module, warnings):
    """Validate top-level args, emit deprecation warnings, apply defaults.

    Top-level connection arguments are deprecated in favour of the nested
    'provider' dict; provider passwords are registered with no_log so
    they never appear in output.
    """
    provider = module.params['provider'] or {}
    for key in eos_argument_spec:
        if module._name == 'eos_user':
            # eos_user legitimately accepts username/password at top level
            if (key not in ['username', 'password', 'provider', 'transport', 'authorize'] and
                    module.params[key]):
                warnings.append('argument %s has been deprecated and will be removed in a future version' % key)
        else:
            if key not in ['provider', 'authorize'] and module.params[key]:
                warnings.append('argument %s has been deprecated and will be removed in a future version' % key)

    # set argument's default value if not provided in input
    # This is done to avoid unwanted argument deprecation warning
    # in case argument is not given as input (outside provider).
    for key in ARGS_DEFAULT_VALUE:
        if not module.params.get(key, None):
            module.params[key] = ARGS_DEFAULT_VALUE[key]

    if provider:
        for param in ('auth_pass', 'password'):
            if provider.get(param):
                # keep provider-supplied secrets out of logs/results
                module.no_log_values.update(return_values(provider[param]))
def load_params(module):
    """Copy values from the nested 'provider' dict up to the top-level
    params when the top-level value is unset."""
    provider = module.params.get('provider') or dict()
    for key, value in iteritems(provider):
        if key not in eos_argument_spec:
            continue
        if module.params.get(key) is None and value is not None:
            module.params[key] = value
def get_connection(module):
    """Return the module-global transport, creating it on first use.

    The transport is Eapi (HTTP/S JSON-RPC) when the eapi transport is
    selected, otherwise Cli; it is cached in _DEVICE_CONNECTION so all
    helpers in this module share one connection object.
    """
    global _DEVICE_CONNECTION
    if not _DEVICE_CONNECTION:
        load_params(module)
        if is_eapi(module):
            conn = Eapi(module)
        else:
            conn = Cli(module)
        _DEVICE_CONNECTION = conn
    return _DEVICE_CONNECTION
class Cli:
    """CLI transport for EOS modules.

    Commands run over the module's persistent connection via
    ``exec_command``; running-config output is cached per command and
    configuration changes use config sessions when the device supports
    them.
    """

    def __init__(self, module):
        self._module = module
        # cache of 'show running-config ...' output keyed by full command
        self._device_configs = {}
        # tri-state: None until probed, then True/False
        self._session_support = None

    @property
    def supports_sessions(self):
        """Probe once (and cache) whether the device supports config sessions."""
        if self._session_support is not None:
            return self._session_support
        rc, out, err = self.exec_command('show configuration sessions')
        self._session_support = rc == 0
        return self._session_support

    def exec_command(self, command):
        """Run one command; dict commands are serialized to JSON first.

        Returns the (rc, out, err) triple from the connection plugin.
        """
        if isinstance(command, dict):
            command = self._module.jsonify(command)
        return exec_command(self._module, command)

    def check_authorization(self):
        """Return True when the session prompt shows privileged mode ('#')."""
        for cmd in ['show clock', 'prompt()']:
            rc, out, err = self.exec_command(cmd)
        out = to_text(out, errors='surrogate_then_replace')
        return out.endswith('#')

    def get_config(self, flags=None):
        """Retrieve the current config from the device or cache.

        FIX: the default for ``flags`` was a shared mutable list literal;
        also dropped an unused (and wrongly-argumented) get_connection(self)
        call.
        """
        flags = [] if flags is None else flags
        cmd = 'show running-config '
        cmd += ' '.join(flags)
        cmd = cmd.strip()

        try:
            return self._device_configs[cmd]
        except KeyError:
            rc, out, err = self.exec_command(cmd)
            out = to_text(out, errors='surrogate_then_replace')
            if rc != 0:
                self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
            cfg = str(out).strip()
            self._device_configs[cmd] = cfg
            return cfg

    def run_commands(self, commands, check_rc=True):
        """Run a list of commands; JSON output is parsed when possible."""
        responses = list()
        for cmd in to_list(commands):
            rc, out, err = self.exec_command(cmd)
            out = to_text(out, errors='surrogate_then_replace')
            if check_rc and rc != 0:
                self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
            try:
                out = self._module.from_json(out)
            except ValueError:
                out = str(out).strip()
            responses.append(out)
        return responses

    def send_config(self, commands):
        """Send config lines one at a time.

        banner ... EOF blocks are sent raw ('sendonly') because the device
        returns no prompt until the banner is terminated.
        """
        multiline = False
        rc = 0
        for command in to_list(commands):
            if command == 'end':
                pass
            if command.startswith('banner') or multiline:
                multiline = True
                command = self._module.jsonify({'command': command, 'sendonly': True})
            elif command == 'EOF' and multiline:
                multiline = False
            rc, out, err = self.exec_command(command)
            if rc != 0:
                return (rc, out, to_text(err, errors='surrogate_then_replace'))
        return (rc, 'ok', '')

    def configure(self, commands):
        """Apply config commands directly (no config session)."""
        if not self.check_authorization():
            self._module.fail_json(msg='configuration operations require privilege escalation')
        rc, out, err = self.exec_command('configure')
        if rc != 0:
            self._module.fail_json(msg='unable to enter configuration mode',
                                   output=to_text(err, errors='surrogate_then_replace'))
        rc, out, err = self.send_config(commands)
        if rc != 0:
            self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))
        self.exec_command('end')
        return {}

    def load_config(self, commands, commit=False, replace=False):
        """Load config via a config session, falling back to configure().

        Returns a dict with the session name and, when available, the diff.
        """
        if not self.check_authorization():
            self._module.fail_json(msg='configuration operations require privilege escalation')
        use_session = os.getenv('ANSIBLE_EOS_USE_SESSIONS', True)
        try:
            use_session = int(use_session)
        except ValueError:
            pass

        if not all((bool(use_session), self.supports_sessions)):
            # FIX: was self.configure(self, commands), which raised a
            # TypeError (extra positional argument on a bound method).
            return self.configure(commands)

        session = 'ansible_%s' % int(time.time())
        result = {'session': session}

        rc, out, err = self.exec_command('configure session %s' % session)
        if rc != 0:
            self._module.fail_json(msg='unable to enter configuration mode',
                                   output=to_text(err, errors='surrogate_then_replace'))
        if replace:
            # FIX: exec_command() accepts no check_rc keyword; check the
            # return code explicitly and abort the session on failure.
            rc, out, err = self.exec_command('rollback clean-config')
            if rc != 0:
                self.exec_command('abort')
                self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'))

        rc, out, err = self.send_config(commands)
        if rc != 0:
            self.exec_command('abort')
            self._module.fail_json(msg=to_text(err, errors='surrogate_then_replace'), commands=commands)

        rc, out, err = self.exec_command('show session-config diffs')
        if rc == 0 and out:
            result['diff'] = to_text(out, errors='surrogate_then_replace').strip()

        if commit:
            self.exec_command('commit')
        else:
            self.exec_command('abort')

        return result
class Eapi:
    """HTTP/S eAPI (JSON-RPC 'runCmds') transport for EOS modules."""

    def __init__(self, module):
        self._module = module
        self._enable = None
        self._session_support = None
        self._device_configs = {}

        host = module.params['provider']['host']
        port = module.params['provider']['port']

        # fetch_url() reads credentials from url_username/url_password
        self._module.params['url_username'] = self._module.params['username']
        self._module.params['url_password'] = self._module.params['password']

        if module.params['provider']['use_ssl']:
            proto = 'https'
        else:
            proto = 'http'
        module.params['validate_certs'] = module.params['provider']['validate_certs']

        self._url = '%s://%s:%s/command-api' % (proto, host, port)

        # privileged mode: an 'enable' command (with password when set)
        # is prefixed to every request in send_request()
        if module.params['auth_pass']:
            self._enable = {'cmd': 'enable', 'input': module.params['auth_pass']}
        else:
            self._enable = 'enable'

    @property
    def supports_sessions(self):
        """Probe once (and cache) whether the device supports config sessions."""
        if self._session_support:
            return self._session_support
        response = self.send_request(['show configuration sessions'])
        self._session_support = 'error' not in response
        return self._session_support

    def _request_builder(self, commands, output, reqid=None):
        """Build the JSON-RPC 2.0 'runCmds' payload."""
        params = dict(version=1, cmds=commands, format=output)
        return dict(jsonrpc='2.0', id=reqid, method='runCmds', params=params)

    def send_request(self, commands, output='text'):
        """POST commands to /command-api and return the decoded response.

        The response entry for the implicit 'enable' command is stripped
        from the result list before returning.
        """
        commands = to_list(commands)

        if self._enable:
            commands.insert(0, 'enable')

        body = self._request_builder(commands, output)
        data = self._module.jsonify(body)

        headers = {'Content-Type': 'application/json-rpc'}
        timeout = self._module.params['timeout']

        response, headers = fetch_url(
            self._module, self._url, data=data, headers=headers,
            method='POST', timeout=timeout
        )

        if headers['status'] != 200:
            self._module.fail_json(**headers)

        try:
            data = response.read()
            response = self._module.from_json(to_text(data, errors='surrogate_then_replace'))
        except ValueError:
            self._module.fail_json(msg='unable to load response from device', data=data)

        if self._enable and 'result' in response:
            response['result'].pop(0)

        return response

    def run_commands(self, commands):
        """Run commands, batching consecutive commands that share the same
        output format into a single eAPI request."""
        output = None
        queue = list()
        responses = list()

        def _send(commands, output):
            # issue one batched request; abort the module on eAPI errors
            response = self.send_request(commands, output=output)
            if 'error' in response:
                err = response['error']
                self._module.fail_json(msg=err['message'], code=err['code'])
            return response['result']

        for item in to_list(commands):
            if is_json(item['command']):
                item['command'] = str(item['command']).replace('| json', '')
                item['output'] = 'json'

            if output and output != item['output']:
                responses.extend(_send(queue, output))
                queue = list()

            output = item['output'] or 'json'
            queue.append(item['command'])

        if queue:
            responses.extend(_send(queue, output))

        # unwrap plain-text results to bare strings
        for index, item in enumerate(commands):
            try:
                responses[index] = responses[index]['output'].strip()
            except KeyError:
                pass

        return responses

    def get_config(self, flags=None):
        """Retrieve the current config from the device or cache.

        FIX: the default for ``flags`` was a shared mutable list literal.
        """
        flags = [] if flags is None else flags
        cmd = 'show running-config '
        cmd += ' '.join(flags)
        cmd = cmd.strip()

        try:
            return self._device_configs[cmd]
        except KeyError:
            out = self.send_request(cmd)
            cfg = str(out['result'][0]['output']).strip()
            self._device_configs[cmd] = cfg
            return cfg

    def configure(self, commands):
        """Sends the ordered set of commands to the device."""
        cmds = ['configure terminal']
        cmds.extend(commands)

        # FIX: previously sent `commands`, silently dropping the
        # 'configure terminal' prefix built above.
        responses = self.send_request(cmds)
        if 'error' in responses:
            err = responses['error']
            self._module.fail_json(msg=err['message'], code=err['code'])

        # FIX: `responses` is a dict; slice the result list instead,
        # dropping the 'configure terminal' echo.
        return responses['result'][1:]

    def load_config(self, config, commit=False, replace=False):
        """Load the configuration via a config session.

        Falls back to configure() when the device doesn't support
        sessions (no diff or session key is returned in that case).
        """
        if not self.supports_sessions:
            # FIX: was self.configure(self, config) -- extra self argument
            # raised a TypeError on a bound method.
            return self.configure(config)

        session = 'ansible_%s' % int(time.time())
        result = {'session': session}

        commands = ['configure session %s' % session]
        if replace:
            commands.append('rollback clean-config')

        commands.extend(config)

        response = self.send_request(commands)
        if 'error' in response:
            # abort the half-applied session before failing
            commands = ['configure session %s' % session, 'abort']
            self.send_request(commands)
            err = response['error']
            self._module.fail_json(msg=err['message'], code=err['code'])

        commands = ['configure session %s' % session, 'show session-config diffs']
        if commit:
            commands.append('commit')
        else:
            commands.append('abort')

        response = self.send_request(commands, output='text')
        diff = response['result'][1]['output']
        if len(diff) > 0:
            result['diff'] = diff

        return result
def is_json(cmd):
    """Return True when the command pipes its output through '| json'."""
    text = to_native(cmd, errors='surrogate_then_replace')
    return text.endswith('| json')
def is_eapi(module):
    """Return True when the eAPI (HTTP/S) transport is selected, either
    at the top level or inside the provider dict."""
    provider = module.params['provider'] or {}
    return 'eapi' in (module.params['transport'], provider.get('transport'))
def to_command(module, commands):
    """Normalize `commands` into a list of command dicts, defaulting the
    output format to 'json' for eAPI and 'text' for CLI."""
    default_output = 'json' if is_eapi(module) else 'text'
    spec = dict(
        command=dict(key=True),
        output=dict(default=default_output),
        prompt=dict(),
        answer=dict()
    )
    transform = ComplexList(spec, module)
    return transform(to_list(commands))
def get_config(module, flags=None):
    """Return the device running-config via the active transport.

    FIX: the default for ``flags`` was a shared mutable list literal.
    """
    conn = get_connection(module)
    return conn.get_config(flags if flags is not None else [])
def run_commands(module, commands):
    """Execute `commands` on the device through the active transport."""
    return get_connection(module).run_commands(to_command(module, commands))
def load_config(module, config, commit=False, replace=False):
    """Load `config` onto the device through the active transport."""
    return get_connection(module).load_config(config, commit, replace)
| gpl-3.0 |
hgl888/chromium-crosswalk-efl | third_party/cython/src/Cython/Compiler/CmdLine.py | 90 | 8191 | #
# Cython - Command Line Parsing
#
import os
import sys
import Options
usage = """\
Cython (http://cython.org) is a compiler for code written in the
Cython language. Cython is based on Pyrex by Greg Ewing.
Usage: cython [options] sourcefile.{pyx,py} ...
Options:
-V, --version Display version number of cython compiler
-l, --create-listing Write error messages to a listing file
-I, --include-dir <directory> Search for include files in named directory
(multiple include directories are allowed).
-o, --output-file <filename> Specify name of generated C file
-t, --timestamps Only compile newer source files
-f, --force Compile all source files (overrides implied -t)
-v, --verbose Be verbose, print file names on multiple compilation
-p, --embed-positions If specified, the positions in Cython files of each
function definition is embedded in its docstring.
--cleanup <level> Release interned objects on python exit, for memory debugging.
Level indicates aggressiveness, default 0 releases nothing.
-w, --working <directory> Sets the working directory for Cython (the directory modules
are searched from)
--gdb Output debug information for cygdb
--gdb-outdir <directory> Specify gdb debug information output directory. Implies --gdb.
-D, --no-docstrings Strip docstrings from the compiled module.
-a, --annotate Produce a colorized HTML version of the source.
--line-directives Produce #line directives pointing to the .pyx source
--cplus Output a C++ rather than C file.
--embed[=<method_name>] Generate a main() function that embeds the Python interpreter.
-2 Compile based on Python-2 syntax and code semantics.
-3 Compile based on Python-3 syntax and code semantics.
--lenient Change some compile time errors to runtime errors to
improve Python compatibility
--capi-reexport-cincludes Add cincluded headers to any auto-generated header files.
--fast-fail Abort the compilation on the first error
--warning-errors, -Werror Make all warnings into errors
--warning-extra, -Wextra Enable extra warnings
-X, --directive <name>=<value>[,<name=value,...] Overrides a compiler directive
"""
#The following experimental options are supported only on MacOSX:
# -C, --compile Compile generated .c file to .o file
# --link Link .o file to produce extension module (implies -C)
# -+, --cplus Use C++ compiler for compiling and linking
# Additional .o files to link may be supplied when using -X."""
def bad_usage():
    # Print the full usage text to stderr and exit with status 1.
    sys.stderr.write(usage)
    sys.exit(1)
def parse_command_line(args):
    """Parse Cython's command-line arguments.

    Returns (CompilationOptions, sources).  Unknown flags and invalid
    argument combinations exit the process via bad_usage()/sys.exit().
    """
    from Cython.Compiler.Main import \
        CompilationOptions, default_options

    def pop_arg():
        # Consume and return the next argument; exit via bad_usage() on underflow.
        if args:
            return args.pop(0)
        else:
            bad_usage()

    def get_param(option):
        # Value attached to a short option, e.g. '-Idir' or '-I dir'.
        tail = option[2:]
        if tail:
            return tail
        else:
            return pop_arg()

    options = CompilationOptions(default_options)
    sources = []
    while args:
        if args[0].startswith("-"):
            option = pop_arg()
            if option in ("-V", "--version"):
                options.show_version = 1
            elif option in ("-l", "--create-listing"):
                options.use_listing_file = 1
            elif option in ("-+", "--cplus"):
                options.cplus = 1
            elif option == "--embed":
                Options.embed = "main"
            elif option.startswith("--embed="):
                Options.embed = option[8:]
            elif option.startswith("-I"):
                options.include_path.append(get_param(option))
            elif option == "--include-dir":
                options.include_path.append(pop_arg())
            elif option in ("-w", "--working"):
                options.working_path = pop_arg()
            elif option in ("-o", "--output-file"):
                options.output_file = pop_arg()
            elif option in ("-t", "--timestamps"):
                options.timestamps = 1
            elif option in ("-f", "--force"):
                options.timestamps = 0
            elif option in ("-v", "--verbose"):
                options.verbose += 1
            elif option in ("-p", "--embed-positions"):
                Options.embed_pos_in_docstring = 1
            elif option in ("-z", "--pre-import"):
                Options.pre_import = pop_arg()
            elif option == "--cleanup":
                Options.generate_cleanup_code = int(pop_arg())
            elif option in ("-D", "--no-docstrings"):
                Options.docstrings = False
            elif option in ("-a", "--annotate"):
                Options.annotate = True
            elif option == "--convert-range":
                Options.convert_range = True
            elif option == "--line-directives":
                options.emit_linenums = True
            elif option == "--no-c-in-traceback":
                options.c_line_in_traceback = False
            elif option == "--gdb":
                options.gdb_debug = True
                options.output_dir = os.curdir
            elif option == "--gdb-outdir":
                options.gdb_debug = True
                options.output_dir = pop_arg()
            elif option == "--lenient":
                # downgrade some compile-time errors to runtime errors
                Options.error_on_unknown_names = False
                Options.error_on_uninitialized = False
            elif option == '-2':
                options.language_level = 2
            elif option == '-3':
                options.language_level = 3
            elif option == "--capi-reexport-cincludes":
                options.capi_reexport_cincludes = True
            elif option == "--fast-fail":
                Options.fast_fail = True
            elif option in ('-Werror', '--warning-errors'):
                Options.warning_errors = True
            elif option in ('-Wextra', '--warning-extra'):
                options.compiler_directives.update(Options.extra_warnings)
            elif option == "--old-style-globals":
                Options.old_style_globals = True
            elif option == "--directive" or option.startswith('-X'):
                # -Xname=value or --directive name=value (comma-separated list)
                if option.startswith('-X') and option[2:].strip():
                    x_args = option[2:]
                else:
                    x_args = pop_arg()
                try:
                    options.compiler_directives = Options.parse_directive_list(
                        x_args, relaxed_bool=True,
                        current_settings=options.compiler_directives)
                except ValueError, e:
                    # NOTE: Python 2 except syntax -- this module targets py2
                    sys.stderr.write("Error in compiler directive: %s\n" % e.args[0])
                    sys.exit(1)
            elif option.startswith('--debug'):
                # --debug-<flag> toggles the matching DebugFlags attribute
                option = option[2:].replace('-', '_')
                import DebugFlags
                if option in dir(DebugFlags):
                    setattr(DebugFlags, option, True)
                else:
                    sys.stderr.write("Unknown debug flag: %s\n" % option)
                    bad_usage()
            elif option in ('-h', '--help'):
                sys.stdout.write(usage)
                sys.exit(0)
            else:
                sys.stderr.write("Unknown compiler flag: %s\n" % option)
                sys.exit(1)
        else:
            sources.append(pop_arg())

    # post-parse sanity checks
    if options.use_listing_file and len(sources) > 1:
        sys.stderr.write(
            "cython: Only one source file allowed when using -o\n")
        sys.exit(1)
    if len(sources) == 0 and not options.show_version:
        bad_usage()
    if Options.embed and len(sources) > 1:
        sys.stderr.write(
            "cython: Only one source file allowed when using -embed\n")
        sys.exit(1)
    return options, sources
| bsd-3-clause |
sorgerlab/indra | indra/sources/phosphoelm/api.py | 4 | 1149 | import csv
import logging
from .processor import PhosphoElmProcessor
logger = logging.getLogger(__name__)
def process_from_dump(fname, delimiter='\t'):
    """Process a Phospho.ELM file dump into a processor of statements.

    The dump can be obtained at http://phospho.elm.eu.org/dataset.html.

    Parameters
    ----------
    fname : str
        File path to the phospho.ELM file dump.
    delimiter : str
        The delimiter to use for csv.reader.

    Returns
    -------
    indra.sources.phosphoelm.PhosphoElmProcessor
        An instance of a PhosphoElmProcessor containing the statements
        generated from the file dump.
    """
    with open(fname, 'r') as fh:
        entries = _get_json_from_entry_rows(csv.reader(fh, delimiter=delimiter))
    processor = PhosphoElmProcessor(entries)
    processor.process_phosphorylations()
    return processor
def _get_json_from_entry_rows(row_iter):
"""Loop body to generate a json friendly structure"""
ppelm_json = []
columns = next(row_iter)
for entry in row_iter:
row_dict = {c: e for c, e in zip(columns, entry)}
ppelm_json.append(row_dict)
return ppelm_json
| bsd-2-clause |
teknick/eve-wspace | evewspace/core/tasks.py | 3 | 6163 | # Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from celery import task
from django.core.cache import cache
import urllib
import json
from models import Alliance, Corporation, NewsFeed
from API import utils as handler
import eveapi
import feedparser
@task()
def update_alliance(allianceID):
    """
    Updates an alliance and its corporations from the EVE API.

    Celery task: refreshes the Alliance record (creating it if missing),
    updates every member corporation, and links the executor corporation
    when a record for it exists.
    """
    api = eveapi.EVEAPIConnection(cacheHandler=handler)
    allianceapi = api.eve.AllianceList().alliances.Get(allianceID)
    if Alliance.objects.filter(id=allianceID).count():
        # Alliance exists, update it
        for corp in allianceapi.memberCorporations:
            try:
                update_corporation(corp.corporationID)
            except AttributeError:
                # Pass on this exception because one Russian corp has an
                # unavoidable bad character in their description
                pass
        alliance = Alliance.objects.get(id=allianceID)
        alliance.name = allianceapi.name
        alliance.shortname = allianceapi.shortName
        # Check to see if we have a record for the executor
        if Corporation.objects.filter(id=allianceapi.executorCorpID).count():
            alliance.executor = Corporation.objects.get(id=allianceapi.executorCorpID)
    else:
        # Alliance doesn't exist: add it without an executor, update the
        # member corps, and then resolve the executor
        alliance = Alliance(id=allianceapi.allianceID, name=allianceapi.name,
                            shortname=allianceapi.shortName, executor=None)
        alliance.save()
        for corp in allianceapi.memberCorporations:
            try:
                update_corporation(corp.corporationID)
            except AttributeError:
                # Skip corps whose sheets contain malformed data (e.g. xCAPITALSx)
                pass
        try:
            # If an alliance's executor can't be processed for some reason,
            # set it to None
            alliance.executor = Corporation.objects.get(id=allianceapi.executorCorpID)
        except:
            alliance.executor = None
    # persist updates from either branch
    alliance.save()
@task()
def update_corporation(corpID, sync=False):
    """
    Update a corporation record from the EVE API.

    If the corp belongs to an alliance we have no record of, an
    update_alliance task is queued. Unless ``sync`` is True this task then
    returns early, because update_alliance will call back into it after
    creating the Alliance row.

    :param corpID: EVE corporation ID to refresh.
    :param sync: When True, do not defer to the alliance task: create the
        corp now with alliance=None and let the queued update_alliance task
        fix the link later.
    :returns: The saved Corporation, or None on the early-return path.
    :raises AttributeError: If the corp ID is invalid or the corp's API
        data cannot be parsed.
    """
    api = eveapi.EVEAPIConnection(cacheHandler=handler)
    # Wrap the API call: at least one corp has a malformed character in
    # its data sheet that makes eveapi raise while parsing.
    try:
        corpapi = api.corp.CorporationSheet(corporationID=corpID)
    except:
        raise AttributeError("Invalid Corp ID or Corp has malformed data.")
    if corpapi.allianceID:
        try:
            alliance = Alliance.objects.get(id=corpapi.allianceID)
        except:
            # If the alliance doesn't exist, we start a task to add it
            # and terminate this task since the alliance task will call
            # it after creating the alliance object
            if not sync:
                update_alliance.delay(corpapi.allianceID)
                return
            else:
                # Something is waiting and requires the corp object
                # We set alliance to None and kick off the
                # update_alliance task to fix it later
                alliance = None
                update_alliance.delay(corpapi.allianceID)
    else:
        alliance = None
    if Corporation.objects.filter(id=corpID).count():
        # Corp exists, update it
        corp = Corporation.objects.get(id=corpID)
        corp.member_count = corpapi.memberCount
        corp.ticker = corpapi.ticker
        corp.name = corpapi.corporationName
        corp.alliance = alliance
        corp.save()
    else:
        # Corp doesn't exist, create it. NOTE(review): ticker is only set
        # on the update path above; a newly created corp gets its ticker on
        # the next refresh — confirm this is intentional.
        corp = Corporation(id=corpID, member_count=corpapi.memberCount,
                name=corpapi.corporationName, alliance=alliance)
        corp.save()
    return corp
@task()
def update_all_alliances():
    """
    Refresh every corp in every alliance known to the EVE API.

    This task will take a long time to run: update_alliance is invoked
    directly (not via .delay()), so the whole refresh happens inside this
    single task.
    """
    connection = eveapi.EVEAPIConnection(cacheHandler=handler)
    for entry in connection.eve.AllianceList().alliances:
        update_alliance(entry.allianceID)
@task()
def cache_eve_reddit():
    """
    Attempts to cache the top submissions to r/Eve.

    Fetches the listing once, validates it, and caches it for two minutes.
    If the fetched payload is invalid, any previously cached (valid) data
    is re-cached instead so readers keep seeing stale-but-valid content.
    The original version cached the payload unvalidated on a cold cache;
    validation is now applied on both paths.
    """
    current = cache.get('reddit')
    # Fetch once -- both branches below need the same payload.
    data = json.loads(urllib.urlopen('http://www.reddit.com/r/Eve/top.json').read())
    if 'data' in data:
        # Got valid response, store it
        cache.set('reddit', data, 120)
    elif current:
        # Invalid response, refresh the existing cached data instead
        cache.set('reddit', current, 120)
@task
def update_feeds():
    """
    Caches and updates RSS feeds in NewsFeeds.

    Each feed is fetched with feedparser, the parsed result is cached for
    two hours under 'feed_<pk>', and the stored name/description are
    refreshed from the feed's own title/subtitle.
    """
    for feed in NewsFeed.objects.all():
        try:
            data = feedparser.parse(feed.url)
            cache.set('feed_%s' % feed.pk, data, 7200)
            feed.name = data['feed']['title']
            feed.description = data['feed']['subtitle']
            feed.save()
        except:
            # There shouldn't be any exceptions, but we want to continue
            # if there are. (Deliberate best-effort: one broken feed must
            # not stop the others from updating.)
            pass
| gpl-3.0 |
yury-s/v8-inspector | PRESUBMIT.py | 16 | 14149 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Blink.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import sys
_EXCLUDED_PATHS = ()
def _CheckForVersionControlConflictsInFile(input_api, f):
    """Return one error string per changed line containing a merge marker.

    Lines beginning with '<<<<<<< ' or '>>>>>>> ', or consisting solely of
    '=======', are the markers left behind by an unresolved merge.
    """
    marker_re = input_api.re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
    path = f.LocalPath()
    return [' %s:%d %s' % (path, num, text)
            for num, text in f.ChangedContents()
            if marker_re.match(text)]
def _CheckForVersionControlConflicts(input_api, output_api):
    """Usually this is not intentional and will cause a compile failure."""
    findings = []
    for changed_file in input_api.AffectedFiles():
        findings += _CheckForVersionControlConflictsInFile(
            input_api, changed_file)
    if not findings:
        return []
    return [output_api.PresubmitError(
        'Version control conflict markers found, please resolve.', findings)]
def _CheckWatchlist(input_api, output_api):
    """Check that the WATCHLIST file parses correctly.

    watchlists reports parse problems via the logging module rather than by
    raising, so a temporary handler captures root-logger output into a
    string buffer; any captured text is surfaced as a presubmit error.
    """
    errors = []
    for f in input_api.AffectedFiles():
        if f.LocalPath() != 'WATCHLISTS':
            continue
        # Imported lazily: only needed when the WATCHLISTS file was touched.
        import StringIO
        import logging
        import watchlists
        log_buffer = StringIO.StringIO()
        log_handler = logging.StreamHandler(log_buffer)
        log_handler.setFormatter(
            logging.Formatter('%(levelname)s: %(message)s'))
        logger = logging.getLogger()
        logger.addHandler(log_handler)
        # Parsing logs problems (rather than raising) on malformed input.
        wl = watchlists.Watchlists(input_api.change.RepositoryRoot())
        logger.removeHandler(log_handler)
        log_handler.flush()
        log_buffer.flush()
        if log_buffer.getvalue():
            errors.append(output_api.PresubmitError(
                'Cannot parse WATCHLISTS file, please resolve.',
                log_buffer.getvalue().splitlines()))
    return errors
def _CommonChecks(input_api, output_api):
    """Checks common to both upload and commit."""
    # We should figure out what license checks we actually want to use.
    license_header = r'.*'
    results = list(input_api.canned_checks.PanProjectChecks(
        input_api, output_api, excluded_paths=_EXCLUDED_PATHS,
        maxlen=800, license_header=license_header))
    # Run the Blink-specific checks in a fixed order; each returns a list
    # of presubmit results which we concatenate.
    for check in (_CheckForVersionControlConflicts,
                  _CheckPatchFiles,
                  _CheckTestExpectations,
                  _CheckUnwantedDependencies,
                  _CheckChromiumPlatformMacros,
                  _CheckWatchlist,
                  _CheckFilePermissions):
        results.extend(check(input_api, output_api))
    return results
def _CheckSubversionConfig(input_api, output_api):
    """Verifies the subversion config file is correctly setup.

    Checks that autoprops are enabled (plus two representative Chromium
    auto-prop entries); returns a notify result otherwise. A missing
    APPDATA/HOME environment variable is a hard error.
    """
    join = input_api.os_path.join
    if input_api.platform == 'win32':
        appdata = input_api.environ.get('APPDATA', '')
        if not appdata:
            return [output_api.PresubmitError('%APPDATA% is not configured.')]
        path = join(appdata, 'Subversion', 'config')
    else:
        home = input_api.environ.get('HOME', '')
        if not home:
            return [output_api.PresubmitError('$HOME is not configured.')]
        path = join(home, '.subversion', 'config')
    error_msg = (
        'Please look at http://dev.chromium.org/developers/coding-style to\n'
        'configure your subversion configuration file. This enables automatic\n'
        'properties to simplify the project maintenance.\n'
        'Pro-tip: just download and install\n'
        'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')
    try:
        # Fix: use a context manager so the file handle is closed promptly
        # (the original left the handle open for the GC to reclaim).
        with open(path, 'r') as config_file:
            lines = config_file.read().splitlines()
        # Make sure auto-props is enabled and check for 2 Chromium standard
        # auto-prop.
        if (not '*.cc = svn:eol-style=LF' in lines or
            not '*.pdf = svn:mime-type=application/pdf' in lines or
            not 'enable-auto-props = yes' in lines):
            return [
                output_api.PresubmitNotifyResult(
                    'It looks like you have not configured your subversion config '
                    'file or it is not up-to-date.\n' + error_msg)
            ]
    except (OSError, IOError):
        return [
            output_api.PresubmitNotifyResult(
                'Can\'t find your subversion config file.\n' + error_msg)
        ]
    return []
def _CheckPatchFiles(input_api, output_api):
    """Rejects leftover .orig/.rej files from patch/merge tools."""
    leftovers = []
    for f in input_api.AffectedFiles():
        path = f.LocalPath()
        if path.endswith('.orig') or path.endswith('.rej'):
            leftovers.append(path)
    if not leftovers:
        return []
    return [output_api.PresubmitError(
        "Don't commit .rej and .orig files.", leftovers)]
def _CheckTestExpectations(input_api, output_api):
    """Runs lint-test-expectations when any LayoutTests file changed."""
    local_paths = [f.LocalPath() for f in input_api.AffectedFiles()]
    if any(path.startswith('LayoutTests') for path in local_paths):
        lint_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
            'Tools', 'Scripts', 'lint-test-expectations')
        _, errs = input_api.subprocess.Popen(
            [input_api.python_executable, lint_path],
            stdout=input_api.subprocess.PIPE,
            stderr=input_api.subprocess.PIPE).communicate()
        # The linter reports on stderr; producing no output at all means it
        # did not run properly, which should itself fail the presubmit.
        if not errs:
            return [output_api.PresubmitError(
                "lint-test-expectations failed "
                "to produce output; check by hand. ")]
        # On success the linter prints exactly 'Lint succeeded.'; anything
        # else is a lint failure and is surfaced verbatim.
        if errs.strip() != 'Lint succeeded.':
            return [output_api.PresubmitError(errs)]
    return []
def _CheckStyle(input_api, output_api):
    """Runs check-webkit-style over the files changed in this CL."""
    style_checker_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
        'Tools', 'Scripts', 'check-webkit-style')
    args = ([input_api.python_executable, style_checker_path, '--diff-files']
            + [f.LocalPath() for f in input_api.AffectedFiles()])
    results = []
    try:
        child = input_api.subprocess.Popen(args,
                                           stderr=input_api.subprocess.PIPE)
        _, stderrdata = child.communicate()
        # A non-zero exit means the checker found violations (on stderr).
        if child.returncode != 0:
            results.append(output_api.PresubmitError(
                'check-webkit-style failed', [stderrdata]))
    except Exception as e:
        # Failing to run the checker at all is only a notification, not a
        # hard presubmit failure.
        results.append(output_api.PresubmitNotifyResult(
            'Could not run check-webkit-style', [str(e)]))
    return results
def _CheckUnwantedDependencies(input_api, output_api):
    """Runs checkdeps on #include statements added in this
    change. Breaking - rules is an error, breaking ! rules is a
    warning.
    """
    # We need to wait until we have an input_api object and use this
    # roundabout construct to import checkdeps because this file is
    # eval-ed and thus doesn't have __file__.
    original_sys_path = sys.path
    try:
        sys.path = sys.path + [input_api.os_path.realpath(input_api.os_path.join(
            input_api.PresubmitLocalPath(), '..', '..', 'buildtools', 'checkdeps'))]
        import checkdeps
        from cpp_checker import CppChecker
        from rules import Rule
    finally:
        # Restore sys.path to what it was before.
        sys.path = original_sys_path
    # Collect only the added/changed lines of each affected C++ file;
    # checkdeps scans these for new #include directives.
    added_includes = []
    for f in input_api.AffectedFiles():
        if not CppChecker.IsCppFile(f.LocalPath()):
            continue
        changed_lines = [line for line_num, line in f.ChangedContents()]
        added_includes.append([f.LocalPath(), changed_lines])
    deps_checker = checkdeps.DepsChecker(
        input_api.os_path.join(input_api.PresubmitLocalPath()))
    error_descriptions = []
    warning_descriptions = []
    for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
        added_includes):
        description_with_path = '%s\n %s' % (path, rule_description)
        # DISALLOW ('-') rules are hard errors; other violations ('!') are
        # temporarily allowed and only produce a prompt/notify.
        if rule_type == Rule.DISALLOW:
            error_descriptions.append(description_with_path)
        else:
            warning_descriptions.append(description_with_path)
    results = []
    if error_descriptions:
        results.append(output_api.PresubmitError(
            'You added one or more #includes that violate checkdeps rules.',
            error_descriptions))
    if warning_descriptions:
        results.append(output_api.PresubmitPromptOrNotify(
            'You added one or more #includes of files that are temporarily\n'
            'allowed but being removed. Can you avoid introducing the\n'
            '#include? See relevant DEPS file(s) for details and contacts.',
            warning_descriptions))
    return results
def _CheckChromiumPlatformMacros(input_api, output_api, source_file_filter=None):
    """Ensures that Blink code uses WTF's platform macros instead of
    Chromium's. Using the latter has resulted in at least one subtle
    build breakage."""
    # Matches preprocessor conditionals that test Chromium's OS_* macros.
    chromium_macro_re = input_api.re.compile(r'^\s*#(el)?if.*\bOS_')
    violations = input_api.canned_checks._FindNewViolationsOfRule(
        lambda _, line: not chromium_macro_re.search(line),
        input_api, source_file_filter)
    messages = [
        'Found use of Chromium OS_* macro in %s. '
        'Use WTF platform macros instead.' % violation
        for violation in violations
    ]
    if not messages:
        return []
    return [output_api.PresubmitPromptWarning('\n'.join(messages))]
def _CheckForPrintfDebugging(input_api, output_api):
    """Generally speaking, we'd prefer not to land patches that printf
    debug output.

    Flags newly added lines that begin with a printf( call and asks (but
    does not require) the author to drop them before landing.
    """
    printf_re = input_api.re.compile(r'^\s*printf\(')
    errors = input_api.canned_checks._FindNewViolationsOfRule(
        lambda _, x: not printf_re.search(x),
        input_api, None)
    errors = [' * %s' % violation for violation in errors]
    if errors:
        # Fix: 'occurances' -> 'occurrences' in the user-facing message.
        return [output_api.PresubmitPromptOrNotify(
            'printf debugging is best debugging! That said, it might '
            'be a good idea to drop the following occurrences from '
            'your patch before uploading:\n%s' % '\n'.join(errors))]
    return []
def _CheckForDangerousTestFunctions(input_api, output_api):
    """Tests should not be using serveAsynchronousMockedRequests, since it does
    not guarantee that the threaded HTML parser will have completed."""
    dangerous_re = input_api.re.compile(
        r'serveAsynchronousMockedRequests')
    violations = input_api.canned_checks._FindNewViolationsOfRule(
        lambda _, line: not dangerous_re.search(line),
        input_api, None)
    formatted = [' * %s' % violation for violation in violations]
    if not formatted:
        return []
    return [output_api.PresubmitError(
        'You should be using FrameTestHelpers::'
        'pumpPendingRequests() instead of '
        'serveAsynchronousMockedRequests() in the following '
        'locations:\n%s' % '\n'.join(formatted))]
def _CheckForFailInFile(input_api, f):
    """Return one error string per changed line beginning with 'FAIL'.

    NOTE(review): this helper appears unused in the visible portion of the
    file; confirm whether a caller exists before removing it.
    """
    fail_re = input_api.re.compile('^FAIL')
    path = f.LocalPath()
    return [' %s:%d %s' % (path, num, text)
            for num, text in f.ChangedContents()
            if fail_re.match(text)]
def _CheckFilePermissions(input_api, output_api):
    """Check that all files have their permissions properly set."""
    # checkperms.py is not supported on Windows; skip the check there.
    if input_api.platform == 'win32':
        return []
    path = input_api.os_path.join(
        '..', '..', 'tools', 'checkperms', 'checkperms.py')
    args = [sys.executable, path, '--root', input_api.change.RepositoryRoot()]
    for f in input_api.AffectedFiles():
        args += ['--file', f.LocalPath()]
    checkperms = input_api.subprocess.Popen(
        args, stdout=input_api.subprocess.PIPE)
    # checkperms.py reports violations on stdout; any output is an error.
    errors = checkperms.communicate()[0].strip()
    if errors:
        return [output_api.PresubmitError(
            'checkperms.py failed.', errors.splitlines())]
    return []
def _CheckForInvalidPreferenceError(input_api, output_api):
    """Flags expected-result files containing 'Invalid name for preference'
    console errors, which indicate a test pokes a nonexistent preference."""
    pattern = input_api.re.compile('Invalid name for preference: (.+)')
    results = []
    for f in input_api.AffectedFiles():
        if not f.LocalPath().endswith('-expected.txt'):
            continue
        for line_num, line in f.ChangedContents():
            error = pattern.search(line)
            if error:
                # Fix: interpolate f.LocalPath(), not the AffectedFile
                # object itself, which rendered as an unhelpful repr.
                results.append(output_api.PresubmitError(
                    'Found an invalid preference %s in expected result %s:%s' %
                    (error.group(1), f.LocalPath(), line_num)))
    return results
def CheckChangeOnUpload(input_api, output_api):
    """Presubmit hook run when a change is uploaded for review."""
    upload_checks = (
        _CommonChecks,
        _CheckStyle,
        _CheckForPrintfDebugging,
        _CheckForDangerousTestFunctions,
        _CheckForInvalidPreferenceError,
    )
    results = []
    for check in upload_checks:
        results.extend(check(input_api, output_api))
    return results
def CheckChangeOnCommit(input_api, output_api):
    """Presubmit hook run when a change is committed.

    Runs the common checks plus commit-only gates: the tree must be open,
    the change must have a description, and the local subversion config
    must be sane.
    """
    results = list(_CommonChecks(input_api, output_api))
    results.extend(input_api.canned_checks.CheckTreeIsOpen(
        input_api, output_api,
        json_url='http://blink-status.appspot.com/current?format=json'))
    results.extend(input_api.canned_checks.CheckChangeHasDescription(
        input_api, output_api))
    results.extend(_CheckSubversionConfig(input_api, output_api))
    return results
def GetPreferredTryMasters(project, change):
    """Return the default try bots for Blink changes.

    Both arguments are part of the presubmit hook signature and unused
    here; every bot runs the 'defaulttests' suite.
    """
    blink_bots = (
        'android_blink_compile_dbg',
        'android_blink_compile_rel',
        'android_chromium_gn_compile_rel',
        'linux_blink_compile_dbg',
        'linux_blink_rel',
        'linux_chromium_gn_rel',
        'mac_blink_compile_dbg',
        'mac_blink_rel',
        'win_blink_compile_dbg',
        'win_blink_rel',
    )
    return {
        'tryserver.blink': dict(
            (bot, set(['defaulttests'])) for bot in blink_bots),
    }
| bsd-3-clause |
hradec/gaffer | python/GafferImageUI/DeepSampleCountsUI.py | 8 | 1994 | ##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferImage
# Register UI metadata for the DeepSampleCounts node: the description
# string shown by the Gaffer UI.
Gaffer.Metadata.registerNode(
GafferImage.DeepSampleCounts,
"description",
"""
Outputs an image showing the deep sample counts for each pixel.
""",
)
| bsd-3-clause |
calvinpy/django-debug-toolbar | debug_toolbar/panels/templates/panel.py | 7 | 8856 | from __future__ import absolute_import, unicode_literals
from contextlib import contextmanager
from os.path import normpath
from pprint import pformat
import django
from django import http
from django.conf.urls import url
from django.db.models.query import QuerySet, RawQuerySet
from django.template import Context, RequestContext, Template
from django.test.signals import template_rendered
from django.test.utils import instrumented_test_render
from django.utils.encoding import force_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.compat import (
OrderedDict, get_template_dirs, get_template_context_processors)
from debug_toolbar.panels import Panel
from debug_toolbar.panels.sql.tracking import recording, SQLQueryTriggered
from debug_toolbar.panels.templates import views
# Monkey-patch to enable the template_rendered signal. The receiver returns
# immediately when the panel is disabled to keep the overhead small.
# Code taken and adapted from Simon Willison and Django Snippets:
# http://www.djangosnippets.org/snippets/766/
# The guard prevents double-patching if this module is imported twice.
if Template._render != instrumented_test_render:
    # Keep a handle on the stock implementation so it can be detected or
    # restored later.
    Template.original_render = Template._render
    Template._render = instrumented_test_render
# Monkey-patch to store items added by template context processors. The
# overhead is sufficiently small to justify enabling it unconditionally.
if django.VERSION[:2] < (1, 8):
    def _request_context___init__(
            self, request, dict_=None, processors=None, current_app=None,
            use_l10n=None, use_tz=None):
        # Replacement RequestContext.__init__ for Django < 1.8: mirrors the
        # stock behavior while additionally recording, per processor, the
        # dict that each context processor contributed
        # (self.context_processors, keyed by dotted path).
        Context.__init__(
            self, dict_, current_app=current_app,
            use_l10n=use_l10n, use_tz=use_tz)
        if processors is None:
            processors = ()
        else:
            processors = tuple(processors)
        self.context_processors = OrderedDict()
        updates = dict()
        std_processors = get_template_context_processors()
        for processor in std_processors + processors:
            name = '%s.%s' % (processor.__module__, processor.__name__)
            context = processor(request)
            self.context_processors[name] = context
            updates.update(context)
        self.update(updates)
    RequestContext.__init__ = _request_context___init__
else:
    @contextmanager
    def _request_context_bind_template(self, template):
        # Django >= 1.8 applies context processors while binding the
        # context to a template, so the patch point moves here; behavior
        # mirrors the stock method plus per-processor recording.
        if self.template is not None:
            raise RuntimeError("Context is already bound to a template")
        self.template = template
        # Set context processors according to the template engine's settings.
        processors = (template.engine.template_context_processors +
                      self._processors)
        self.context_processors = OrderedDict()
        updates = {}
        for processor in processors:
            name = '%s.%s' % (processor.__module__, processor.__name__)
            context = processor(self.request)
            self.context_processors[name] = context
            updates.update(context)
        self.dicts[self._processors_index] = updates
        try:
            yield
        finally:
            self.template = None
            # Unset context processors.
            self.dicts[self._processors_index] = {}
    RequestContext.bind_template = _request_context_bind_template
# Monkey-patch versions of Django where Template doesn't store origin.
# See https://code.djangoproject.com/ticket/16096.
if django.VERSION[:2] < (1, 7):
    old_template_init = Template.__init__
    def new_template_init(self, template_string, origin=None, name='<Unknown Template>'):
        # Delegate to the stock constructor, then store origin explicitly
        # since these Django versions do not keep it (see the ticket
        # referenced in the comment above this block).
        old_template_init(self, template_string, origin, name)
        self.origin = origin
    Template.__init__ = new_template_init
class TemplatesPanel(Panel):
    """
    A panel that lists all templates used during processing of a response.
    """
    def __init__(self, *args, **kwargs):
        super(TemplatesPanel, self).__init__(*args, **kwargs)
        # One record per template_rendered signal, in render order.
        self.templates = []
    def _store_template_info(self, sender, **kwargs):
        """Receiver for the template_rendered signal.

        Stores the rendered template together with a sanitized, pformat-ed
        snapshot of every context layer. Values whose repr is huge or whose
        formatting could hit the database are replaced with placeholders.
        """
        template, context = kwargs['template'], kwargs['context']
        # Skip templates that we are generating through the debug toolbar.
        if (isinstance(template.name, six.string_types) and
                template.name.startswith('debug_toolbar/')):
            return
        context_list = []
        for context_layer in context.dicts:
            temp_layer = {}
            if hasattr(context_layer, 'items'):
                for key, value in context_layer.items():
                    # Replace any request elements - they have a large
                    # unicode representation and the request data is
                    # already made available from the Request panel.
                    if isinstance(value, http.HttpRequest):
                        temp_layer[key] = '<<request>>'
                    # Replace the debugging sql_queries element. The SQL
                    # data is already made available from the SQL panel.
                    elif key == 'sql_queries' and isinstance(value, list):
                        temp_layer[key] = '<<sql_queries>>'
                    # Replace LANGUAGES, which is available in i18n context processor
                    elif key == 'LANGUAGES' and isinstance(value, tuple):
                        temp_layer[key] = '<<languages>>'
                    # QuerySet would trigger the database: user can run the query from SQL Panel
                    elif isinstance(value, (QuerySet, RawQuerySet)):
                        model_name = "%s.%s" % (
                            value.model._meta.app_label, value.model.__name__)
                        temp_layer[key] = '<<%s of %s>>' % (
                            value.__class__.__name__.lower(), model_name)
                    else:
                        try:
                            # Suspend SQL recording while probing, so the
                            # probe itself is not logged by the SQL panel.
                            recording(False)
                            pformat(value) # this MAY trigger a db query
                        except SQLQueryTriggered:
                            temp_layer[key] = '<<triggers database query>>'
                        except UnicodeEncodeError:
                            temp_layer[key] = '<<unicode encode error>>'
                        except Exception:
                            temp_layer[key] = '<<unhandled exception>>'
                        else:
                            temp_layer[key] = value
                        finally:
                            recording(True)
            try:
                context_list.append(pformat(temp_layer))
            except UnicodeEncodeError:
                pass
        kwargs['context'] = [force_text(item) for item in context_list]
        kwargs['context_processors'] = getattr(context, 'context_processors', None)
        self.templates.append(kwargs)
    # Implement the Panel API
    nav_title = _("Templates")
    @property
    def title(self):
        # Panel heading: includes the number of templates rendered so far.
        num_templates = len(self.templates)
        return _("Templates (%(num_templates)s rendered)") % {'num_templates': num_templates}
    @property
    def nav_subtitle(self):
        # Name of the first recorded template, if any were rendered.
        if self.templates:
            return self.templates[0]['template'].name
        return ''
    template = 'debug_toolbar/panels/templates.html'
    @classmethod
    def get_urls(cls):
        # Extra toolbar URL used to view a template's source.
        return [
            url(r'^template_source/$', views.template_source, name='template_source'),
        ]
    def enable_instrumentation(self):
        # Start capturing template renders via the patched signal.
        template_rendered.connect(self._store_template_info)
    def disable_instrumentation(self):
        template_rendered.disconnect(self._store_template_info)
    def generate_stats(self, request, response):
        """Assemble the data rendered by this panel's template."""
        template_context = []
        for template_data in self.templates:
            info = {}
            # Clean up some info about templates
            template = template_data.get('template', None)
            if hasattr(template, 'origin') and template.origin and template.origin.name:
                template.origin_name = template.origin.name
            else:
                template.origin_name = _('No origin')
            info['template'] = template
            # Clean up context for better readability
            if self.toolbar.config['SHOW_TEMPLATE_CONTEXT']:
                context_list = template_data.get('context', [])
                info['context'] = '\n'.join(context_list)
            template_context.append(info)
        # Fetch context_processors from any template
        if self.templates:
            context_processors = self.templates[0]['context_processors']
        else:
            context_processors = None
        template_dirs = get_template_dirs()
        self.record_stats({
            'templates': template_context,
            'template_dirs': [normpath(x) for x in template_dirs],
            'context_processors': context_processors,
        })
| bsd-3-clause |
brandond/ansible | lib/ansible/modules/network/fortios/fortios_router_policy.py | 21 | 14465 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_router_policy
short_description: Configure IPv4 routing policies in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by allowing the
user to set and modify router feature and policy category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: true
router_policy:
description:
- Configure IPv4 routing policies.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
action:
description:
- Action of the policy route.
choices:
- deny
- permit
comments:
description:
- Optional comments.
dst:
description:
- Destination IP and mask (x.x.x.x/x).
suboptions:
subnet:
description:
- IP and mask.
required: true
dst-negate:
description:
- Enable/disable negating destination address match.
choices:
- enable
- disable
dstaddr:
description:
- Destination address name.
suboptions:
name:
description:
- Address/group name. Source firewall.address.name firewall.addrgrp.name.
required: true
end-port:
description:
- End destination port number (0 - 65535).
end-source-port:
description:
- End source port number (0 - 65535).
gateway:
description:
- IP address of the gateway.
input-device:
description:
- Incoming interface name.
suboptions:
name:
description:
- Interface name. Source system.interface.name.
required: true
output-device:
description:
- Outgoing interface name. Source system.interface.name.
protocol:
description:
- Protocol number (0 - 255).
seq-num:
description:
- Sequence number.
required: true
src:
description:
- Source IP and mask (x.x.x.x/x).
suboptions:
subnet:
description:
- IP and mask.
required: true
src-negate:
description:
- Enable/disable negating source address match.
choices:
- enable
- disable
srcaddr:
description:
- Source address name.
suboptions:
name:
description:
- Address/group name. Source firewall.address.name firewall.addrgrp.name.
required: true
start-port:
description:
- Start destination port number (0 - 65535).
start-source-port:
description:
- Start source port number (0 - 65535).
status:
description:
- Enable/disable this policy route.
choices:
- enable
- disable
tos:
description:
- Type of service bit pattern.
tos-mask:
description:
- Type of service evaluated bits.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IPv4 routing policies.
fortios_router_policy:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
router_policy:
state: "present"
action: "deny"
comments: "<your_own_value>"
dst:
-
subnet: "<your_own_value>"
dst-negate: "enable"
dstaddr:
-
name: "default_name_9 (source firewall.address.name firewall.addrgrp.name)"
end-port: "10"
end-source-port: "11"
gateway: "<your_own_value>"
input-device:
-
name: "default_name_14 (source system.interface.name)"
output-device: "<your_own_value> (source system.interface.name)"
protocol: "16"
seq-num: "17"
src:
-
subnet: "<your_own_value>"
src-negate: "enable"
srcaddr:
-
name: "default_name_22 (source firewall.address.name firewall.addrgrp.name)"
start-port: "23"
start-source-port: "24"
status: "enable"
tos: "<your_own_value>"
tos-mask: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Authenticate the module-level ``fos`` connection.

    Reads host/username/password from ``data`` and enables HTTPS unless
    the caller explicitly passed https=False (a missing key means HTTPS).
    """
    host = data['host']
    username = data['username']
    password = data['password']
    fos.debug('on')
    enable_https = data.get('https', True)
    fos.https('on' if enable_https else 'off')
    fos.login(host, username, password)
def filter_router_policy_data(json):
    """Return a copy of *json* restricted to the router/policy attributes
    the FortiOS API understands, silently dropping unknown keys and any
    attribute whose value is None."""
    option_list = ['action', 'comments', 'dst',
                   'dst-negate', 'dstaddr', 'end-port',
                   'end-source-port', 'gateway', 'input-device',
                   'output-device', 'protocol', 'seq-num',
                   'src', 'src-negate', 'srcaddr',
                   'start-port', 'start-source-port', 'status',
                   'tos', 'tos-mask']

    return dict((key, json[key])
                for key in option_list
                if key in json and json[key] is not None)
def flatten_multilists_attributes(data):
    """Join each multi-valued attribute listed in ``multilist_attrs`` into a
    single space-separated string, in place.

    Each entry of ``multilist_attrs`` is a key path into *data*.  The list is
    empty for this module (router/policy has no multilist attributes), so the
    function is effectively a no-op; the helper is kept for parity with the
    other FortiOS modules.

    :param data: module parameter dictionary (mutated in place)
    :returns: the same *data* dictionary
    """
    multilist_attrs = []

    for attr in multilist_attrs:
        # Walk the key path directly instead of building Python source for
        # eval()/exec() as the original did: same result, but safe and
        # debuggable, and failures are limited to lookup errors.
        try:
            parent = data
            for key in attr[:-1]:
                parent = parent[key]
            current_val = parent[attr[-1]]
            parent[attr[-1]] = ' '.join(elem for elem in current_val)
        except (KeyError, IndexError, TypeError):
            # Attribute absent or not list-like: leave it untouched,
            # matching the old best-effort behaviour.
            pass

    return data
def router_policy(data, fos):
    """Create/update or delete a single router policy entry.

    ``state == 'present'`` issues a set of the filtered payload;
    ``state == 'absent'`` deletes by the mandatory ``seq-num`` master key.
    Returns the raw FortiOS API response dictionary.
    """
    vdom = data['vdom']
    policy = data['router_policy']
    payload = filter_router_policy_data(flatten_multilists_attributes(policy))

    state = policy['state']
    if state == "present":
        return fos.set('router',
                       'policy',
                       data=payload,
                       vdom=vdom)
    elif state == "absent":
        return fos.delete('router',
                          'policy',
                          mkey=payload['seq-num'],
                          vdom=vdom)
def fortios_router(data, fos):
    """Log in, apply the requested router configuration, and log out.

    :param data: Ansible module parameters
    :param fos: FortiOSAPI connection object
    :returns: (is_error, has_changed, api_response) triple for Ansible
    """
    login(data)

    if data['router_policy']:
        resp = router_policy(data, fos)
    else:
        # Bug fix: the original left ``resp`` unbound (UnboundLocalError)
        # when no router_policy payload was supplied; report a clean error
        # response instead of crashing after logout.
        resp = {'status': 'error', 'reason': 'no router_policy data provided'}

    fos.logout()
    success = resp['status'] == "success"
    return not success, success, resp
def main():
    """Ansible entry point: validate arguments, connect to the FortiGate
    via fortiosapi, and apply the requested router/policy configuration."""
    # Argument spec mirrors the FortiOS router/policy attributes; the
    # hyphenated keys match the API names accepted by
    # filter_router_policy_data().
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "router_policy": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "action": {"required": False, "type": "str",
                           "choices": ["deny", "permit"]},
                "comments": {"required": False, "type": "str"},
                "dst": {"required": False, "type": "list",
                        "options": {
                            "subnet": {"required": True, "type": "str"}
                        }},
                "dst-negate": {"required": False, "type": "str",
                               "choices": ["enable", "disable"]},
                "dstaddr": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "end-port": {"required": False, "type": "int"},
                "end-source-port": {"required": False, "type": "int"},
                "gateway": {"required": False, "type": "str"},
                "input-device": {"required": False, "type": "list",
                                 "options": {
                                     "name": {"required": True, "type": "str"}
                                 }},
                "output-device": {"required": False, "type": "str"},
                "protocol": {"required": False, "type": "int"},
                # seq-num doubles as the delete mkey, hence required.
                "seq-num": {"required": True, "type": "int"},
                "src": {"required": False, "type": "list",
                        "options": {
                            "subnet": {"required": True, "type": "str"}
                        }},
                "src-negate": {"required": False, "type": "str",
                               "choices": ["enable", "disable"]},
                "srcaddr": {"required": False, "type": "list",
                            "options": {
                                "name": {"required": True, "type": "str"}
                            }},
                "start-port": {"required": False, "type": "int"},
                "start-source-port": {"required": False, "type": "int"},
                "status": {"required": False, "type": "str",
                           "choices": ["enable", "disable"]},
                "tos": {"required": False, "type": "str"},
                "tos-mask": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # fortiosapi is an optional third-party dependency; fail cleanly if missing.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    # login() reads the module-level connection object, so bind it globally.
    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_router(module.params, fos)

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
omouse/staykat | app/planner/models.py | 1 | 2998 | from googleplaces import GooglePlaces
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
class GeoModel(models.Model):
    """
    A model for representing a model that has a geographical location.

    Cannot use GeoDjango with Mongodb or with Google App Engine. This model
    emulates Google App Engine's GeoPt field type:
    https://developers.google.com/appengine/docs/python/datastore/typesandpropertyclasses#GeoPt
    """
    # WGS84 coordinates; both optional so a record can exist before geocoding.
    latitude = models.FloatField(blank=True, null=True)
    longitude = models.FloatField(blank=True, null=True)

    class Meta:
        abstract = True  # mixin only - concrete subclasses own the table
class City(GeoModel):
    """A geocoded city with its region/country and manually curated
    neighbour cities."""
    name = models.CharField(max_length=60, verbose_name=_('Name'))
    region = models.CharField(max_length=60, verbose_name=_('Region'))
    # NOTE(review): unique=True on country means only one City row per
    # country can exist - confirm this is intentional.
    country = models.CharField(max_length=100, verbose_name=_('Country'), unique=True)
    # Symmetrical self-relation: "nearby" goes both ways.
    nearby_cities = models.ManyToManyField('self', blank=True)

    class Meta:
        verbose_name = _('City')
        verbose_name_plural = _('Cities')

    def __unicode__(self):
        return '%s, %s, %s' % (self.name, self.region, self.country)
class Category(models.Model):
    """A place category: machine-readable ``code`` plus a translatable
    display ``label``."""
    code = models.CharField(max_length=60)
    label = models.CharField(max_length=60)

    class Meta:
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')

    def __unicode__(self):
        # Label is run through ugettext so category names are translatable.
        return _(self.label)
class Place(GeoModel):
    """A point of interest, optionally synchronised with Google Places."""
    name = models.CharField(max_length=200, verbose_name=_('Name'))
    description = models.CharField(max_length=500, verbose_name=_('Description'))
    url = models.CharField(max_length=300)
    categories = models.ManyToManyField(Category)
    street_address = models.CharField(max_length=200, verbose_name=_('Street Address'))
    city = models.ForeignKey(City, blank=True, null=True)
    # Opaque Google Places reference used to re-fetch details.
    googlemaps_reference = models.CharField(max_length=300, blank=True)
    googlemaps_rating = models.FloatField(blank=True, null=True)
    # Flag set elsewhere to request a refresh; cleared by
    # update_from_googlemaps() below.
    requires_update_from_googlemaps = models.BooleanField(default=False)

    class Meta:
        verbose_name = _('Place')
        verbose_name_plural = _('Places')

    def __unicode__(self):
        return self.name

    def update_from_googlemaps(self):
        """Fetch fresh details from the Google Places API (network call)
        and persist them on this instance."""
        google_places = GooglePlaces(settings.GOOGLE_API_KEY)
        place = google_places.get_place(self.googlemaps_reference)
        self.url = place.website
        self.latitude = place.geo_location['lat']
        self.longitude = place.geo_location['lng']
        self.street_address = place.formatted_address
        self.requires_update_from_googlemaps = False
        self.googlemaps_rating = place.rating
        self.save()
class Event(models.Model):
    """An event happening at a Place; end time is optional (open-ended)."""
    name = models.CharField(max_length=300, verbose_name=_('Name'))
    place = models.ForeignKey(Place)
    start_time = models.DateTimeField()
    end_time = models.DateTimeField(blank=True, null=True)

    class Meta:
        verbose_name = _('Event')
        verbose_name_plural = _('Events')
| agpl-3.0 |
lino-framework/lino | lino/utils/jscompressor.py | 3 | 5390 | # {{{ http://code.activestate.com/recipes/496882/ (r8)
'''
http://code.activestate.com/recipes/496882/
Author: Michael Palmer 13 Jul 2006
a regex-based JavaScript code compression kludge
'''
from __future__ import division
from __future__ import print_function
from builtins import range
from builtins import object
from past.utils import old_div
import re
class JSCompressor(object):
    """Regex-based JavaScript compressor.

    String and regex literals are swapped out for numbered placeholders
    before any stripping happens, so the comment/whitespace regexes can
    never corrupt them; they are substituted back at the end.
    """

    def __init__(self, compressionLevel=2, measureCompression=False):
        '''
        compressionLevel:
            0 - no compression, script returned unchanged. For debugging only -
                try if you suspect that compression compromises your script
            1 - Strip comments and empty lines, don't change line breaks and
                indentation (code remains readable)
            2 - Additionally strip insignificant whitespace (code will become
                quite unreadable)
        measureCompression: append a comment stating the extent of compression
        '''
        self.compressionLevel = compressionLevel
        self.measureCompression = measureCompression

    # A bunch of regexes used in compression.
    # Fix: all patterns are now raw strings - the originals relied on
    # invalid escape sequences (e.g. '\d', '\/', '\s' in plain strings)
    # which raise Deprecation/SyntaxWarnings on modern Python.

    # First, exempt string and regex literals from compression by transient
    # substitution.
    findLiterals = re.compile(r'''
        (\'.*?(?<=[^\\])\')              |        # single-quoted strings
        (\".*?(?<=[^\\])\")              |        # double-quoted strings
        ((?<![\*\/])\/(?![\/\*]).*?(?<![\\])\/)  # JS regexes, trying hard not to be tripped up by comments
        ''', re.VERBOSE)

    literalMarker = '@_@%d@_@'                # temporary replacement
    backSubst = re.compile(r'@_@(\d+)@_@')    # put the string literals back in

    mlc1 = re.compile(r'(\/\*.*?\*\/)')            # /* ... */ comments on single line
    mlc = re.compile(r'(\/\*.*?\*\/)', re.DOTALL)  # real multiline comments
    slc = re.compile(r'\/\/.*')                    # remove single line comments

    # collapse successive non-leading white space characters into one
    collapseWs = re.compile(r'(?<=\S)[ \t]+')

    squeeze = re.compile(r'''
        \s+(?=[\}\]\)\:\&\|\=\;\,\.\+])  |  # remove whitespace preceding control characters
        (?<=[\{\[\(\:\&\|\=\;\,\.\+])\s+ |  # ... or following such
        [ \t]+(?=\W)                     |  # remove spaces or tabs preceding non-word characters
        (?<=\W)[ \t]+                       # ... or following such
        ''', re.VERBOSE | re.DOTALL)

    def compress(self, script):
        '''
        Perform compression and return the compressed script.
        '''
        if self.compressionLevel == 0:
            return script

        lengthBefore = len(script)

        # First, substitute string literals by placeholders to prevent the
        # regexes messing with them.
        literals = []

        def insertMarker(mo):
            l = mo.group()
            literals.append(l)
            return self.literalMarker % (len(literals) - 1)

        script = self.findLiterals.sub(insertMarker, script)

        # Now, to the literal-stripped carcass, apply some kludgy regexes
        # for deflation...
        script = self.slc.sub('', script)    # strip single line comments
        script = self.mlc1.sub(' ', script)  # /* .. */ comments on single lines -> space
        script = self.mlc.sub('\n', script)  # real multiline comments -> newline

        # Remove empty lines and trailing whitespace.
        script = '\n'.join([l.rstrip()
                            for l in script.splitlines() if l.strip()])

        if self.compressionLevel == 2:
            # Squeeze out any dispensible whitespace.
            script = self.squeeze.sub('', script)
        elif self.compressionLevel == 1:
            # Only collapse multiple whitespace characters.
            script = self.collapseWs.sub(' ', script)

        # Now back-substitute the string and regex literals.
        def backsub(mo):
            return literals[int(mo.group(1))]

        script = self.backSubst.sub(backsub, script)

        if self.measureCompression:
            lengthAfter = float(len(script))
            # Plain '/' is safe here: lengthAfter is a float, so the former
            # old_div() (Python 2 semantics) performed true division anyway.
            squeezedBy = int(100 * (1 - lengthAfter / lengthBefore))
            script += '\n// squeezed out %s%%\n' % squeezedBy

        return script
if __name__ == '__main__':
    # Smoke-test: run the same snippet through compression levels 1 and 2
    # and print the result plus its length.
    # NOTE(review): the exact whitespace layout of this demo literal was
    # lost in a formatting-stripped copy of this file; the compressor's
    # behaviour does not depend on it.
    script = '''
/* this is a totally useless multiline comment, containing a silly "quoted string",
surrounded by several superfluous line breaks
*/
// and this is an equally important single line comment
sth = "this string contains 'quotes', a /regex/ and a // comment yet it will survive compression";
function wurst(){ // this is a great function
var hans = 33;
}
sthelse = 'and another useless string';
function hans(){ // another function
var bill = 66; // successive spaces will be collapsed into one;
var bob = 77 // this line break will be preserved b/c of lacking semicolon
var george = 88;
}
'''
    for x in range(1, 3):
        print('\ncompression level', x, ':\n--------------')
        c = JSCompressor(compressionLevel=x, measureCompression=True)
        cpr = c.compress(script)
        print(cpr)
        print('length', len(cpr))
| bsd-2-clause |
liamchzh/shadowsocks | tests/test.py | 39 | 5023 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import signal
import select
import time
import argparse
from subprocess import Popen, PIPE
# Build the client/server command lines from CLI options and launch both
# shadowsocks processes with piped stdio for the select() loop below.
python = ['python']

default_url = 'http://localhost/'

parser = argparse.ArgumentParser(description='test Shadowsocks')
parser.add_argument('-c', '--client-conf', type=str, default=None)
parser.add_argument('-s', '--server-conf', type=str, default=None)
parser.add_argument('-a', '--client-args', type=str, default=None)
parser.add_argument('-b', '--server-args', type=str, default=None)
parser.add_argument('--with-coverage', action='store_true', default=None)
parser.add_argument('--should-fail', action='store_true', default=None)
parser.add_argument('--tcp-only', action='store_true', default=None)
parser.add_argument('--url', type=str, default=default_url)
parser.add_argument('--dns', type=str, default='8.8.8.8')

config = parser.parse_args()

if config.with_coverage:
    # Run both daemons under coverage, appending to existing data (-a).
    python = ['coverage', 'run', '-a']

client_args = python + ['shadowsocks/local.py', '-v']
server_args = python + ['shadowsocks/server.py', '-v']

# The server falls back to the client's conf/args when its own are omitted.
if config.client_conf:
    client_args.extend(['-c', config.client_conf])
    if config.server_conf:
        server_args.extend(['-c', config.server_conf])
    else:
        server_args.extend(['-c', config.client_conf])
if config.client_args:
    client_args.extend(config.client_args.split())
    if config.server_args:
        server_args.extend(config.server_args.split())
    else:
        server_args.extend(config.client_args.split())
if config.url == default_url:
    # Local-only target: clear the default forbidden-ip list so the
    # server will proxy to localhost.
    server_args.extend(['--forbidden-ip', ''])

p1 = Popen(server_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p2 = Popen(client_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p3 = None  # curl (TCP test), started at stage 1
p4 = None  # dig via socksify (UDP test), started at stage 3
p3_fin = False  # NOTE(review): these two flags are never read below
p4_fin = False
# Stage machine driven by the select() loop:
# 1 shadowsocks started
# 2 curl started
# 3 curl finished
# 4 dig started
# 5 dig finished
stage = 1

try:
    local_ready = False
    server_ready = False
    fdset = [p1.stdout, p2.stdout, p1.stderr, p2.stderr]
    while True:
        r, w, e = select.select(fdset, [], fdset)
        if e:
            break

        for fd in r:
            line = fd.readline()
            if not line:
                # EOF on a child pipe: advance the stage when it is the
                # currently-watched test process finishing.
                if stage == 2 and fd == p3.stdout:
                    stage = 3
                if stage == 4 and fd == p4.stdout:
                    stage = 5
            if bytes != str:
                # Python 3: decode child output before echoing it.
                line = str(line, 'utf8')
            sys.stderr.write(line)
            if line.find('starting local') >= 0:
                local_ready = True
            if line.find('starting server') >= 0:
                server_ready = True

        if stage == 1:
            # Give both daemons a moment to bind, then run the TCP test:
            # curl through the local SOCKS5 proxy.
            time.sleep(2)
            p3 = Popen(['curl', config.url, '-v', '-L',
                        '--socks5-hostname', '127.0.0.1:1081',
                        '-m', '15', '--connect-timeout', '10'],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
            if p3 is not None:
                fdset.append(p3.stdout)
                fdset.append(p3.stderr)
                stage = 2
            else:
                sys.exit(1)

        if stage == 3 and p3 is not None:
            # curl finished: check its exit code (inverted when the run is
            # expected to fail), then start the UDP/DNS test unless tcp-only.
            fdset.remove(p3.stdout)
            fdset.remove(p3.stderr)
            r = p3.wait()
            if config.should_fail:
                if r == 0:
                    sys.exit(1)
            else:
                if r != 0:
                    sys.exit(1)
            if config.tcp_only:
                break
            p4 = Popen(['socksify', 'dig', '@%s' % config.dns,
                        'www.google.com'],
                       stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
            if p4 is not None:
                fdset.append(p4.stdout)
                fdset.append(p4.stderr)
                stage = 4
            else:
                sys.exit(1)

        if stage == 5:
            # dig finished: same pass/fail logic as the TCP leg.
            r = p4.wait()
            if config.should_fail:
                if r == 0:
                    sys.exit(1)
                print('test passed (expecting failure)')
            else:
                if r != 0:
                    sys.exit(1)
                print('test passed')
            break
finally:
    # Always shut the daemons down, even on test failure.
    for p in [p1, p2]:
        try:
            os.kill(p.pid, signal.SIGINT)
            os.waitpid(p.pid, 0)
        except OSError:
            pass
| apache-2.0 |
eric-haibin-lin/mxnet | ci/util.py | 1 | 5808 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import logging
import logging.config
import os
import subprocess
import sys
import requests
def get_mxnet_root() -> str:
    """Walk upward from this file's directory until a folder containing the
    ``.mxnet_root`` marker file is found, and return that folder's path."""
    path = os.path.abspath(os.path.dirname(__file__))
    while not os.path.exists(os.path.join(path, ".mxnet_root")):
        parent = os.path.abspath(os.path.join(path, os.pardir))
        if parent == path:
            # Reached the filesystem root without finding the marker.
            raise RuntimeError("Got to the root and couldn't find a parent folder with .mxnet_root")
        path = parent
    return path
@contextlib.contextmanager
def remember_cwd():
    '''
    Restore the current working directory when exiting the context.
    '''
    saved = os.getcwd()
    try:
        yield
    finally:
        os.chdir(saved)
def retry(target_exception, tries=4, delay_s=1, backoff=2):
    """Decorator factory: retry the wrapped callable with exponential backoff.

    Based on http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
    (originally http://wiki.python.org/moin/PythonDecoratorLibrary#Retry).

    :param target_exception: exception (or tuple of exceptions) that triggers
        a retry; anything else propagates immediately
    :param tries: total number of attempts (not retries) before giving up
    :param delay_s: initial delay between attempts, in seconds
    :param backoff: multiplier applied to the delay after each failure
    """
    import time
    from functools import wraps

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            attempts_left, wait = tries, delay_s
            while attempts_left > 1:
                try:
                    return func(*args, **kwargs)
                except target_exception as e:
                    logging.warning("Exception: %s, Retrying in %d seconds...", str(e), wait)
                    time.sleep(wait)
                    attempts_left -= 1
                    wait *= backoff
            # Final attempt: let any exception propagate to the caller.
            return func(*args, **kwargs)
        return wrapper

    return decorator
# noinspection SyntaxError
def under_ci() -> bool:
    """:return: True if we run in Jenkins (JOB_NAME is set by Jenkins)."""
    return os.environ.get('JOB_NAME') is not None
def ec2_instance_info() -> str:
    """Best-effort description of the EC2 instance this CI job runs on.

    :returns: "instance-type instance-id public-hostname" (whichever pieces
        the metadata service answers with 200) when in CI, '?' when in CI
        but the metadata service is unreachable, '' outside CI.
    """
    import requests
    if not under_ci():
        return ''
    result = []
    try:
        for item in ("instance-type", "instance-id", "public-hostname"):
            r = requests.get("http://instance-data/latest/meta-data/" + item)
            if r.status_code == 200:
                result.append(r.content.decode())
        return ' '.join(result)
    except requests.exceptions.ConnectionError:
        # Bug fix: the original caught the *builtin* ConnectionError, which
        # requests.exceptions.ConnectionError does not subclass, so a missing
        # metadata service crashed the caller instead of returning '?'.
        return '?'
def chdir_to_script_directory():
    # We need to be in the same directory as the script so the commands in
    # the dockerfiles work as expected, but the script can be invoked from
    # a different path.
    os.chdir(os.path.dirname(os.path.realpath(__file__)))
def script_name() -> str:
    """:returns: script name with leading paths removed"""
    return os.path.basename(sys.argv[0])
def config_logging():
    """Configure logging from logging.conf (or $LOGGING_CONF if set) and
    silence noisy HTTP libraries."""
    conf_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logging.conf")
    logging.config.fileConfig(os.getenv('LOGGING_CONF', conf_path))

    # Force botocore and requests to WARNING to avoid leaking any credentials
    # or sensitive information into CI logs.
    logging.getLogger("botocore").setLevel(logging.WARNING)
    logging.getLogger("requests").setLevel(logging.WARNING)
# Takes url and downloads it to the dest_path directory on Windows.
# Takes url and downloads it to the dest_path directory on Windows.
def download_file(url, dest_path):
    """Download *url* into *dest_path* and return the local file path.

    NOTE(review): mixed return types - returns the int 404 on Not Found but
    a path string otherwise; callers must check. Path joining uses a literal
    backslash, so this helper is Windows-only as the comment above says.
    """
    file_name = url.split('/')[-1]
    full_path = "{}\\{}".format(dest_path, file_name)
    logging.info("Downloading: {}".format(full_path))
    r = requests.get(url, stream=True)
    if r.status_code == 404:
        return r.status_code
    elif r.status_code != 200:
        # Other non-200 statuses are only logged; the body is still written.
        logging.error("{} returned status code {}".format(url, r.status_code))
    with open(full_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
    return full_path
# Takes arguments and runs command on host. Shell is disabled by default.
# Takes arguments and runs command on host. Shell is disabled by default.
def run_command(args, shell=False):
    """Run *args* on the host (30 min timeout) and return its decoded stdout
    with Windows line endings stripped; raise RuntimeError on non-zero exit."""
    logging.info("Issuing command: {}".format(args))
    try:
        output = subprocess.check_output(args, shell=shell, timeout=1800)
    except subprocess.CalledProcessError as e:
        raise RuntimeError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    res = output.decode("utf-8").replace("\r\n", "")
    logging.info("Output: {}".format(res))
    return res
| apache-2.0 |
imito/odin | benchmarks/two_way_2_group_data_into_batch.py | 1 | 4082 | # python two_way_2_group_data_into_batch.py -m memory_profiler
# group: 165 + 10.9 MB and 24.3 (s/iter)
# group2: 164 + 43.6 MB (old method) and 15.6 (s/iter)
from __future__ import print_function, division, absolute_import
import os
os.environ['ODIN'] = 'theano,cpu,float32'
from six.moves import zip_longest, cPickle
import numpy as np
from odin import backend as K, nnet as N, fuel as F
from odin.utils import UnitTimer
from memory_profiler import profile
# Load three utterances' MFCC features from the dataset and package each as
# (name, [features, features]) - the duplicated array simulates a task with
# two parallel data inputs for the grouping benchmarks below.
ds = F.Dataset('/home/trung/data/estonia_audio32')
indices = np.genfromtxt(ds['indices.csv'], dtype=str, delimiter=' ')

name, start, end = indices[0]
x0 = ds['mfcc'][int(start):int(end)]
x0 = (name, [x0, x0])

name, start, end = indices[1]
x1 = ds['mfcc'][int(start):int(end)]
x1 = (name, [x1, x1])

name, start, end = indices[2]
x2 = ds['mfcc'][int(start):int(end)]
x2 = (name, [x2, x2])
@profile
def group(batch):
    """ batch: contains
        [
            (name, [list of data], [list of others]),
            (name, [list of data], [list of others]),
            ...
        ]

    Interleaves mini-batches drawn from each input, concatenates the slices
    across inputs, reshuffles, then re-slices into final batches.  Lower
    peak memory than group2() (whole arrays are never stacked at once) but
    slower per iteration - see the measurements in the module header.

    Note
    ----
    We assume the shape[0] (or length) of all "data" and "others" are
    the same
    """
    rng = np.random.RandomState(1234)
    batch_size = 64
    # Per-input list of mini-batch indices.
    indices = [range((b[1][0].shape[0] - 1) // batch_size + 1)
               for b in batch]
    # shuffle if possible
    if rng is not None:
        [rng.shuffle(i) for i in indices]
    # ====== create batch of data ====== #
    for idx in zip_longest(*indices):
        ret = []
        for i, b in zip(idx, batch):
            # skip if one of the data is not enough
            if i is None:
                continue
            # pick data from each given input
            name = b[0]; data = b[1]; others = b[2:]
            start = i * batch_size
            end = start + batch_size
            _ = [d[start:end] for d in data] + \
                [o[start:end] for o in others]
            ret.append(_)
        # Concatenate matching columns across the contributing inputs.
        ret = [np.concatenate(x, axis=0) for x in zip(*ret)]
        # shuffle 1 more time
        if rng is not None:
            permutation = rng.permutation(ret[0].shape[0])
            ret = [r[permutation] for r in ret]
        # return the batches
        for i in range((ret[0].shape[0] - 1) // batch_size + 1):
            start = i * batch_size
            end = start + batch_size
            _ = [x[start:end] for x in ret]
            # always return tuple or list
            if _ is not None:
                yield _ if isinstance(_, (tuple, list)) else (ret,)
def group2(batch):
    """Stack every (name, [data...], extra...) tuple in *batch* into big
    arrays, shuffle all rows once with a fixed seed, then yield mini-batches
    of at most 64 rows as lists (data arrays first, extra arrays after).

    Extra arrays whose length differs from the data length are yielded
    unshuffled.  Faster than group() at the cost of holding the fully
    stacked arrays in memory.
    """
    rng = np.random.RandomState(1234)
    batch_size = 64

    nb_data = len(batch[0][1])
    nb_others = len(batch[0]) - 2
    data_cols = [[] for _ in range(nb_data)]
    other_cols = [[] for _ in range(nb_others)]

    # Gather each column (data input / extra input) across all batch items.
    for item in batch:
        for col, arr in zip(data_cols, item[1]):
            col.append(arr)
        for col, arr in zip(other_cols, item[2:]):
            col.append(arr)

    # ====== stack everything into big arrays ====== #
    X = [np.vstack(col) for col in data_cols]
    Y = [np.concatenate(col, axis=0) for col in other_cols]
    total = X[0].shape[0]

    # ====== one global shuffle ====== #
    if rng is not None:
        order = rng.permutation(total)
        X = [x[order] for x in X]
        Y = [y[order] if y.shape[0] == total else y for y in Y]

    # ====== slice into mini-batches ====== #
    for start in range(0, total, batch_size):
        stop = start + batch_size
        out = [x[start:stop] for x in X] + [y[start:stop] for y in Y]
        yield out
@profile
def test():
    # Time 12 full passes over the three preloaded samples to benchmark the
    # group() strategy (compare against group2 per the module header).
    with UnitTimer(12):
        for _ in range(12):
            for i, j in group((x0, x1, x2)):
                # print(i.shape, j.shape)
                pass

test()
| mit |
akosyakov/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/db/backends/spatialite/models.py | 403 | 1847 | """
The GeometryColumns and SpatialRefSys models for the SpatiaLite backend.
"""
from django.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
    """
    The 'geometry_columns' table from SpatiaLite.
    """
    # Metadata describing one geometry column of one feature table.
    f_table_name = models.CharField(max_length=256)
    f_geometry_column = models.CharField(max_length=256)
    type = models.CharField(max_length=30)
    coord_dimension = models.IntegerField()
    srid = models.IntegerField(primary_key=True)
    spatial_index_enabled = models.IntegerField()

    class Meta:
        db_table = 'geometry_columns'
        managed = False  # table is owned by SpatiaLite, not Django migrations

    @classmethod
    def table_name_col(cls):
        """
        Returns the name of the metadata column used to store the
        feature table name.
        """
        return 'f_table_name'

    @classmethod
    def geom_col_name(cls):
        """
        Returns the name of the metadata column used to store the
        feature geometry column.
        """
        return 'f_geometry_column'

    def __unicode__(self):
        return "%s.%s - %dD %s field (SRID: %d)" % \
               (self.f_table_name, self.f_geometry_column,
                self.coord_dimension, self.type, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
    """
    The 'spatial_ref_sys' table from SpatiaLite.
    """
    srid = models.IntegerField(primary_key=True)
    auth_name = models.CharField(max_length=256)
    auth_srid = models.IntegerField()
    ref_sys_name = models.CharField(max_length=256)
    proj4text = models.CharField(max_length=2048)

    @property
    def wkt(self):
        # SpatiaLite stores only the PROJ.4 definition; derive OGC WKT
        # on demand via GDAL (import deferred to avoid a hard GDAL
        # dependency at module load).
        from django.contrib.gis.gdal import SpatialReference
        return SpatialReference(self.proj4text).wkt

    class Meta:
        db_table = 'spatial_ref_sys'
        managed = False  # table is owned by SpatiaLite, not Django migrations
| apache-2.0 |
Drekscott/Motlaesaleor | saleor/registration/migrations/0001_initial.py | 19 | 2160 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import saleor.registration.models
class Migration(migrations.Migration):
    # Initial schema for the registration app: email confirmation/change
    # request tables plus external (social) account identifiers.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='EmailChangeRequest',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('token', models.CharField(unique=True, max_length=36)),
                ('valid_until', models.DateTimeField(default=saleor.registration.models.default_valid_date)),
                ('email', models.EmailField(max_length=254)),
                ('user', models.ForeignKey(related_name='email_change_requests', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='EmailConfirmationRequest',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('token', models.CharField(unique=True, max_length=36)),
                ('valid_until', models.DateTimeField(default=saleor.registration.models.default_valid_date)),
                ('email', models.EmailField(max_length=254)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ExternalUserData',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('service', models.TextField(db_index=True)),
                ('username', models.TextField(db_index=True)),
                ('user', models.ForeignKey(related_name='external_ids', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # One external identity per (service, username) pair.
        migrations.AlterUniqueTogether(
            name='externaluserdata',
            unique_together=set([('service', 'username')]),
        ),
    ]
| bsd-3-clause |
spencerjanssen/Flexget | flexget/plugins/modify/plugin_priority.py | 13 | 1804 | from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.event import event
log = logging.getLogger('p_priority')
class PluginPriority(object):
    """
    Allows modifying plugin priorities from default values.

    Example:
      plugin_priority:
        ignore: 50
        series: 100
    """

    schema = {'type': 'object', 'additionalProperties': {'type': 'integer'}}

    def __init__(self):
        # Maps plugin name -> {phase: original priority}; filled on task
        # start so the defaults can be restored on exit/abort.
        self.priorities = {}

    def on_task_start(self, task, config):
        self.priorities = {}
        names = []
        for name, priority in config.iteritems():
            names.append(name)
            originals = self.priorities.setdefault(name, {})
            # Remember each phase handler's default priority before
            # overriding it with the user-configured value.
            for phase, event in plugin.plugins[name].phase_handlers.iteritems():
                originals[phase] = event.priority
                log.debug('stored %s original value %s' % (phase, event.priority))
                event.priority = priority
                log.debug('set %s new value %s' % (phase, priority))
        log.debug('Changed priority for: %s' % ', '.join(names))

    def on_task_exit(self, task, config):
        if not self.priorities:
            # on_task_start never ran, or the restore already happened.
            log.debug('nothing changed, aborting restore')
            return
        names = []
        for name in config.keys():
            names.append(name)
            originals = self.priorities[name]
            for phase, priority in originals.iteritems():
                plugin.plugins[name].phase_handlers[phase].priority = priority
        log.debug('Restored priority for: %s' % ', '.join(names))
        self.priorities = {}

    # Aborted tasks must restore the default priorities too.
    on_task_abort = on_task_exit
@event('plugin.register')
def register_plugin():
    # api_ver=2: phase handlers receive (task, config) instead of just task.
    plugin.register(PluginPriority, 'plugin_priority', api_ver=2)
| mit |
safwanrahman/mozillians | vendor-local/lib/python/rest_framework/tests/test_serializer_nested.py | 19 | 12564 | """
Tests to cover nested serializers.
Doesn't cover model serializers.
"""
from __future__ import unicode_literals
from django.test import TestCase
from rest_framework import serializers
from . import models
class WritableNestedSerializerBasicTests(TestCase):
    """
    Tests for deserializing nested entities.
    Basic tests that use serializers that simply restore to dicts.
    """

    def setUp(self):
        # Inline serializer classes: tracks nest inside albums via
        # ``many=True``; no restore_object, so validation restores dicts.
        class TrackSerializer(serializers.Serializer):
            order = serializers.IntegerField()
            title = serializers.CharField(max_length=100)
            duration = serializers.IntegerField()

        class AlbumSerializer(serializers.Serializer):
            album_name = serializers.CharField(max_length=100)
            artist = serializers.CharField(max_length=100)
            tracks = TrackSerializer(many=True)

        self.AlbumSerializer = AlbumSerializer

    def test_nested_validation_success(self):
        """
        Correct nested serialization should return the input data.
        """
        data = {
            'album_name': 'Discovery',
            'artist': 'Daft Punk',
            'tracks': [
                {'order': 1, 'title': 'One More Time', 'duration': 235},
                {'order': 2, 'title': 'Aerodynamic', 'duration': 184},
                {'order': 3, 'title': 'Digital Love', 'duration': 239}
            ]
        }

        serializer = self.AlbumSerializer(data=data)
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(serializer.object, data)

    def test_nested_validation_error(self):
        """
        Incorrect nested serialization should return appropriate error data.
        """
        data = {
            'album_name': 'Discovery',
            'artist': 'Daft Punk',
            'tracks': [
                {'order': 1, 'title': 'One More Time', 'duration': 235},
                {'order': 2, 'title': 'Aerodynamic', 'duration': 184},
                {'order': 3, 'title': 'Digital Love', 'duration': 'foobar'}
            ]
        }
        # Error structure mirrors the input list: one (possibly empty)
        # dict per nested track.
        expected_errors = {
            'tracks': [
                {},
                {},
                {'duration': ['Enter a whole number.']}
            ]
        }

        serializer = self.AlbumSerializer(data=data)
        self.assertEqual(serializer.is_valid(), False)
        self.assertEqual(serializer.errors, expected_errors)

    def test_many_nested_validation_error(self):
        """
        Incorrect nested serialization should return appropriate error data
        when multiple entities are being deserialized.
        """
        data = [
            {
                'album_name': 'Russian Red',
                'artist': 'I Love Your Glasses',
                'tracks': [
                    {'order': 1, 'title': 'Cigarettes', 'duration': 121},
                    {'order': 2, 'title': 'No Past Land', 'duration': 198},
                    {'order': 3, 'title': 'They Don\'t Believe', 'duration': 191}
                ]
            },
            {
                'album_name': 'Discovery',
                'artist': 'Daft Punk',
                'tracks': [
                    {'order': 1, 'title': 'One More Time', 'duration': 235},
                    {'order': 2, 'title': 'Aerodynamic', 'duration': 184},
                    {'order': 3, 'title': 'Digital Love', 'duration': 'foobar'}
                ]
            }
        ]
        # Outer list mirrors the albums; only the second album has errors.
        expected_errors = [
            {},
            {
                'tracks': [
                    {},
                    {},
                    {'duration': ['Enter a whole number.']}
                ]
            }
        ]

        serializer = self.AlbumSerializer(data=data, many=True)
        self.assertEqual(serializer.is_valid(), False)
        self.assertEqual(serializer.errors, expected_errors)
class WritableNestedSerializerObjectTests(TestCase):
    """
    Tests for deserializing nested entities.
    These tests use serializers that restore to concrete objects.
    """

    def setUp(self):
        # Couple of concrete objects that we're going to deserialize into
        class Track(object):
            def __init__(self, order, title, duration):
                self.order, self.title, self.duration = order, title, duration

            def __eq__(self, other):
                # Value equality so restored objects compare in assertions.
                return (
                    self.order == other.order and
                    self.title == other.title and
                    self.duration == other.duration
                )

        class Album(object):
            def __init__(self, album_name, artist, tracks):
                self.album_name, self.artist, self.tracks = album_name, artist, tracks

            def __eq__(self, other):
                return (
                    self.album_name == other.album_name and
                    self.artist == other.artist and
                    self.tracks == other.tracks
                )

        # And their corresponding serializers
        class TrackSerializer(serializers.Serializer):
            order = serializers.IntegerField()
            title = serializers.CharField(max_length=100)
            duration = serializers.IntegerField()

            def restore_object(self, attrs, instance=None):
                # Always builds a fresh Track; updates are not exercised here.
                return Track(attrs['order'], attrs['title'], attrs['duration'])

        class AlbumSerializer(serializers.Serializer):
            album_name = serializers.CharField(max_length=100)
            artist = serializers.CharField(max_length=100)
            tracks = TrackSerializer(many=True)

            def restore_object(self, attrs, instance=None):
                return Album(attrs['album_name'], attrs['artist'], attrs['tracks'])

        self.Album, self.Track = Album, Track
        self.AlbumSerializer = AlbumSerializer

    def test_nested_validation_success(self):
        """
        Correct nested serialization should return a restored object
        that corresponds to the input data.
        """
        data = {
            'album_name': 'Discovery',
            'artist': 'Daft Punk',
            'tracks': [
                {'order': 1, 'title': 'One More Time', 'duration': 235},
                {'order': 2, 'title': 'Aerodynamic', 'duration': 184},
                {'order': 3, 'title': 'Digital Love', 'duration': 239}
            ]
        }
        expected_object = self.Album(
            album_name='Discovery',
            artist='Daft Punk',
            tracks=[
                self.Track(order=1, title='One More Time', duration=235),
                self.Track(order=2, title='Aerodynamic', duration=184),
                self.Track(order=3, title='Digital Love', duration=239),
            ]
        )
        serializer = self.AlbumSerializer(data=data)
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(serializer.object, expected_object)

    def test_many_nested_validation_success(self):
        """
        Correct nested serialization should return multiple restored objects
        that corresponds to the input data when multiple objects are
        being deserialized.
        """
        data = [
            {
                'album_name': 'Russian Red',
                'artist': 'I Love Your Glasses',
                'tracks': [
                    {'order': 1, 'title': 'Cigarettes', 'duration': 121},
                    {'order': 2, 'title': 'No Past Land', 'duration': 198},
                    {'order': 3, 'title': 'They Don\'t Believe', 'duration': 191}
                ]
            },
            {
                'album_name': 'Discovery',
                'artist': 'Daft Punk',
                'tracks': [
                    {'order': 1, 'title': 'One More Time', 'duration': 235},
                    {'order': 2, 'title': 'Aerodynamic', 'duration': 184},
                    {'order': 3, 'title': 'Digital Love', 'duration': 239}
                ]
            }
        ]
        expected_object = [
            self.Album(
                album_name='Russian Red',
                artist='I Love Your Glasses',
                tracks=[
                    self.Track(order=1, title='Cigarettes', duration=121),
                    self.Track(order=2, title='No Past Land', duration=198),
                    self.Track(order=3, title='They Don\'t Believe', duration=191),
                ]
            ),
            self.Album(
                album_name='Discovery',
                artist='Daft Punk',
                tracks=[
                    self.Track(order=1, title='One More Time', duration=235),
                    self.Track(order=2, title='Aerodynamic', duration=184),
                    self.Track(order=3, title='Digital Love', duration=239),
                ]
            )
        ]
        serializer = self.AlbumSerializer(data=data, many=True)
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(serializer.object, expected_object)
class ForeignKeyNestedSerializerUpdateTests(TestCase):
    """Create and update through a nested serializer that uses `source`."""

    def setUp(self):
        class Artist(object):
            def __init__(self, name):
                self.name = name

            def __eq__(self, other):
                return self.name == other.name

        class Album(object):
            def __init__(self, name, artist):
                self.name, self.artist = name, artist

            def __eq__(self, other):
                return self.name == other.name and self.artist == other.artist

        class ArtistSerializer(serializers.Serializer):
            name = serializers.CharField()

            def restore_object(self, attrs, instance=None):
                # Mutate in place when updating; otherwise build a new object.
                if instance:
                    instance.name = attrs['name']
                else:
                    instance = Artist(attrs['name'])
                return instance

        class AlbumSerializer(serializers.Serializer):
            name = serializers.CharField()
            # Field exposed as 'by' in the payload, stored as `artist`.
            by = ArtistSerializer(source='artist')

            def restore_object(self, attrs, instance=None):
                if instance:
                    instance.name = attrs['name']
                    instance.artist = attrs['artist']
                else:
                    instance = Album(attrs['name'], attrs['artist'])
                return instance

        self.Artist = Artist
        self.Album = Album
        self.AlbumSerializer = AlbumSerializer

    def test_create_via_foreign_key_with_source(self):
        """
        Check that we can both *create* and *update* into objects across
        ForeignKeys that have a `source` specified.
        Regression test for #1170
        """
        data = {
            'name': 'Discovery',
            'by': {'name': 'Daft Punk'},
        }
        expected = self.Album(artist=self.Artist('Daft Punk'), name='Discovery')
        # create
        serializer = self.AlbumSerializer(data=data)
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(serializer.object, expected)
        # update
        original = self.Album(artist=self.Artist('The Bats'), name='Free All the Monsters')
        serializer = self.AlbumSerializer(instance=original, data=data)
        self.assertEqual(serializer.is_valid(), True)
        self.assertEqual(serializer.object, expected)
class NestedModelSerializerUpdateTests(TestCase):
    """Round-trip a two-level nested ModelSerializer through an update."""

    def test_second_nested_level(self):
        """Serialize then re-deserialize a person -> posts -> comments tree."""
        john = models.Person.objects.create(name="john")

        post = john.blogpost_set.create(title="Test blog post")
        post.blogpostcomment_set.create(text="I hate this blog post")
        post.blogpostcomment_set.create(text="I love this blog post")

        class BlogPostCommentSerializer(serializers.ModelSerializer):
            class Meta:
                model = models.BlogPostComment

        class BlogPostSerializer(serializers.ModelSerializer):
            comments = BlogPostCommentSerializer(many=True, source='blogpostcomment_set')

            class Meta:
                model = models.BlogPost
                fields = ('id', 'title', 'comments')

        class PersonSerializer(serializers.ModelSerializer):
            posts = BlogPostSerializer(many=True, source='blogpost_set')

            class Meta:
                model = models.Person
                fields = ('id', 'name', 'age', 'posts')

        serialize = PersonSerializer(instance=john)
        deserialize = PersonSerializer(data=serialize.data, instance=john)
        self.assertTrue(deserialize.is_valid())

        result = deserialize.object
        result.save()
        # Updating must preserve the original primary key.
        self.assertEqual(result.id, john.id)
| bsd-3-clause |
ProjectSWGCore/NGECore2 | scripts/mobiles/tatooine/jabba_swooper.py | 2 | 1269 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
    """Build and register the 'jabba_swooper' NPC template with the spawn service."""
    mobileTemplate = MobileTemplate()

    mobileTemplate.setCreatureName('jabbas_swooper')
    mobileTemplate.setLevel(13)
    mobileTemplate.setDifficulty(Difficulty.NORMAL)
    mobileTemplate.setMinSpawnDistance(4)
    mobileTemplate.setMaxSpawnDistance(8)
    mobileTemplate.setDeathblow(False)
    mobileTemplate.setScale(1)
    mobileTemplate.setSocialGroup("jabba")
    mobileTemplate.setAssistRange(0)
    mobileTemplate.setStalker(True)

    # Visual appearance (client IFF template).
    templates = Vector()
    templates.add('object/mobile/shared_dressed_tatooine_jabba_swooper.iff')
    mobileTemplate.setTemplates(templates)

    # Armed with a standard CDEF carbine (energy damage).
    weaponTemplates = Vector()
    weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_cdef.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
    weaponTemplates.add(weapontemplate)
    mobileTemplate.setWeaponTemplateVector(weaponTemplates)

    # No special attacks; only the default ranged shot is used.
    attacks = Vector()
    mobileTemplate.setDefaultAttack('rangedShot')
    mobileTemplate.setAttacks(attacks)

    core.spawnService.addMobileTemplate('jabba_swooper', mobileTemplate)
    return
cmu-delphi/delphi-epidata | tests/server/test_validate.py | 1 | 10987 | """Unit tests for granular sensor authentication in api.php."""
# standard library
import unittest
import base64
# from flask.testing import FlaskClient
from delphi.epidata.server._common import app
from delphi.epidata.server._validate import (
resolve_auth_token,
check_auth_token,
require_all,
require_any,
extract_strings,
extract_integers,
extract_integer,
extract_date,
extract_dates
)
from delphi.epidata.server._exceptions import (
ValidationFailedException,
UnAuthenticatedException,
)
# py3tester coverage target
__test_target__ = "delphi.epidata.server._validate"
class UnitTests(unittest.TestCase):
    """Basic unit tests."""
    # app: FlaskClient

    def setUp(self):
        """Put the shared Flask app into test mode for each case."""
        app.config["TESTING"] = True
        app.config["WTF_CSRF_ENABLED"] = False
        app.config["DEBUG"] = False

    def test_resolve_auth_token(self):
        """Token can arrive via query param, Bearer header, or Basic auth."""
        with self.subTest("no auth"):
            with app.test_request_context("/"):
                self.assertIsNone(resolve_auth_token())
        with self.subTest("param"):
            with app.test_request_context("/?auth=abc"):
                self.assertEqual(resolve_auth_token(), "abc")
        with self.subTest("bearer token"):
            with app.test_request_context("/", headers={"Authorization": "Bearer abc"}):
                self.assertEqual(resolve_auth_token(), "abc")
        with self.subTest("basic token"):
            # Basic auth encodes "epidata:<token>"; the password half is the token.
            userpass = base64.b64encode(b"epidata:abc").decode("utf-8")
            with app.test_request_context(
                "/", headers={"Authorization": f"Basic {userpass}"}
            ):
                self.assertEqual(resolve_auth_token(), "abc")

    def test_check_auth_token(self):
        """Missing/wrong tokens raise unless the check is optional."""
        with self.subTest("no auth but optional"):
            with app.test_request_context("/"):
                self.assertFalse(check_auth_token("abc", True))
        with self.subTest("no auth but required"):
            with app.test_request_context("/"):
                self.assertRaises(
                    ValidationFailedException, lambda: check_auth_token("abc")
                )
        with self.subTest("auth and required"):
            with app.test_request_context("/?auth=abc"):
                self.assertTrue(check_auth_token("abc"))
        with self.subTest("auth and required but wrong"):
            with app.test_request_context("/?auth=abc"):
                self.assertRaises(
                    UnAuthenticatedException, lambda: check_auth_token("def")
                )
        with self.subTest("auth and required but wrong but optional"):
            with app.test_request_context("/?auth=abc"):
                self.assertFalse(check_auth_token("def", True))

    def test_require_all(self):
        """require_all passes only when every named param is present and non-empty."""
        with self.subTest("all given"):
            with app.test_request_context("/"):
                self.assertTrue(require_all())
            with app.test_request_context("/?abc=abc&def=3"):
                self.assertTrue(require_all("abc", "def"))
        with self.subTest("missing parameter"):
            with app.test_request_context("/?abc=abc"):
                self.assertRaises(
                    ValidationFailedException, lambda: require_all("abc", "def")
                )
        with self.subTest("missing empty parameter"):
            with app.test_request_context("/?abc=abc&def="):
                self.assertRaises(
                    ValidationFailedException, lambda: require_all("abc", "def")
                )

    def test_require_any(self):
        """require_any passes when at least one named param is given."""
        with self.subTest("default given"):
            with app.test_request_context("/"):
                self.assertRaises(ValidationFailedException, lambda: require_any("abc"))
        with self.subTest("one option give"):
            with app.test_request_context("/?abc=abc"):
                self.assertTrue(require_any("abc", "def"))
        with self.subTest("multiple options given"):
            with app.test_request_context("/?abc=abc&def=d"):
                self.assertTrue(require_any("abc", "def"))
        with self.subTest("one options given with is empty"):
            with app.test_request_context("/?abc="):
                self.assertRaises(ValidationFailedException, lambda: require_any("abc"))
        with self.subTest("one options given with is empty but ok"):
            # empty=True accepts a present-but-empty value.
            with app.test_request_context("/?abc="):
                self.assertTrue(require_any("abc", empty=True))

    def test_extract_strings(self):
        """Comma-separated values and repeated params flatten into one list."""
        with self.subTest("empty"):
            with app.test_request_context("/"):
                self.assertIsNone(extract_strings("s"))
        with self.subTest("single"):
            with app.test_request_context("/?s=a"):
                self.assertEqual(extract_strings("s"), ["a"])
        with self.subTest("multiple"):
            with app.test_request_context("/?s=a,b"):
                self.assertEqual(extract_strings("s"), ["a", "b"])
        with self.subTest("multiple param"):
            with app.test_request_context("/?s=a&s=b"):
                self.assertEqual(extract_strings("s"), ["a", "b"])
        with self.subTest("multiple param mixed"):
            with app.test_request_context("/?s=a&s=b,c"):
                self.assertEqual(extract_strings("s"), ["a", "b", "c"])

    def test_extract_integer(self):
        """A single integer param parses; non-numeric input raises."""
        with self.subTest("empty"):
            with app.test_request_context("/"):
                self.assertIsNone(extract_integer("s"))
        with self.subTest("single"):
            with app.test_request_context("/?s=1"):
                self.assertEqual(extract_integer("s"), 1)
        with self.subTest("not a number"):
            with app.test_request_context("/?s=a"):
                self.assertRaises(ValidationFailedException, lambda: extract_integer("s"))

    def test_extract_integers(self):
        """Lists and 'a-b' ranges parse; inverted ranges are rejected."""
        with self.subTest("empty"):
            with app.test_request_context("/"):
                self.assertIsNone(extract_integers("s"))
        with self.subTest("single"):
            with app.test_request_context("/?s=1"):
                self.assertEqual(extract_integers("s"), [1])
        with self.subTest("multiple"):
            with app.test_request_context("/?s=1,2"):
                self.assertEqual(extract_integers("s"), [1,2])
        with self.subTest("multiple param"):
            with app.test_request_context("/?s=1&s=2"):
                self.assertEqual(extract_integers("s"), [1,2])
        with self.subTest("multiple param mixed"):
            with app.test_request_context("/?s=1&s=2,3"):
                self.assertEqual(extract_integers("s"), [1, 2, 3])
        with self.subTest("not a number"):
            with app.test_request_context("/?s=a"):
                self.assertRaises(ValidationFailedException, lambda: extract_integers("s"))
        with self.subTest("simple range"):
            # Ranges come back as (low, high) tuples.
            with app.test_request_context("/?s=1-2"):
                self.assertEqual(extract_integers("s"), [(1, 2)])
        with self.subTest("inverted range"):
            with app.test_request_context("/?s=2-1"):
                self.assertRaises(ValidationFailedException, lambda: extract_integers("s"))
        with self.subTest("single range"):
            # A degenerate range collapses to a single value.
            with app.test_request_context("/?s=1-1"):
                self.assertEqual(extract_integers("s"), [1])

    def test_extract_date(self):
        """Dates parse from ISO or YYYYMMDD form into an int YYYYMMDD."""
        with self.subTest("empty"):
            with app.test_request_context("/"):
                self.assertIsNone(extract_date("s"))
        with self.subTest("single"):
            with app.test_request_context("/?s=2020-01-01"):
                self.assertEqual(extract_date("s"), 20200101)
            with app.test_request_context("/?s=20200101"):
                self.assertEqual(extract_date("s"), 20200101)
        with self.subTest("not a date"):
            with app.test_request_context("/?s=abc"):
                self.assertRaises(ValidationFailedException, lambda: extract_date("s"))

    def test_extract_dates(self):
        """Date lists and ranges parse in both YYYYMMDD and ISO forms."""
        with self.subTest("empty"):
            with app.test_request_context("/"):
                self.assertIsNone(extract_dates("s"))
        with self.subTest("single"):
            with app.test_request_context("/?s=20200101"):
                self.assertEqual(extract_dates("s"), [20200101])
        with self.subTest("multiple"):
            with app.test_request_context("/?s=20200101,20200102"):
                self.assertEqual(extract_dates("s"), [20200101, 20200102])
        with self.subTest("multiple param"):
            with app.test_request_context("/?s=20200101&s=20200102"):
                self.assertEqual(extract_dates("s"), [20200101, 20200102])
        with self.subTest("multiple param mixed"):
            with app.test_request_context("/?s=20200101&s=20200102,20200103"):
                self.assertEqual(extract_dates("s"), [20200101, 20200102, 20200103])
        with self.subTest("single iso"):
            with app.test_request_context("/?s=2020-01-01"):
                self.assertEqual(extract_dates("s"), [20200101])
        with self.subTest("multiple iso"):
            with app.test_request_context("/?s=2020-01-01,2020-01-02"):
                self.assertEqual(extract_dates("s"), [20200101, 20200102])
        with self.subTest("multiple param iso"):
            with app.test_request_context("/?s=2020-01-01&s=2020-01-02"):
                self.assertEqual(extract_dates("s"), [20200101, 20200102])
        with self.subTest("multiple param mixed iso"):
            with app.test_request_context("/?s=2020-01-01&s=2020-01-02,2020-01-03"):
                self.assertEqual(extract_dates("s"), [20200101, 20200102, 20200103])
        with self.subTest("not a date"):
            with app.test_request_context("/?s=a"):
                self.assertRaises(ValidationFailedException, lambda: extract_dates("s"))
        with self.subTest("simple range"):
            # YYYYMMDD ranges use '-' as the separator.
            with app.test_request_context("/?s=20200101-20200102"):
                self.assertEqual(extract_dates("s"), [(20200101, 20200102)])
        with self.subTest("inverted range"):
            with app.test_request_context("/?s=20200102-20200101"):
                self.assertRaises(ValidationFailedException, lambda: extract_dates("s"))
        with self.subTest("single range"):
            with app.test_request_context("/?s=20200101-20200101"):
                self.assertEqual(extract_dates("s"), [20200101])
        with self.subTest("simple range iso"):
            # ISO ranges use ':' since '-' is part of the date itself.
            with app.test_request_context("/?s=2020-01-01:2020-01-02"):
                self.assertEqual(extract_dates("s"), [(20200101, 20200102)])
        with self.subTest("inverted range iso"):
            with app.test_request_context("/?s=2020-01-02:2020-01-01"):
                self.assertRaises(ValidationFailedException, lambda: extract_dates("s"))
        with self.subTest("single range iso"):
            with app.test_request_context("/?s=2020-01-01:2020-01-01"):
                self.assertEqual(extract_dates("s"), [20200101])
| mit |
kobejean/tensorflow | tensorflow/python/autograph/converters/name_scopes_test.py | 3 | 3158 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for for_canonicalization module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import name_scopes
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class FunctionNameScopeTransformer(converter_testing.TestCase):
  """Tests that converted functions wrap their ops in matching name scopes."""

  def test_basic(self):

    def test_fn(l):
      """This should stay here."""
      a = 1
      l += a
      return l

    with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
      result_op = result.test_fn(constant_op.constant(1))
      # Ops created inside the converted function carry the function's scope.
      self.assertIn('test_fn/', result_op.op.name)
      # The conversion must preserve the original docstring.
      self.assertEqual('This should stay here.', result.test_fn.__doc__)

  def test_long_docstring(self):

    def test_fn(l):
      """Multi-line docstring.

      Args:
        l: A thing.
      Returns:
        l
      """
      return l + 1

    with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
      result_op = result.test_fn(constant_op.constant(1))
      self.assertIn('test_fn/', result_op.op.name)
      # Multi-line docstrings survive conversion intact.
      self.assertIn('Multi-line docstring.', result.test_fn.__doc__)
      self.assertIn('Returns:', result.test_fn.__doc__)

  def test_nested_functions(self):

    def test_fn(l):

      def inner_fn(i):
        return i + 1

      l += 1
      return l, inner_fn(l)

    with self.converted(test_fn, name_scopes, {}, ops.name_scope) as result:
      first, second = result.test_fn(constant_op.constant(1))
      # Outer op scoped to the outer function only.
      self.assertIn('test_fn/', first.op.name)
      self.assertNotIn('inner_fn', first.op.name)
      # Inner op gets a nested scope.
      self.assertIn('test_fn/inner_fn/', second.op.name)

  def test_method(self):

    class TestClass(object):

      def test_fn(self, l):

        def inner_fn(i):
          return i + 1

        l += 1
        return l, inner_fn(l)

    ns = {'TestClass': TestClass}
    node, ctx = self.prepare(TestClass, ns, owner_type=TestClass)
    node = name_scopes.transform(node, ctx)

    with self.compiled(node, {}, ops.name_scope) as result:
      first, second = result.TestClass().test_fn(constant_op.constant(1))
      # Methods include the owning class name in the scope.
      self.assertIn('TestClass/test_fn/', first.op.name)
      self.assertNotIn('inner_fn', first.op.name)
      self.assertIn('TestClass/test_fn/inner_fn/', second.op.name)
if __name__ == '__main__':
  # Run the test suite when executed directly.
  test.main()
| apache-2.0 |
alexthered/kienhoc-platform | lms/djangoapps/courseware/tests/test_views.py | 7 | 50129 | # coding=UTF-8
"""
Tests courseware views.py
"""
import cgi
from urllib import urlencode
import ddt
import json
import itertools
import unittest
from datetime import datetime
from HTMLParser import HTMLParser
from nose.plugins.attrib import attr
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponseBadRequest
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
from mock import MagicMock, patch, create_autospec, Mock
from opaque_keys.edx.locations import Location, SlashSeparatedCourseKey
from pytz import UTC
from xblock.core import XBlock
from xblock.fields import String, Scope
from xblock.fragment import Fragment
import courseware.views as views
import shoppingcart
from certificates import api as certs_api
from certificates.models import CertificateStatuses, CertificateGenerationConfiguration
from certificates.tests.factories import GeneratedCertificateFactory
from course_modes.models import CourseMode
from courseware.model_data import set_score
from courseware.testutils import RenderXBlockTestMixin
from courseware.tests.factories import StudentModuleFactory
from courseware.user_state_client import DjangoXBlockUserStateClient
from edxmako.tests import mako_middleware_process_request
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from student.models import CourseEnrollment
from student.tests.factories import AdminFactory, UserFactory, CourseEnrollmentFactory
from util.tests.test_date_utils import fake_ugettext, fake_pgettext
from util.url import reload_django_url_config
from util.views import ensure_valid_course_key
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import TEST_DATA_MIXED_TOY_MODULESTORE
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, check_mongo_calls
@attr('shard_1')
class TestJumpTo(ModuleStoreTestCase):
    """
    Check the jumpto link for a course.
    """
    MODULESTORE = TEST_DATA_MIXED_TOY_MODULESTORE

    def setUp(self):
        super(TestJumpTo, self).setUp()
        # Use toy course from XML
        self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')

    def test_jumpto_invalid_location(self):
        """Jumping to a nonexistent usage key should return a 404."""
        location = self.course_key.make_usage_key(None, 'NoSuchPlace')
        # This is fragile, but unfortunately the problem is that within the LMS we
        # can't use the reverse calls from the CMS
        jumpto_url = '{0}/{1}/jump_to/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
        response = self.client.get(jumpto_url)
        self.assertEqual(response.status_code, 404)

    @unittest.skip
    def test_jumpto_from_chapter(self):
        location = self.course_key.make_usage_key('chapter', 'Overview')
        jumpto_url = '{0}/{1}/jump_to/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
        expected = 'courses/edX/toy/2012_Fall/courseware/Overview/'
        response = self.client.get(jumpto_url)
        self.assertRedirects(response, expected, status_code=302, target_status_code=302)

    @unittest.skip
    def test_jumpto_id(self):
        jumpto_url = '{0}/{1}/jump_to_id/{2}'.format('/courses', self.course_key.to_deprecated_string(), 'Overview')
        expected = 'courses/edX/toy/2012_Fall/courseware/Overview/'
        response = self.client.get(jumpto_url)
        self.assertRedirects(response, expected, status_code=302, target_status_code=302)

    def test_jumpto_from_section(self):
        """Jumping to a section redirects to that section's courseware page."""
        course = CourseFactory.create()
        chapter = ItemFactory.create(category='chapter', parent_location=course.location)
        section = ItemFactory.create(category='sequential', parent_location=chapter.location)
        expected = 'courses/{course_id}/courseware/{chapter_id}/{section_id}/?{activate_block_id}'.format(
            course_id=unicode(course.id),
            chapter_id=chapter.url_name,
            section_id=section.url_name,
            activate_block_id=urlencode({'activate_block_id': unicode(section.location)})
        )
        jumpto_url = '{0}/{1}/jump_to/{2}'.format(
            '/courses',
            unicode(course.id),
            unicode(section.location),
        )
        response = self.client.get(jumpto_url)
        self.assertRedirects(response, expected, status_code=302, target_status_code=302)

    def test_jumpto_from_module(self):
        """Jumping to a module redirects to the section position that holds it."""
        course = CourseFactory.create()
        chapter = ItemFactory.create(category='chapter', parent_location=course.location)
        section = ItemFactory.create(category='sequential', parent_location=chapter.location)
        vertical1 = ItemFactory.create(category='vertical', parent_location=section.location)
        vertical2 = ItemFactory.create(category='vertical', parent_location=section.location)
        module1 = ItemFactory.create(category='html', parent_location=vertical1.location)
        module2 = ItemFactory.create(category='html', parent_location=vertical2.location)

        # module1 is inside the first vertical -> position 1.
        expected = 'courses/{course_id}/courseware/{chapter_id}/{section_id}/1?{activate_block_id}'.format(
            course_id=unicode(course.id),
            chapter_id=chapter.url_name,
            section_id=section.url_name,
            activate_block_id=urlencode({'activate_block_id': unicode(module1.location)})
        )
        jumpto_url = '{0}/{1}/jump_to/{2}'.format(
            '/courses',
            unicode(course.id),
            unicode(module1.location),
        )
        response = self.client.get(jumpto_url)
        self.assertRedirects(response, expected, status_code=302, target_status_code=302)

        # module2 is inside the second vertical -> position 2.
        expected = 'courses/{course_id}/courseware/{chapter_id}/{section_id}/2?{activate_block_id}'.format(
            course_id=unicode(course.id),
            chapter_id=chapter.url_name,
            section_id=section.url_name,
            activate_block_id=urlencode({'activate_block_id': unicode(module2.location)})
        )
        jumpto_url = '{0}/{1}/jump_to/{2}'.format(
            '/courses',
            unicode(course.id),
            unicode(module2.location),
        )
        response = self.client.get(jumpto_url)
        self.assertRedirects(response, expected, status_code=302, target_status_code=302)

    def test_jumpto_from_nested_module(self):
        """Jumping into a nested sequential resolves to the top-level position."""
        course = CourseFactory.create()
        chapter = ItemFactory.create(category='chapter', parent_location=course.location)
        section = ItemFactory.create(category='sequential', parent_location=chapter.location)
        vertical = ItemFactory.create(category='vertical', parent_location=section.location)
        nested_section = ItemFactory.create(category='sequential', parent_location=vertical.location)
        nested_vertical1 = ItemFactory.create(category='vertical', parent_location=nested_section.location)
        # put a module into nested_vertical1 for completeness
        ItemFactory.create(category='html', parent_location=nested_vertical1.location)
        nested_vertical2 = ItemFactory.create(category='vertical', parent_location=nested_section.location)
        module2 = ItemFactory.create(category='html', parent_location=nested_vertical2.location)

        # internal position of module2 will be 1_2 (2nd item withing 1st item)
        expected = 'courses/{course_id}/courseware/{chapter_id}/{section_id}/1?{activate_block_id}'.format(
            course_id=unicode(course.id),
            chapter_id=chapter.url_name,
            section_id=section.url_name,
            activate_block_id=urlencode({'activate_block_id': unicode(module2.location)})
        )
        jumpto_url = '{0}/{1}/jump_to/{2}'.format(
            '/courses',
            unicode(course.id),
            unicode(module2.location),
        )
        response = self.client.get(jumpto_url)
        self.assertRedirects(response, expected, status_code=302, target_status_code=302)

    def test_jumpto_id_invalid_location(self):
        """jump_to_id with a location that is not in the course should 404."""
        location = Location('edX', 'toy', 'NoSuchPlace', None, None, None)
        jumpto_url = '{0}/{1}/jump_to_id/{2}'.format('/courses', self.course_key.to_deprecated_string(), location.to_deprecated_string())
        response = self.client.get(jumpto_url)
        self.assertEqual(response.status_code, 404)
@attr('shard_1')
@ddt.ddt
class ViewsTestCase(ModuleStoreTestCase):
"""
Tests for views.py methods.
"""
    def setUp(self):
        """Build a minimal course tree and an enrolled test user."""
        super(ViewsTestCase, self).setUp()
        self.course = CourseFactory.create()
        self.chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
        self.section = ItemFactory.create(category='sequential', parent_location=self.chapter.location, due=datetime(2013, 9, 18, 11, 30, 00))
        self.vertical = ItemFactory.create(category='vertical', parent_location=self.section.location)
        self.component = ItemFactory.create(category='problem', parent_location=self.vertical.location)

        self.course_key = self.course.id
        self.user = UserFactory(username='dummy', password='123456', email='test@mit.edu')
        self.date = datetime(2013, 1, 22, tzinfo=UTC)
        self.enrollment = CourseEnrollment.enroll(self.user, self.course_key)
        # Backdate the enrollment record to a fixed, known date.
        self.enrollment.created = self.date
        self.enrollment.save()
        self.request_factory = RequestFactory()
        chapter = 'Overview'
        self.chapter_url = '%s/%s/%s' % ('/courses', self.course_key, chapter)

        # Non-ASCII org name and HTML-laden org string used by display tests.
        self.org = u"ꜱᴛᴀʀᴋ ɪɴᴅᴜꜱᴛʀɪᴇꜱ"
        self.org_html = "<p>'+Stark/Industries+'</p>"
    @unittest.skipUnless(settings.FEATURES.get('ENABLE_SHOPPING_CART'), "Shopping Cart not enabled in settings")
    @patch.dict(settings.FEATURES, {'ENABLE_PAID_COURSE_REGISTRATION': True})
    def test_course_about_in_cart(self):
        """The about page shows the in-cart span only after the course is added to the cart."""
        in_cart_span = '<span class="add-to-cart">'
        # don't mock this course due to shopping cart existence checking
        course = CourseFactory.create(org="new", number="unenrolled", display_name="course")
        request = self.request_factory.get(reverse('about_course', args=[course.id.to_deprecated_string()]))
        request.user = AnonymousUser()
        mako_middleware_process_request(request)
        response = views.course_about(request, course.id.to_deprecated_string())
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(in_cart_span, response.content)

        # authenticated user with nothing in cart
        request.user = self.user
        response = views.course_about(request, course.id.to_deprecated_string())
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(in_cart_span, response.content)

        # now add the course to the cart
        cart = shoppingcart.models.Order.get_cart_for_user(self.user)
        shoppingcart.models.PaidCourseRegistration.add_to_order(cart, course.id)
        response = views.course_about(request, course.id.to_deprecated_string())
        self.assertEqual(response.status_code, 200)
        self.assertIn(in_cart_span, response.content)
def test_user_groups(self):
# depreciated function
mock_user = MagicMock()
mock_user.is_authenticated.return_value = False
self.assertEqual(views.user_groups(mock_user), [])
def test_get_current_child(self):
self.assertIsNone(views.get_current_child(MagicMock()))
mock_xmodule = MagicMock()
mock_xmodule.position = -1
mock_xmodule.get_display_items.return_value = ['one', 'two']
self.assertEqual(views.get_current_child(mock_xmodule), 'one')
mock_xmodule_2 = MagicMock()
mock_xmodule_2.position = 3
mock_xmodule_2.get_display_items.return_value = []
self.assertIsNone(views.get_current_child(mock_xmodule_2))
    def test_redirect_to_course_position(self):
        """A module with a position but no children should raise Http404."""
        mock_module = MagicMock()
        mock_module.descriptor.id = 'Underwater Basketweaving'
        mock_module.position = 3
        mock_module.get_display_items.return_value = []
        self.assertRaises(Http404, views.redirect_to_course_position,
                          mock_module, views.CONTENT_DEPTH)
    def test_invalid_course_id(self):
        """A course id that matches no course should return a 404."""
        response = self.client.get('/courses/MITx/3.091X/')
        self.assertEqual(response.status_code, 404)
    def test_incomplete_course_id(self):
        """A partial (org-only) course id should return a 404."""
        response = self.client.get('/courses/MITx/')
        self.assertEqual(response.status_code, 404)
    def test_index_invalid_position(self):
        """A non-numeric position segment in the courseware URL should 404."""
        request_url = '/'.join([
            '/courses',
            self.course.id.to_deprecated_string(),
            'courseware',
            self.chapter.location.name,
            self.section.location.name,
            'f'
        ])
        self.client.login(username=self.user.username, password="123456")
        response = self.client.get(request_url)
        self.assertEqual(response.status_code, 404)
    def test_unicode_handling_in_url(self):
        """Corrupting any URL component with a non-ASCII char yields 404, not 500."""
        url_parts = [
            '/courses',
            self.course.id.to_deprecated_string(),
            'courseware',
            self.chapter.location.name,
            self.section.location.name,
            '1'
        ]
        self.client.login(username=self.user.username, password="123456")
        for idx, val in enumerate(url_parts):
            # Rebuild the URL with one part mangled at a time.
            url_parts_copy = url_parts[:]
            url_parts_copy[idx] = val + u'χ'
            request_url = '/'.join(url_parts_copy)
            response = self.client.get(request_url)
            self.assertEqual(response.status_code, 404)
    def test_registered_for_course(self):
        """registered_for_course is False for bad input, True for enrollees."""
        self.assertFalse(views.registered_for_course('Basketweaving', None))
        mock_user = MagicMock()
        mock_user.is_authenticated.return_value = False
        self.assertFalse(views.registered_for_course('dummy', mock_user))
        mock_course = MagicMock()
        mock_course.id = self.course_key
        # self.user was enrolled in this course during setUp.
        self.assertTrue(views.registered_for_course(mock_course, self.user))
    @override_settings(PAID_COURSE_REGISTRATION_CURRENCY=["USD", "$"])
    def test_get_cosmetic_display_price(self):
        """
        Check that get_cosmetic_display_price() returns the correct price given its inputs.
        """
        registration_price = 99
        self.course.cosmetic_display_price = 10
        # Since registration_price is set, it overrides the cosmetic_display_price and should be returned
        self.assertEqual(views.get_cosmetic_display_price(self.course, registration_price), "$99")

        registration_price = 0
        # Since registration_price is not set, cosmetic_display_price should be returned
        self.assertEqual(views.get_cosmetic_display_price(self.course, registration_price), "$10")

        self.course.cosmetic_display_price = 0
        # Since both prices are not set, there is no price, thus "Free"
        self.assertEqual(views.get_cosmetic_display_price(self.course, registration_price), "Free")
    def test_jump_to_invalid(self):
        """jump_to raises Http404 with a descriptive message for a malformed course key."""
        # TODO add a test for invalid location
        # TODO add a test for no data *
        request = self.request_factory.get(self.chapter_url)
        self.assertRaisesRegexp(Http404, 'Invalid course_key or usage_key', views.jump_to,
                                request, 'bar', ())
    @unittest.skip  # NOTE: currently disabled; relies on the XML toy-course fixture
    def test_no_end_on_about_page(self):
        # Toy course has no course end date or about/end_date blob
        self.verify_end_date('edX/toy/TT_2012_Fall')
    @unittest.skip  # NOTE: currently disabled; relies on the XML test_end course fixture
    def test_no_end_about_blob(self):
        # test_end has a course end date, no end_date HTML blob
        self.verify_end_date("edX/test_end/2012_Fall", "Sep 17, 2015")
    @unittest.skip  # NOTE: currently disabled; relies on an XML course fixture
    def test_about_blob_end_date(self):
        # test_about_blob_end_date has both a course end date and an end_date HTML blob.
        # HTML blob wins
        self.verify_end_date("edX/test_about_blob_end_date/2012_Fall", "Learning never ends")
    def verify_end_date(self, course_id, expected_end_text=None):
        """
        Visits the about page for `course_id` and tests that both the text "Classes End", as well
        as the specified `expected_end_text`, is present on the page.

        If `expected_end_text` is None, verifies that the about page *does not* contain the text
        "Classes End".
        """
        request = self.request_factory.get("foo")
        request.user = self.user

        # TODO: Remove the dependency on MakoMiddleware (by making the views explicitly supply a RequestContext)
        mako_middleware_process_request(request)

        result = views.course_about(request, course_id)
        if expected_end_text is not None:
            self.assertContains(result, "Classes End")
            self.assertContains(result, expected_end_text)
        else:
            self.assertNotContains(result, "Classes End")
def test_submission_history_accepts_valid_ids(self):
# log into a staff account
admin = AdminFactory()
self.client.login(username=admin.username, password='test')
url = reverse('submission_history', kwargs={
'course_id': self.course_key.to_deprecated_string(),
'student_username': 'dummy',
'location': self.component.location.to_deprecated_string(),
})
response = self.client.get(url)
# Tests that we do not get an "Invalid x" response when passing correct arguments to view
self.assertFalse('Invalid' in response.content)
    def test_submission_history_xss(self):
        """Script tags in URL parameters must never be echoed back unescaped."""
        # log into a staff account
        admin = AdminFactory()

        self.client.login(username=admin.username, password='test')

        # try it with an existing user and a malicious location
        url = reverse('submission_history', kwargs={
            'course_id': self.course_key.to_deprecated_string(),
            'student_username': 'dummy',
            'location': '<script>alert("hello");</script>'
        })
        response = self.client.get(url)
        self.assertFalse('<script>' in response.content)

        # try it with a malicious user and a non-existent location
        url = reverse('submission_history', kwargs={
            'course_id': self.course_key.to_deprecated_string(),
            'student_username': '<script>alert("hello");</script>',
            'location': 'dummy'
        })
        response = self.client.get(url)
        self.assertFalse('<script>' in response.content)
    def test_submission_history_contents(self):
        """The history page shows every stored state snapshot and its score."""
        # log into a staff account
        admin = AdminFactory.create()

        self.client.login(username=admin.username, password='test')

        usage_key = self.course_key.make_usage_key('problem', 'test-history')
        state_client = DjangoXBlockUserStateClient(admin)

        # store state via the UserStateClient
        state_client.set(
            username=admin.username,
            block_key=usage_key,
            state={'field_a': 'x', 'field_b': 'y'}
        )

        set_score(admin.id, usage_key, 0, 3)

        state_client.set(
            username=admin.username,
            block_key=usage_key,
            state={'field_a': 'a', 'field_b': 'b'}
        )

        set_score(admin.id, usage_key, 3, 3)

        url = reverse('submission_history', kwargs={
            'course_id': unicode(self.course_key),
            'student_username': admin.username,
            'location': unicode(usage_key),
        })
        response = self.client.get(url)
        # Unescape so the JSON blobs can be matched literally against the HTML.
        response_content = HTMLParser().unescape(response.content)

        # We have update the state 4 times: twice to change content, and twice
        # to set the scores. We'll check that the identifying content from each is
        # displayed (but not the order), and also the indexes assigned in the output
        # #1 - #4
        self.assertIn('#1', response_content)
        self.assertIn(json.dumps({'field_a': 'a', 'field_b': 'b'}, sort_keys=True, indent=2), response_content)
        self.assertIn("Score: 0.0 / 3.0", response_content)
        self.assertIn(json.dumps({'field_a': 'x', 'field_b': 'y'}, sort_keys=True, indent=2), response_content)
        self.assertIn("Score: 3.0 / 3.0", response_content)
        self.assertIn('#4', response_content)
def _email_opt_in_checkbox(self, response, org_name_string=None):
"""Check if the email opt-in checkbox appears in the response content."""
checkbox_html = '<input id="email-opt-in" type="checkbox" name="opt-in" class="email-opt-in" value="true" checked>'
if org_name_string:
# Verify that the email opt-in checkbox appears, and that the expected
# organization name is displayed.
self.assertContains(response, checkbox_html, html=True)
self.assertContains(response, org_name_string)
else:
# Verify that the email opt-in checkbox does not appear
self.assertNotContains(response, checkbox_html, html=True)
@attr('shard_1')
# setting TIME_ZONE_DISPLAYED_FOR_DEADLINES explicitly
@override_settings(TIME_ZONE_DISPLAYED_FOR_DEADLINES="UTC")
class BaseDueDateTests(ModuleStoreTestCase):
    """
    Base class that verifies that due dates are rendered correctly on a page
    """
    # Abstract base: concrete subclasses set __test__ = True and implement get_text().
    __test__ = False

    def get_text(self, course):  # pylint: disable=unused-argument
        """Return the rendered text for the page to be verified"""
        raise NotImplementedError

    def set_up_course(self, **course_kwargs):
        """
        Create a stock course with a specific due date.

        :param course_kwargs: All kwargs are passed to through to the :class:`CourseFactory`
        """
        course = CourseFactory.create(**course_kwargs)
        chapter = ItemFactory.create(category='chapter', parent_location=course.location)
        # The sequential carries the due date that the page under test must render.
        section = ItemFactory.create(category='sequential', parent_location=chapter.location, due=datetime(2013, 9, 18, 11, 30, 00))
        vertical = ItemFactory.create(category='vertical', parent_location=section.location)
        ItemFactory.create(category='problem', parent_location=vertical.location)

        # Re-fetch so the created children (and their due dates) are loaded.
        course = modulestore().get_course(course.id)
        self.assertIsNotNone(course.get_children()[0].get_children()[0].due)
        CourseEnrollmentFactory(user=self.user, course_id=course.id)
        return course

    def setUp(self):
        super(BaseDueDateTests, self).setUp()
        self.request_factory = RequestFactory()
        self.user = UserFactory.create()
        self.request = self.request_factory.get("foo")
        self.request.user = self.user

        # Expected renderings of the due date, with and without the timezone suffix.
        self.time_with_tz = "due Sep 18, 2013 at 11:30 UTC"
        self.time_without_tz = "due Sep 18, 2013 at 11:30"

    def test_backwards_compatability(self):
        # The test course being used has show_timezone = False in the policy file
        # (and no due_date_display_format set). This is to test our backwards compatibility--
        # in course_module's init method, the date_display_format will be set accordingly to
        # remove the timezone.
        course = self.set_up_course(due_date_display_format=None, show_timezone=False)
        text = self.get_text(course)
        self.assertIn(self.time_without_tz, text)
        self.assertNotIn(self.time_with_tz, text)
        # Test that show_timezone has been cleared (which means you get the default value of True).
        self.assertTrue(course.show_timezone)

    def test_defaults(self):
        course = self.set_up_course()
        text = self.get_text(course)
        self.assertIn(self.time_with_tz, text)

    def test_format_none(self):
        # Same for setting the due date to None
        course = self.set_up_course(due_date_display_format=None)
        text = self.get_text(course)
        self.assertIn(self.time_with_tz, text)

    def test_format_plain_text(self):
        # plain text due date
        course = self.set_up_course(due_date_display_format="foobar")
        text = self.get_text(course)
        self.assertNotIn(self.time_with_tz, text)
        self.assertIn("due foobar", text)

    def test_format_date(self):
        # due date with no time
        course = self.set_up_course(due_date_display_format=u"%b %d %y")
        text = self.get_text(course)
        self.assertNotIn(self.time_with_tz, text)
        self.assertIn("due Sep 18 13", text)

    def test_format_hidden(self):
        # hide due date completely
        course = self.set_up_course(due_date_display_format=u"")
        text = self.get_text(course)
        self.assertNotIn("due ", text)

    def test_format_invalid(self):
        # improperly formatted due_date_display_format falls through to default
        # (value of show_timezone does not matter-- setting to False to make that clear).
        course = self.set_up_course(due_date_display_format=u"%%%", show_timezone=False)
        text = self.get_text(course)
        self.assertNotIn("%%%", text)
        self.assertIn(self.time_with_tz, text)
class TestProgressDueDate(BaseDueDateTests):
    """
    Test that the progress page displays due dates correctly
    """
    __test__ = True

    def get_text(self, course):
        """Render the progress page for ``course`` and return its HTML body."""
        mako_middleware_process_request(self.request)
        response = views.progress(
            self.request,
            course_id=course.id.to_deprecated_string(),
            student_id=self.user.id,
        )
        return response.content
class TestAccordionDueDate(BaseDueDateTests):
    """
    Test that the accordion page displays due dates correctly
    """
    __test__ = True

    def get_text(self, course):
        """Render the courseware accordion for ``course`` and return its HTML."""
        active_chapter = unicode(course.get_children()[0].scope_ids.usage_id)
        return views.render_accordion(
            self.request.user,
            self.request,
            course,
            active_chapter,
            None,
            None,
        )
@attr('shard_1')
class StartDateTests(ModuleStoreTestCase):
    """
    Test that start dates are properly localized and displayed on the student
    dashboard.
    """

    def setUp(self):
        super(StartDateTests, self).setUp()
        self.request_factory = RequestFactory()
        self.user = UserFactory.create()
        self.request = self.request_factory.get("foo")
        self.request.user = self.user

    def set_up_course(self):
        """
        Create a stock course with a specific start date and return it,
        re-fetched from the modulestore.
        """
        course = CourseFactory.create(start=datetime(2013, 9, 16, 7, 17, 28))
        course = modulestore().get_course(course.id)
        return course

    def get_about_text(self, course_key):
        """
        Get the text of the /about page for the course.
        """
        text = views.course_about(self.request, course_key.to_deprecated_string()).content
        return text

    # Patch the date-localization hooks so the assertion can detect that the
    # translated month name and SHORT_DATE_FORMAT are actually used.
    @patch('util.date_utils.pgettext', fake_pgettext(translations={
        ("abbreviated month name", "Sep"): "SEPTEMBER",
    }))
    @patch('util.date_utils.ugettext', fake_ugettext(translations={
        "SHORT_DATE_FORMAT": "%Y-%b-%d",
    }))
    def test_format_localized_in_studio_course(self):
        course = self.set_up_course()
        text = self.get_about_text(course.id)
        # The start date is set in the set_up_course function above.
        self.assertIn("2013-SEPTEMBER-16", text)

    @patch('util.date_utils.pgettext', fake_pgettext(translations={
        ("abbreviated month name", "Jul"): "JULY",
    }))
    @patch('util.date_utils.ugettext', fake_ugettext(translations={
        "SHORT_DATE_FORMAT": "%Y-%b-%d",
    }))
    @unittest.skip  # NOTE: disabled; depends on the XML toy-course fixture
    def test_format_localized_in_xml_course(self):
        text = self.get_about_text(SlashSeparatedCourseKey('edX', 'toy', 'TT_2012_Fall'))
        # The start date is set in common/test/data/two_toys/policies/TT_2012_Fall/policy.json
        self.assertIn("2015-JULY-17", text)
@attr('shard_1')
@ddt.ddt
class ProgressPageTests(ModuleStoreTestCase):
    """
    Tests that verify that the progress page works correctly.
    """

    def setUp(self):
        super(ProgressPageTests, self).setUp()
        self.request_factory = RequestFactory()
        self.user = UserFactory.create()
        self.request = self.request_factory.get("foo")
        self.request.user = self.user
        mako_middleware_process_request(self.request)

        self.setup_course()

    def setup_course(self, **options):
        """Create the test course, enroll self.user, and build a minimal tree."""
        course = CourseFactory.create(
            start=datetime(2013, 9, 16, 7, 17, 28),
            # Non-ASCII cutoff name exercises unicode handling in grading display.
            grade_cutoffs={u'çü†øƒƒ': 0.75, 'Pass': 0.5},
            **options
        )
        # pylint: disable=attribute-defined-outside-init
        self.course = modulestore().get_course(course.id)
        CourseEnrollmentFactory(user=self.user, course_id=self.course.id)

        self.chapter = ItemFactory.create(category='chapter', parent_location=self.course.location)
        self.section = ItemFactory.create(category='sequential', parent_location=self.chapter.location)
        self.vertical = ItemFactory.create(category='vertical', parent_location=self.section.location)

    @ddt.data('"><script>alert(1)</script>', '<script>alert(1)</script>', '</script><script>alert(1)</script>')
    def test_progress_page_xss_prevent(self, malicious_code):
        """
        Test that XSS attack is prevented
        """
        resp = views.progress(self.request, course_id=unicode(self.course.id), student_id=self.user.id)
        self.assertEqual(resp.status_code, 200)
        # Test that malicious code does not appear in html
        self.assertNotIn(malicious_code, resp.content)

    def test_pure_ungraded_xblock(self):
        # An ungraded block ('acid') must not break progress-page rendering.
        ItemFactory.create(category='acid', parent_location=self.vertical.location)
        resp = views.progress(self.request, course_id=self.course.id.to_deprecated_string())
        self.assertEqual(resp.status_code, 200)

    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_student_progress_with_valid_and_invalid_id(self, default_store):
        """
        Check that invalid 'student_id' raises Http404 for both old mongo and
        split mongo courses.
        """

        # Create new course with respect to 'default_store'
        self.course = CourseFactory.create(default_store=default_store)

        # Invalid Student Ids (Integer and Non-int)
        invalid_student_ids = [
            991021,
            'azU3N_8$',
        ]
        for invalid_id in invalid_student_ids:

            self.assertRaises(
                Http404, views.progress,
                self.request,
                course_id=unicode(self.course.id),
                student_id=invalid_id
            )

        # Enroll student into course
        CourseEnrollment.enroll(self.user, self.course.id, mode='honor')
        resp = views.progress(self.request, course_id=self.course.id.to_deprecated_string(), student_id=self.user.id)
        # Assert that valid 'student_id' returns 200 status
        self.assertEqual(resp.status_code, 200)

    def test_non_asci_grade_cutoffs(self):
        # Grade cutoffs with non-ASCII names (set in setup_course) must render.
        resp = views.progress(self.request, course_id=self.course.id.to_deprecated_string())
        self.assertEqual(resp.status_code, 200)

    def test_generate_cert_config(self):
        # The "Request Certificate" button only appears once cert generation is
        # enabled both globally and for this course — and even then requires a
        # passing grade, so it stays hidden throughout this test.
        resp = views.progress(self.request, course_id=unicode(self.course.id))
        self.assertNotContains(resp, 'Request Certificate')

        # Enable the feature, but do not enable it for this course
        CertificateGenerationConfiguration(enabled=True).save()
        resp = views.progress(self.request, course_id=unicode(self.course.id))
        self.assertNotContains(resp, 'Request Certificate')

        # Enable certificate generation for this course
        certs_api.set_cert_generation_enabled(self.course.id, True)
        resp = views.progress(self.request, course_id=unicode(self.course.id))
        self.assertNotContains(resp, 'Request Certificate')

    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': True})
    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [],
                                                         'grade_breakdown': []}))
    def test_view_certificate_link(self):
        """
        If certificate web view is enabled then certificate web view button should appear for user who certificate is
        available/generated
        """
        certificate = GeneratedCertificateFactory.create(
            user=self.user,
            course_id=self.course.id,
            status=CertificateStatuses.downloadable,
            download_url="http://www.example.com/certificate.pdf",
            mode='honor'
        )

        # Enable the feature, but do not enable it for this course
        CertificateGenerationConfiguration(enabled=True).save()

        # Enable certificate generation for this course
        certs_api.set_cert_generation_enabled(self.course.id, True)

        # course certificate configurations
        certificates = [
            {
                'id': 1,
                'name': 'Name 1',
                'description': 'Description 1',
                'course_title': 'course_title_1',
                'signatories': [],
                'version': 1,
                'is_active': True
            }
        ]

        self.course.certificates = {'certificates': certificates}
        self.course.cert_html_view_enabled = True
        self.course.save()
        self.store.update_item(self.course, self.user.id)

        resp = views.progress(self.request, course_id=unicode(self.course.id))
        self.assertContains(resp, u"View Certificate")

        self.assertContains(resp, u"You can keep working for a higher grade")
        cert_url = certs_api.get_certificate_url(
            user_id=self.user.id,
            course_id=self.course.id
        )
        self.assertContains(resp, cert_url)

        # when course certificate is not active
        certificates[0]['is_active'] = False
        self.store.update_item(self.course, self.user.id)

        resp = views.progress(self.request, course_id=unicode(self.course.id))
        self.assertNotContains(resp, u"View Your Certificate")
        self.assertNotContains(resp, u"You can now view your certificate")
        self.assertContains(resp, u"We're creating your certificate.")

    @patch.dict('django.conf.settings.FEATURES', {'CERTIFICATES_HTML_VIEW': False})
    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75, 'section_breakdown': [],
                                                         'grade_breakdown': []}))
    def test_view_certificate_link_hidden(self):
        """
        If certificate web view is disabled then certificate web view button should not appear for user who certificate
        is available/generated
        """
        GeneratedCertificateFactory.create(
            user=self.user,
            course_id=self.course.id,
            status=CertificateStatuses.downloadable,
            download_url="http://www.example.com/certificate.pdf",
            mode='honor'
        )

        # Enable the feature, but do not enable it for this course
        CertificateGenerationConfiguration(enabled=True).save()

        # Enable certificate generation for this course
        certs_api.set_cert_generation_enabled(self.course.id, True)

        resp = views.progress(self.request, course_id=unicode(self.course.id))
        self.assertContains(resp, u"Download Your Certificate")

    @ddt.data(
        *itertools.product(((18, 4, True), (18, 4, False)), (True, False))
    )
    @ddt.unpack
    def test_query_counts(self, (sql_calls, mongo_calls, self_paced), self_paced_enabled):
        """Test that query counts remain the same for self-paced and instructor-paced courses."""
        SelfPacedConfiguration(enabled=self_paced_enabled).save()
        self.setup_course(self_paced=self_paced)
        with self.assertNumQueries(sql_calls), check_mongo_calls(mongo_calls):
            resp = views.progress(self.request, course_id=unicode(self.course.id))
        self.assertEqual(resp.status_code, 200)
@attr('shard_1')
class VerifyCourseKeyDecoratorTests(TestCase):
    """
    Tests for the ensure_valid_course_key decorator.
    """

    def setUp(self):
        super(VerifyCourseKeyDecoratorTests, self).setUp()

        self.request = RequestFactory().get("foo")
        self.valid_course_id = "edX/test/1"
        self.invalid_course_id = "edX/"

    def test_decorator_with_valid_course_id(self):
        # A well-formed course_id must pass straight through to the view.
        inner_view = create_autospec(views.course_about)
        decorated = ensure_valid_course_key(inner_view)
        decorated(self.request, course_id=self.valid_course_id)
        self.assertTrue(inner_view.called)

    def test_decorator_with_invalid_course_id(self):
        # A malformed course_id must raise Http404 without invoking the view.
        inner_view = create_autospec(views.course_about)
        decorated = ensure_valid_course_key(inner_view)
        with self.assertRaises(Http404):
            decorated(self.request, course_id=self.invalid_course_id)
        self.assertFalse(inner_view.called)
@attr('shard_1')
class IsCoursePassedTests(ModuleStoreTestCase):
    """
    Tests for the is_course_passed helper function
    """

    # The course's "Pass" grade cutoff used throughout these tests.
    SUCCESS_CUTOFF = 0.5

    def setUp(self):
        super(IsCoursePassedTests, self).setUp()

        self.student = UserFactory()
        self.course = CourseFactory.create(
            org='edx',
            number='verified',
            display_name='Verified Course',
            grade_cutoffs={'cutoff': 0.75, 'Pass': self.SUCCESS_CUTOFF}
        )
        self.request = RequestFactory()
        self.request.user = self.student

    def test_user_fails_if_not_clear_exam(self):
        # If user has not grade then false will return
        self.assertFalse(views.is_course_passed(self.course, None, self.student, self.request))

    @patch('courseware.grades.grade', Mock(return_value={'percent': 0.9}))
    def test_user_pass_if_percent_appears_above_passing_point(self):
        # Mocking the grades.grade
        # If user has above passing marks then True will return
        self.assertTrue(views.is_course_passed(self.course, None, self.student, self.request))

    @patch('courseware.grades.grade', Mock(return_value={'percent': 0.2}))
    def test_user_fail_if_percent_appears_below_passing_point(self):
        # Mocking the grades.grade
        # If user has below passing marks then False will return
        self.assertFalse(views.is_course_passed(self.course, None, self.student, self.request))

    @patch('courseware.grades.grade', Mock(return_value={'percent': SUCCESS_CUTOFF}))
    def test_user_with_passing_marks_and_achieved_marks_equal(self):
        # Mocking the grades.grade
        # If user's achieved passing marks are equal to the required passing
        # marks then it will return True
        self.assertTrue(views.is_course_passed(self.course, None, self.student, self.request))
@attr('shard_1')
class GenerateUserCertTests(ModuleStoreTestCase):
    """
    Tests for the view function Generated User Certs
    """

    def setUp(self):
        super(GenerateUserCertTests, self).setUp()

        self.student = UserFactory(username='dummy', password='123456', email='test@mit.edu')
        self.course = CourseFactory.create(
            org='edx',
            number='verified',
            display_name='Verified Course',
            grade_cutoffs={'cutoff': 0.75, 'Pass': 0.5}
        )
        self.enrollment = CourseEnrollment.enroll(self.student, self.course.id, mode='honor')
        self.request = RequestFactory()
        self.client.login(username=self.student, password='123456')
        self.url = reverse('generate_user_cert', kwargs={'course_id': unicode(self.course.id)})

    def test_user_with_out_passing_grades(self):
        # If user has no grading then json will return failed message and badrequest code
        resp = self.client.post(self.url)
        self.assertEqual(resp.status_code, HttpResponseBadRequest.status_code)
        self.assertIn("Your certificate will be available when you pass the course.", resp.content)

    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
    @override_settings(CERT_QUEUE='certificates', LMS_SEGMENT_KEY="foobar")
    def test_user_with_passing_grade(self):
        # If user has above passing grading then json will return cert generating message and
        # status valid code
        # mocking xqueue and analytics

        analytics_patcher = patch('courseware.views.analytics')
        mock_tracker = analytics_patcher.start()
        self.addCleanup(analytics_patcher.stop)

        with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_send_to_queue:
            mock_send_to_queue.return_value = (0, "Successfully queued")
            resp = self.client.post(self.url)
            self.assertEqual(resp.status_code, 200)

            # Verify Google Analytics event fired after generating certificate
            mock_tracker.track.assert_called_once_with(  # pylint: disable=no-member
                self.student.id,  # pylint: disable=no-member
                'edx.bi.user.certificate.generate',
                {
                    'category': 'certificates',
                    'label': unicode(self.course.id)
                },

                context={
                    'ip': '127.0.0.1',
                    'Google Analytics':
                    {'clientId': None}
                }
            )
            mock_tracker.reset_mock()

    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
    def test_user_with_passing_existing_generating_cert(self):
        # If user has passing grade but also has existing generating cert
        # then json will return cert generating message with bad request code
        GeneratedCertificateFactory.create(
            user=self.student,
            course_id=self.course.id,
            status=CertificateStatuses.generating,
            mode='verified'
        )
        resp = self.client.post(self.url)
        self.assertEqual(resp.status_code, HttpResponseBadRequest.status_code)
        self.assertIn("Certificate is being created.", resp.content)

    @patch('courseware.grades.grade', Mock(return_value={'grade': 'Pass', 'percent': 0.75}))
    @override_settings(CERT_QUEUE='certificates', LMS_SEGMENT_KEY="foobar")
    def test_user_with_passing_existing_downloadable_cert(self):
        # If user has already downloadable certificate
        # then json will return cert generating message with bad request code
        GeneratedCertificateFactory.create(
            user=self.student,
            course_id=self.course.id,
            status=CertificateStatuses.downloadable,
            mode='verified'
        )

        resp = self.client.post(self.url)
        self.assertEqual(resp.status_code, HttpResponseBadRequest.status_code)
        self.assertIn("Certificate has already been created.", resp.content)

    def test_user_with_non_existing_course(self):
        # If try to access a course with valid key pattern then it will return
        # bad request code with course is not valid message
        resp = self.client.post('/courses/def/abc/in_valid/generate_user_cert')
        self.assertEqual(resp.status_code, HttpResponseBadRequest.status_code)
        self.assertIn("Course is not valid", resp.content)

    def test_user_with_invalid_course_id(self):
        # If try to access a course with invalid key pattern then 404 will return
        resp = self.client.post('/courses/def/generate_user_cert')
        self.assertEqual(resp.status_code, 404)

    def test_user_without_login_return_error(self):
        # If user try to access without login should see a bad request status code with message
        self.client.logout()
        resp = self.client.post(self.url)
        self.assertEqual(resp.status_code, HttpResponseBadRequest.status_code)
        self.assertIn("You must be signed in to {platform_name} to create a certificate.".format(
            platform_name=settings.PLATFORM_NAME
        ), resp.content)
class ActivateIDCheckerBlock(XBlock):
    """
    XBlock for checking for an activate_block_id entry in the render context.
    """
    # We don't need actual children to test this.
    has_children = False

    def student_view(self, context):
        """
        A student view that displays the activate_block_id context variable.

        Returns a Fragment naming the activate_block_id when the runtime put
        one in ``context``, and an empty Fragment otherwise.
        """
        result = Fragment()
        if 'activate_block_id' in context:
            # Fix: the original string ended with </p> but never opened the
            # paragraph, producing malformed HTML. Emit a balanced <p>…</p>.
            result.add_content(u"<p>Activate Block ID: {block_id}</p>".format(block_id=context['activate_block_id']))
        return result
class ViewCheckerBlock(XBlock):
    """
    XBlock for testing user state in views.
    """
    has_children = True
    # Per-user state; tests pre-load it with this block's own usage_id so the
    # view below can verify that saved state was actually loaded.
    state = String(scope=Scope.user_state)

    def student_view(self, context):  # pylint: disable=unused-argument
        """
        A student_view that asserts that the ``state`` field for this block
        matches the block's usage_id.
        """
        msg = "{} != {}".format(self.state, self.scope_ids.usage_id)
        assert self.state == unicode(self.scope_ids.usage_id), msg
        # Render children too, so a tree of checkers validates every level.
        fragments = self.runtime.render_children(self)
        result = Fragment(
            content=u"<p>ViewCheckerPassed: {}</p>\n{}".format(
                unicode(self.scope_ids.usage_id),
                "\n".join(fragment.content for fragment in fragments),
            )
        )
        return result
@attr('shard_1')
@ddt.ddt
class TestIndexView(ModuleStoreTestCase):
    """
    Tests of the courseware.index view.
    """

    @XBlock.register_temp_plugin(ViewCheckerBlock, 'view_checker')
    @ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
    def test_student_state(self, default_store):
        """
        Verify that saved student state is loaded for xblocks rendered in the index view.
        """
        user = UserFactory()

        with modulestore().default_store(default_store):
            course = CourseFactory.create()
            chapter = ItemFactory.create(parent=course, category='chapter')
            # Every node below the chapter is a ViewCheckerBlock, whose view
            # asserts its user state equals its own usage_id (set below).
            section = ItemFactory.create(parent=chapter, category='view_checker', display_name="Sequence Checker")
            vertical = ItemFactory.create(parent=section, category='view_checker', display_name="Vertical Checker")
            block = ItemFactory.create(parent=vertical, category='view_checker', display_name="Block Checker")

        for item in (section, vertical, block):
            StudentModuleFactory.create(
                student=user,
                course_id=course.id,
                module_state_key=item.scope_ids.usage_id,
                state=json.dumps({'state': unicode(item.scope_ids.usage_id)})
            )

        CourseEnrollmentFactory(user=user, course_id=course.id)

        request = RequestFactory().get(
            reverse(
                'courseware_section',
                kwargs={
                    'course_id': unicode(course.id),
                    'chapter': chapter.url_name,
                    'section': section.url_name,
                }
            )
        )
        request.user = user
        mako_middleware_process_request(request)

        # Trigger the assertions embedded in the ViewCheckerBlocks
        response = views.index(request, unicode(course.id), chapter=chapter.url_name, section=section.url_name)
        self.assertEquals(response.content.count("ViewCheckerPassed"), 3)

    @XBlock.register_temp_plugin(ActivateIDCheckerBlock, 'id_checker')
    def test_activate_block_id(self):
        user = UserFactory()

        course = CourseFactory.create()
        chapter = ItemFactory.create(parent=course, category='chapter')
        section = ItemFactory.create(parent=chapter, category='sequential', display_name="Sequence")
        vertical = ItemFactory.create(parent=section, category='vertical', display_name="Vertical")
        ItemFactory.create(parent=vertical, category='id_checker', display_name="ID Checker")

        CourseEnrollmentFactory(user=user, course_id=course.id)

        # The query parameter must be forwarded into the render context, where
        # ActivateIDCheckerBlock echoes it back.
        request = RequestFactory().get(
            reverse(
                'courseware_section',
                kwargs={
                    'course_id': unicode(course.id),
                    'chapter': chapter.url_name,
                    'section': section.url_name,
                }
            ) + '?activate_block_id=test_block_id'
        )
        request.user = user
        mako_middleware_process_request(request)

        response = views.index(request, unicode(course.id), chapter=chapter.url_name, section=section.url_name)
        self.assertIn("Activate Block ID: test_block_id", response.content)
class TestRenderXBlock(RenderXBlockTestMixin, ModuleStoreTestCase):
    """
    Tests for the courseware.render_xblock endpoint.

    This class overrides the get_response method, which is used by
    the tests defined in RenderXBlockTestMixin.
    """

    def setUp(self):
        reload_django_url_config()
        super(TestRenderXBlock, self).setUp()

    def get_response(self, url_encoded_params=None):
        """
        Overridable method to get the response from the endpoint that is being tested.
        """
        endpoint = reverse(
            'render_xblock',
            kwargs={"usage_key_string": unicode(self.html_block.location)},
        )
        if url_encoded_params:
            endpoint = '{}?{}'.format(endpoint, url_encoded_params)
        return self.client.get(endpoint)
class TestRenderXBlockSelfPaced(TestRenderXBlock):
    """
    Test rendering XBlocks for a self-paced course. Relies on the query
    count assertions in the tests defined by RenderXBlockMixin.
    """

    def setUp(self):
        super(TestRenderXBlockSelfPaced, self).setUp()
        # Turn the self-paced feature flag on for every test in this class.
        SelfPacedConfiguration(enabled=True).save()

    def course_options(self):
        return {'self_paced': True}
| agpl-3.0 |
Simage/shinken | shinken/acknowledge.py | 14 | 3404 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
class Acknowledge:
    """
    Allows you to acknowledge the current problem for the specified service.
    By acknowledging the current problem, future notifications (for the same
    servicestate) are disabled.
    """
    # Class-level counter used to hand out a unique id per instance.
    id = 1

    # Just to list the properties we will send as pickle
    # so to others daemons, all but NOT REF
    properties = {
        'id': None,
        'sticky': None,
        'notify': None,
        'end_time': None,
        'author': None,
        'comment': None,
    }

    # If the "sticky" option is set to one (1), the acknowledgement
    # will remain until the service returns to an OK state. Otherwise
    # the acknowledgement will automatically be removed when the
    # service changes state. In this case Web interfaces set a value
    # of (2).
    #
    # If the "notify" option is set to one (1), a notification will be
    # sent out to contacts indicating that the current service problem
    # has been acknowledged.
    #
    # <WTF??>
    # If the "persistent" option is set to one (1), the comment
    # associated with the acknowledgement will survive across restarts
    # of the Shinken process. If not, the comment will be deleted the
    # next time Shinken restarts. "persistent" not only means "survive
    # restarts", but also
    #
    # => End of comment Missing!!
    # </WTF??>
    def __init__(self, ref, sticky, notify, persistent,
                 author, comment, end_time=0):
        # Take the next id from the class counter, then bump it.
        cls = self.__class__
        self.id = cls.id
        cls.id += 1
        # NOTE: "persistent" is accepted for interface compatibility but is
        # not stored on the instance (matches historical behaviour).
        self.ref = ref  # pointer to srv or host we are applied
        self.sticky = sticky
        self.notify = notify
        self.end_time = end_time
        self.author = author
        self.comment = comment

    # Call by pickle for dataify the ackn
    # because we DO NOT WANT REF in this pickleisation!
    def __getstate__(self):
        # 'id' is not listed in *_properties, so seed the dict with it;
        # then export every declared property actually present on self.
        state = {'id': self.id}
        state.update(
            (prop, getattr(self, prop))
            for prop in self.__class__.properties
            if hasattr(self, prop)
        )
        return state

    # Inversed function of getstate
    def __setstate__(self, state):
        self.id = state['id']
        for prop in self.__class__.properties:
            if prop in state:
                setattr(self, prop, state[prop])
        # If load a old ack, set the end_time to 0 which refers to infinite
        if not hasattr(self, 'end_time'):
            self.end_time = 0
| agpl-3.0 |
andmos/ansible | test/units/modules/network/nxos/test_nxos_bgp_af.py | 30 | 4173 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_bgp_af
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpAfModule(TestNxosModule):
    """Unit tests for the nxos_bgp_af module (BGP address-family config)."""
    module = nxos_bgp_af
    def setUp(self):
        super(TestNxosBgpAfModule, self).setUp()
        # Patch out device I/O: load_config records what would be pushed to
        # the device and get_config returns the canned fixture instead.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp_af.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp_af.get_config')
        self.get_config = self.mock_get_config.start()
    def tearDown(self):
        super(TestNxosBgpAfModule, self).tearDown()
        self.mock_load_config.stop()
        self.mock_get_config.stop()
    def load_fixtures(self, commands=None, device=''):
        # Every test starts from the shared nxos_bgp fixture configuration.
        self.get_config.return_value = load_fixture('nxos_bgp', 'config.cfg')
        self.load_config.return_value = None
    def test_nxos_bgp_af(self):
        """A new address-family emits the enclosing 'router bgp' command."""
        set_module_args(dict(asn=65535, afi='ipv4', safi='unicast'))
        self.execute_module(
            changed=True, sort=False,
            commands=['router bgp 65535', 'address-family ipv4 unicast']
        )
    def test_nxos_bgp_af_vrf(self):
        """An address-family under a VRF is nested below the 'vrf' command."""
        set_module_args(dict(asn=65535, vrf='test', afi='ipv4', safi='unicast'))
        self.execute_module(
            changed=True, sort=False,
            commands=['router bgp 65535', 'vrf test', 'address-family ipv4 unicast']
        )
    def test_nxos_bgp_af_vrf_exists(self):
        """No commands are issued when the VRF address-family already exists."""
        set_module_args(dict(asn=65535, vrf='test2', afi='ipv4', safi='unicast'))
        self.execute_module(changed=False, commands=[])
    def test_nxos_bgp_af_dampening_routemap(self):
        """Route-map based dampening renders 'dampening route-map <name>'."""
        set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
                             dampening_routemap='route-map-a'))
        self.execute_module(
            changed=True,
            commands=['router bgp 65535', 'address-family ipv4 unicast',
                      'dampening route-map route-map-a']
        )
    def test_nxos_bgp_af_dampening_manual(self):
        """Manual timers render 'dampening <half> <reuse> <suppress> <max>'."""
        set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
                             dampening_half_time=5, dampening_suppress_time=2000,
                             dampening_reuse_time=1900, dampening_max_suppress_time=10))
        self.execute_module(
            changed=True,
            commands=['router bgp 65535', 'address-family ipv4 unicast',
                      'dampening 5 1900 2000 10']
        )
    def test_nxos_bgp_af_dampening_mix(self):
        """Route-map dampening and manual timers are mutually exclusive."""
        set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
                             dampening_routemap='route-map-a',
                             dampening_half_time=5, dampening_suppress_time=2000,
                             dampening_reuse_time=1900, dampening_max_suppress_time=10))
        result = self.execute_module(failed=True)
        self.assertEqual(result['msg'], 'parameters are mutually exclusive: dampening_routemap, dampening_half_time')
    def test_nxos_bgp_af_client(self):
        """Disabling client-to-client reflection emits the 'no' form."""
        set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
                             client_to_client=False))
        self.execute_module(
            changed=True,
            commands=['router bgp 65535', 'address-family ipv4 unicast',
                      'no client-to-client reflection']
        )
| gpl-3.0 |
WFKipper/android_kernel_cyanogen_msm8916 | tools/perf/util/setup.py | 2079 | 1438 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    # Redirect distutils' build output into the directories chosen by the
    # perf Makefile (PYTHON_EXTBUILD_LIB / PYTHON_EXTBUILD_TMP, read below
    # into the module-level build_lib / build_tmp variables).
    def finalize_options(self):
        _build_ext.finalize_options(self)
        self.build_lib = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    # Use the Makefile-provided build directory (build_lib, set below) as
    # the source directory for installation.
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
# Build locations and prebuilt libraries are handed over from the perf
# Makefile through the environment.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')
# Read the list of extension sources, skipping blank lines and lines whose
# first character is '#'.  Use a context manager so the file handle is
# closed deterministically (the previous code leaked it via the py2-only
# file() builtin).
with open('util/python-ext-sources') as source_list:
    ext_sources = [f.strip() for f in source_list
                   if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
		  extra_objects = [libtraceevent, liblk],
                 )
setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
dahool/vertaal | openidmigration/views.py | 2 | 1578 | # -*- coding: utf-8 -*-
"""Copyright (c) 2014 Sergio Gabriel Teves
All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.conf import settings
from openidmigration import get_user_token
import logging
logger = logging.getLogger('vertaal.openidmigration')
def home(request):
    """Render the OpenID-migration landing page."""
    context = {'secondarysite': getattr(settings, 'ENABLE_MIG', '')}
    return render_to_response("openidmig/message.html", context,
                              context_instance=RequestContext(request))
@login_required
def token_generation(request):
    """Render the page showing the logged-in user's migration token."""
    context = {
        'tokenid': get_user_token(request.user),
        'secondarysite': getattr(settings, 'ENABLE_MIG', ''),
    }
    return render_to_response("openidmig/token.html", context,
                              context_instance=RequestContext(request))
| gpl-3.0 |
obboy/django_base | config/urls.py | 1 | 1233 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
# Root URL configuration: static template pages, the Django admin, the
# project's user-management app and django-allauth account handling.
urlpatterns = [
    url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
    url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
    # Django Admin
    url(r'^admin/', include(admin.site.urls)),
    # User management
    url(r'^users/', include("django_base.users.urls", namespace="users")),
    url(r'^accounts/', include('allauth.urls')),
    # Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    # NOTE(review): dotted-string view references were removed in Django 1.10;
    # confirm the pinned Django version still supports them.
    urlpatterns += [
        url(r'^400/$', 'django.views.defaults.bad_request'),
        url(r'^403/$', 'django.views.defaults.permission_denied'),
        url(r'^404/$', 'django.views.defaults.page_not_found'),
        url(r'^500/$', 'django.views.defaults.server_error'),
    ]
| bsd-3-clause |
cuongnb14/autoscaling-paas | project/dashboard/apiv1/serializers.py | 1 | 1969 | from django.contrib.auth.models import User, Group
from apiv1.models import *
from rest_framework import serializers
from apiv1.utils import *
import traceback
class UserSerializer(serializers.ModelSerializer):
    """Expose the basic identity fields of Django's built-in User."""
    class Meta:
        model = User
        fields = ('username', 'email', 'first_name', 'last_name',)
class WebAppSerializer(serializers.ModelSerializer):
    """Serialize a WebApp, adding the live instance count from Marathon."""
    # A SerializerMethodField with no argument resolves via get_<field_name>.
    instances = serializers.SerializerMethodField()
    def get_instances(self, app):
        # Best effort: if Marathon is unreachable or the app is not
        # deployed, report zero running instances instead of failing.
        try:
            return get_marathon_client().get_app("app-" + app.uuid).instances
        except Exception:
            return 0
    class Meta:
        model = WebApp
        fields = ('name', 'github_url', 'min_instances', 'max_instances',
                  'env_hostname', 'env_port', 'env_db_hostname', 'env_db_port',
                  'env_db_name', 'env_db_username', 'env_db_password', 'cpus',
                  'status', 'mem', 'instances', 'autoscaling')
class DatabaseAppSerializer(serializers.ModelSerializer):
    """Serialize a DatabaseApp, deriving its status from Marathon."""
    status = serializers.SerializerMethodField()
    def get_status(self, database_app):
        # The app counts as running iff Marathon knows about it; any failure
        # (connection error, unknown app id) maps to "not running".
        try:
            app_id = "{}.database.{}".format(database_app.user.username,
                                             database_app.id)
            get_marathon_client().get_app(app_id)
        except Exception:
            return "not running"
        return "running"
    class Meta:
        model = DatabaseApp
        fields = ('id', 'host', 'port', 'root_password', 'status')
class PolicySerializer(serializers.ModelSerializer):
    """Expose an autoscaling Policy's thresholds, step sizes and cooldowns."""
    class Meta:
        model = Policy
        fields = ('id', 'metric_type', 'upper_threshold','lower_threshold','instances_out','instances_in','scale_up_wait','scale_down_wait','disabled')
class MessageSerializer(serializers.Serializer):
    """A plain (non-model) status/message payload for API responses."""
    status = serializers.CharField(max_length=45)
    message = serializers.CharField(max_length=256)
| mit |
Ashaba/rms | rmslocalenv/lib/python2.7/site-packages/django/http/multipartparser.py | 39 | 24373 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import binascii
import cgi
import sys
from django.conf import settings
from django.core.exceptions import SuspiciousMultipartForm
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
    """Raised when a multipart/form-data payload is malformed."""
    pass
class InputStreamExhausted(Exception):
    """
    No more reads are allowed from this device.
    Raised/handled by the stream wrappers in this module to signal that
    the underlying input has run out of data.
    """
    pass
# Parser states for each part of the multipart body, as returned by
# parse_boundary_stream().
RAW = "raw"
FILE = "file"
FIELD = "field"
# base64.b64decode() raises TypeError on Python 2 but binascii.Error on Python 3.
_BASE64_DECODE_ERROR = TypeError if six.PY2 else binascii.Error
class MultiPartParser(object):
    """
    A rfc2388 multipart/form-data parser.
    ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """
    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.
        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the uploaded
            data.
        :encoding:
            The encoding with which to treat the incoming data.
        """
        #
        # Content-Type should contain multipart and the boundary information.
        #
        content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
        # Parse the header to get the boundary to split the parts.
        ctypes, opts = parse_header(content_type.encode('ascii'))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            content_length = 0
        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)
        # The input is split on byte boundaries, so the boundary itself must
        # be bytes as well.
        if isinstance(boundary, six.text_type):
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data
        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        self._chunk_size = min([2 ** 31 - 4] + possible_sizes)
        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers
    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.
        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict
        encoding = self._encoding
        handlers = self._upload_handlers
        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict('', encoding=self._encoding), MultiValueDict()
        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data,
                                              self._meta,
                                              self._content_length,
                                              self._boundary,
                                              encoding)
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]
        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()
        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        # Per-handler byte counters for the file currently being received.
        counters = [0] * len(handlers)
        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None
                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    continue
                # RFC 2045 Content-Transfer-Encoding; only base64 gets special
                # treatment below.
                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')
                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = base64.b64decode(raw_data)
                        except _BASE64_DECODE_ERROR:
                            data = raw_data
                    else:
                        data = field_stream.read()
                    self._post.appendlist(field_name,
                                          force_text(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if file_name:
                        file_name = force_text(file_name, encoding, errors='replace')
                        file_name = self.IE_sanitize(unescape_entities(file_name))
                    if not file_name:
                        continue
                    content_type, content_type_extra = meta_data.get('content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')
                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None
                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset, content_type_extra)
                            except StopFutureHandlers:
                                break
                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.
                                stripped_chunk = b"".join(chunk.split())
                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 - remaining)
                                    stripped_chunk += b"".join(over_chunk.split())
                                    remaining = len(stripped_chunk) % 4
                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    msg = "Could not decode base64 data: %r" % e
                                    six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])
                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk,
                                                                   counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break
                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)
        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break
        return self._post, self._files
    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(
                    force_text(old_field_name, self._encoding, errors='replace'),
                    file_obj)
                break
    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\") + 1:].strip()
    def _close_files(self):
        # Free up all file handles.
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that... (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, 'file'):
                handler.file.close()
class LazyStream(six.Iterator):
    """
    The LazyStream wrapper allows one to get and "unget" bytes from a stream.
    Given a producer object (an iterator that yields bytestrings), the
    LazyStream object will support iteration, reading, and keeping a "look-back"
    variable in case you need to "unget" some bytes.
    """
    def __init__(self, producer, length=None):
        """
        Every LazyStream must have a producer when instantiated.
        A producer is an iterable that returns a string each time it
        is called.
        """
        self._producer = producer
        self._empty = False
        self._leftover = b''  # bytes pushed back onto the stream via unget()
        self.length = length
        self.position = 0
        self._remaining = length
        self._unget_history = []  # sizes of recent ungets, for loop detection
    def tell(self):
        """Return the current read position within the stream."""
        return self.position
    def read(self, size=None):
        """Read up to ``size`` bytes, or everything if ``size`` is None."""
        def parts():
            remaining = self._remaining if size is None else size
            # do the whole thing in one shot if no limit was provided.
            if remaining is None:
                yield b''.join(self)
                return
            # otherwise do some bookkeeping to return exactly enough
            # of the stream and stashing any extra content we get from
            # the producer
            while remaining != 0:
                assert remaining > 0, 'remaining bytes to read should never go negative'
                try:
                    chunk = next(self)
                except StopIteration:
                    return
                else:
                    emitting = chunk[:remaining]
                    self.unget(chunk[remaining:])
                    remaining -= len(emitting)
                    yield emitting
        out = b''.join(parts())
        return out
    def __next__(self):
        """
        Used when the exact number of bytes to read is unimportant.
        This procedure just returns whatever is chunk is conveniently returned
        from the iterator instead. Useful to avoid unnecessary bookkeeping if
        performance is an issue.
        """
        if self._leftover:
            output = self._leftover
            self._leftover = b''
        else:
            output = next(self._producer)
            self._unget_history = []
        self.position += len(output)
        return output
    def close(self):
        """
        Used to invalidate/disable this lazy stream.
        Replaces the producer with an empty list. Any leftover bytes that have
        already been read will still be reported upon read() and/or next().
        """
        self._producer = []
    def __iter__(self):
        return self
    def unget(self, bytes):
        """
        Places bytes back onto the front of the lazy stream.
        Future calls to read() will return those bytes first. The
        stream position and thus tell() will be rewound.
        """
        if not bytes:
            return
        self._update_unget_history(len(bytes))
        self.position -= len(bytes)
        self._leftover = b''.join([bytes, self._leftover])
    def _update_unget_history(self, num_bytes):
        """
        Updates the unget history as a sanity check to see if we've pushed
        back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're mostly likely in an
        infinite loop of some sort. This is usually caused by a
        maliciously-malformed MIME request.
        """
        self._unget_history = [num_bytes] + self._unget_history[:49]
        number_equal = len([current_number for current_number in self._unget_history
                            if current_number == num_bytes])
        if number_equal > 40:
            raise SuspiciousMultipartForm(
                "The multipart parser got stuck, which shouldn't happen with"
                " normal uploaded files. Check for malicious upload activity;"
                " if there is none, report this to the Django developers."
            )
class ChunkIter(six.Iterator):
    """
    Iterate over a file-like object, yielding the data it produces in
    chunks of at most ``chunk_size`` bytes per read.
    """
    def __init__(self, flo, chunk_size=64 * 1024):
        self.flo = flo
        self.chunk_size = chunk_size
    def __next__(self):
        try:
            chunk = self.flo.read(self.chunk_size)
        except InputStreamExhausted:
            raise StopIteration()
        if not chunk:
            # An empty read means the underlying object has no more data.
            raise StopIteration()
        return chunk
    def __iter__(self):
        return self
class InterBoundaryIter(six.Iterator):
    """
    A Producer that will iterate over boundaries.
    Each item yielded is a LazyStream covering one boundary-delimited part
    of the underlying stream.
    """
    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary
    def __iter__(self):
        return self
    def __next__(self):
        try:
            return LazyStream(BoundaryIter(self._stream, self._boundary))
        except InputStreamExhausted:
            # BoundaryIter found no data at all: the multipart body is done.
            raise StopIteration()
class BoundaryIter(six.Iterator):
    """
    A Producer that is sensitive to boundaries.
    Will happily yield bytes until a boundary is found. Will yield the bytes
    before the boundary, throw away the boundary bytes themselves, and push the
    post-boundary bytes back on the stream.
    The future calls to next() after locating the boundary will raise a
    StopIteration exception.
    """
    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary
        self._done = False
        # rollback an additional six bytes because the format is like
        # this: CRLF<boundary>[--CRLF]
        self._rollback = len(boundary) + 6
        # Try to use mx fast string search if available. Otherwise
        # use Python find. Wrap the latter for consistency.
        unused_char = self._stream.read(1)
        if not unused_char:
            raise InputStreamExhausted()
        self._stream.unget(unused_char)
    def __iter__(self):
        return self
    def __next__(self):
        if self._done:
            raise StopIteration()
        stream = self._stream
        rollback = self._rollback
        bytes_read = 0
        chunks = []
        # Accumulate more than ``rollback`` bytes so that a boundary split
        # across chunk edges cannot be missed.
        for bytes in stream:
            bytes_read += len(bytes)
            chunks.append(bytes)
            if bytes_read > rollback:
                break
            if not bytes:
                break
        else:
            self._done = True
        if not chunks:
            raise StopIteration()
        chunk = b''.join(chunks)
        boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
        if boundary:
            end, next = boundary
            # Push everything after the boundary back for the next part.
            stream.unget(chunk[next:])
            self._done = True
            return chunk[:end]
        else:
            # make sure we don't treat a partial boundary (and
            # its separators) as data
            if not chunk[:-rollback]: # and len(chunk) >= (len(self._boundary) + 6):
                # There's nothing left, we should just return and mark as done.
                self._done = True
                return chunk
            else:
                stream.unget(chunk[-rollback:])
                return chunk[:-rollback]
    def _find_boundary(self, data, eof=False):
        """
        Finds a multipart boundary in data.
        Should no boundary exist in the data None is returned instead. Otherwise
        a tuple containing the indices of the following are returned:
        * the end of current encapsulation
        * the start of the next encapsulation
        """
        index = data.find(self._boundary)
        if index < 0:
            return None
        else:
            end = index
            next = index + len(self._boundary)
            # backup over CRLF
            last = max(0, end - 1)
            if data[last:last + 1] == b'\n':
                end -= 1
            last = max(0, end - 1)
            if data[last:last + 1] == b'\r':
                end -= 1
            return end, next
def exhaust(stream_or_iterable):
    """
    Completely exhaust an iterator or a file-like stream.

    Non-iterable arguments are wrapped in a ChunkIter and read until empty.
    """
    try:
        iterator = iter(stream_or_iterable)
    except TypeError:
        # Not natively iterable: treat it as a file-like object.
        # (The original code then checked ``iterator is None`` and raised
        # MultiPartParserError, but both branches assign, so that guard was
        # unreachable dead code and has been removed.)
        iterator = ChunkIter(stream_or_iterable, 16384)
    for __ in iterator:
        pass
def parse_boundary_stream(stream, max_header_size):
    """
    Parses one and exactly one stream that encapsulates a boundary.
    Returns a ``(item_type, headers_dict, stream)`` triple where item_type
    is one of RAW, FIELD or FILE.
    """
    # Stream at beginning of header, look for end of header
    # and parse it if found. The header must fit within one
    # chunk.
    chunk = stream.read(max_header_size)
    # 'find' returns the top of these four bytes, so we'll
    # need to munch them later to prevent them from polluting
    # the payload.
    header_end = chunk.find(b'\r\n\r\n')
    def _parse_header(line):
        # Split one raw header line into (name, (value, params)).
        main_value_pair, params = parse_header(line)
        try:
            name, value = main_value_pair.split(':', 1)
        except ValueError:
            raise ValueError("Invalid header: %r" % line)
        return name, (value, params)
    if header_end == -1:
        # we find no header, so we just mark this fact and pass on
        # the stream verbatim
        stream.unget(chunk)
        return (RAW, {}, stream)
    header = chunk[:header_end]
    # here we place any excess chunk back onto the stream, as
    # well as throwing away the CRLFCRLF bytes from above.
    stream.unget(chunk[header_end + 4:])
    TYPE = RAW
    outdict = {}
    # Eliminate blank lines
    for line in header.split(b'\r\n'):
        # This terminology ("main value" and "dictionary of
        # parameters") is from the Python docs.
        try:
            name, (value, params) = _parse_header(line)
        except ValueError:
            continue
        if name == 'content-disposition':
            TYPE = FIELD
            if params.get('filename'):
                TYPE = FILE
        outdict[name] = value, params
    if TYPE == RAW:
        # No usable headers were parsed: pass the whole chunk through.
        stream.unget(chunk)
    return (TYPE, outdict, stream)
class Parser(object):
    """Split a multipart stream into (item_type, headers, stream) triples."""
    def __init__(self, stream, boundary):
        self._stream = stream
        self._separator = b'--' + boundary
    def __iter__(self):
        # Each sub_stream covers exactly one boundary-delimited part.
        for sub_stream in InterBoundaryIter(self._stream, self._separator):
            yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
    """ Parse the header into a key-value.
    Input (line): bytes, output: unicode for key/name, bytes for value which
    will be decoded later
    """
    plist = _parse_header_params(b';' + line)
    # The first chunk (before any b';') is the header's main value.
    key = plist.pop(0).lower().decode('ascii')
    pdict = {}
    for p in plist:
        i = p.find(b'=')
        if i >= 0:
            has_encoding = False
            name = p[:i].strip().lower().decode('ascii')
            if name.endswith('*'):
                # Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
                # http://tools.ietf.org/html/rfc2231#section-4
                name = name[:-1]
                if p.count(b"'") == 2:
                    has_encoding = True
            value = p[i + 1:].strip()
            if has_encoding:
                encoding, lang, value = value.split(b"'")
                if six.PY3:
                    value = unquote(value.decode(), encoding=encoding.decode())
                else:
                    value = unquote(value).decode(encoding)
            # Strip surrounding double quotes and unescape backslash sequences.
            if len(value) >= 2 and value[:1] == value[-1:] == b'"':
                value = value[1:-1]
                value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
            pdict[name] = value
    return key, pdict
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| mit |
gangadharkadam/v6_erp | erpnext/shopping_cart/doctype/shopping_cart_settings/test_shopping_cart_settings.py | 27 | 1528 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import unittest
from erpnext.shopping_cart.doctype.shopping_cart_settings.shopping_cart_settings import ShoppingCartSetupError
class TestShoppingCartSettings(unittest.TestCase):
	"""Validation tests for the Shopping Cart Settings single doctype."""
	def setUp(self):
		# Reset the stored singleton so every test starts from a clean slate.
		# Bug fix: the doctype was misspelled "Shipping Cart Settings", so this
		# delete never matched anything and stale settings leaked between tests.
		frappe.db.sql("""delete from `tabSingles` where doctype="Shopping Cart Settings" """)
	def get_cart_settings(self):
		# Build an in-memory settings doc for the test company (not saved).
		return frappe.get_doc({"doctype": "Shopping Cart Settings",
			"company": "_Test Company"})
	def test_exchange_rate_exists(self):
		frappe.db.sql("""delete from `tabCurrency Exchange`""")
		cart_settings = self.get_cart_settings()
		cart_settings.price_list = "_Test Price List Rest of the World"
		# With no Currency Exchange records, validation must fail...
		self.assertRaises(ShoppingCartSetupError, cart_settings.validate_exchange_rates_exist)
		from erpnext.setup.doctype.currency_exchange.test_currency_exchange import test_records as \
			currency_exchange_records
		frappe.get_doc(currency_exchange_records[0]).insert()
		# ...and succeed once a record exists.
		cart_settings.validate_exchange_rates_exist()
	def test_tax_rule_validation(self):
		frappe.db.sql("update `tabTax Rule` set use_for_shopping_cart = 0")
		frappe.db.commit()
		cart_settings = self.get_cart_settings()
		cart_settings.enabled = 1
		# Enabling the cart without any shopping-cart tax rule must fail.
		if not frappe.db.get_value("Tax Rule", {"use_for_shopping_cart": 1}, "name"):
			self.assertRaises(ShoppingCartSetupError, cart_settings.validate_tax_rule)
| agpl-3.0 |
wireservice/agate | agate/aggregations/quartiles.py | 4 | 1548 | #!/usr/bin/env python
from agate.aggregations.base import Aggregation
from agate.aggregations.has_nulls import HasNulls
from agate.aggregations.percentiles import Percentiles
from agate.data_types import Number
from agate.exceptions import DataTypeError
from agate.utils import Quantiles
from agate.warns import warn_null_calculation
class Quartiles(Aggregation):
    """
    Calculate the quartiles of column based on its percentiles.
    The result holds the 0th (min), 25th, 50th (median), 75th and 100th
    (max) percentiles, so "zeroth" and "fourth" quartiles are available
    for reference and intuitive indexing.
    See :class:`Percentiles` for implementation details.
    This aggregation can not be applied to a :class:`.TableSet`.
    :param column_name:
        The name of a column containing :class:`.Number` data.
    """
    def __init__(self, column_name):
        self._column_name = column_name
    def validate(self, table):
        column = table.columns[self._column_name]
        if not isinstance(column.data_type, Number):
            raise DataTypeError('Quartiles can only be applied to columns containing Number data.')
        # Null values are excluded from the computation, so warn the user
        # that the result is based on a subset of the rows.
        if HasNulls(self._column_name).run(table):
            warn_null_calculation(self, column)
    def run(self, table):
        """
        :returns:
            An instance of :class:`Quantiles`.
        """
        percentiles = Percentiles(self._column_name).run(table)
        return Quantiles([percentiles[p] for p in (0, 25, 50, 75, 100)])
| mit |
misdoro/python-ase | ase/gui/colors.py | 2 | 29687 | from __future__ import print_function
# encoding: utf-8
"""colors.py - select how to color the atoms in the GUI."""
import gtk
from gettext import gettext as _
from ase.gui.widgets import pack, cancel_apply_ok, oops, help
import ase
from ase.data.colors import jmol_colors
import numpy as np
import colorsys
# Human-readable color names offered by the GUI color selectors.
named_colors = ('Green', 'Yellow', 'Blue', 'Red', 'Orange', 'Cyan',
                'Magenta', 'Black', 'White', 'Grey', 'Violet', 'Brown',
                'Navy')
class ColorWindow(gtk.Window):
"A window for selecting how to color the atoms."
def __init__(self, gui):
gtk.Window.__init__(self)
self.gui = gui
self.colormode = gui.colormode
self.actual_colordata = None
self.colordata = {}
self.set_title(_("Colors"))
vbox = gtk.VBox()
self.add(vbox)
vbox.show()
# The main layout consists of two columns, the leftmost split in an upper and lower part.
self.maintable = gtk.Table(2,2)
pack(vbox, self.maintable)
self.methodbox = gtk.VBox()
self.methodbox.show()
self.maintable.attach(self.methodbox, 0, 1, 0, 1)
self.scalebox = gtk.VBox()
self.scalebox.show()
self.maintable.attach(self.scalebox, 0, 1, 1, 2)
self.colorbox = gtk.Frame()
self.colorbox.show()
self.maintable.attach(self.colorbox, 1, 2, 0, 2, gtk.EXPAND)
# Upper left: Choose how the atoms are colored.
lbl = gtk.Label(_("Choose how the atoms are colored:"))
pack(self.methodbox, [lbl])
self.radio_jmol = gtk.RadioButton(None, _('By atomic number, default "jmol" colors'))
self.radio_atno = gtk.RadioButton(self.radio_jmol,
_('By atomic number, user specified'))
self.radio_tag = gtk.RadioButton(self.radio_jmol, _('By tag'))
self.radio_force = gtk.RadioButton(self.radio_jmol, _('By force'))
self.radio_velocity = gtk.RadioButton(self.radio_jmol, _('By velocity'))
self.radio_charge = gtk.RadioButton(self.radio_jmol, _('By charge'))
self.radio_magnetic_moment = gtk.RadioButton(
self.radio_jmol, _('By magnetic moment'))
self.radio_coordination = gtk.RadioButton(
self.radio_jmol, _('By coordination'))
self.radio_manual = gtk.RadioButton(self.radio_jmol, _('Manually specified'))
self.radio_same = gtk.RadioButton(self.radio_jmol, _('All the same color'))
self.force_box = gtk.VBox()
self.velocity_box = gtk.VBox()
self.charge_box = gtk.VBox()
self.magnetic_moment_box = gtk.VBox()
for widget in (self.radio_jmol, self.radio_atno, self.radio_tag,
self.radio_force, self.force_box, self.radio_velocity,
self.radio_charge, self.charge_box,
self.radio_magnetic_moment,
self.magnetic_moment_box,
self.radio_coordination,
self.velocity_box, self.radio_manual, self.radio_same):
pack(self.methodbox, [widget])
if isinstance(widget, gtk.RadioButton):
widget.connect('toggled', self.method_radio_changed)
# Now fill in the box for additional information in case the force is used.
self.force_label = gtk.Label(_("This should not be displayed in forces!"))
pack(self.force_box, [self.force_label])
self.min = gtk.Adjustment(0.0, 0.0, 100.0, 0.05)
self.max = gtk.Adjustment(0.0, 0.0, 100.0, 0.05)
self.steps = gtk.Adjustment(10, 2, 500, 1)
force_apply = gtk.Button(_('Update'))
force_apply.connect('clicked', self.set_min_max_colors)
pack(self.force_box, [gtk.Label(_('Min: ')),
gtk.SpinButton(self.min, 1.0, 2),
gtk.Label(_(' Max: ')),
gtk.SpinButton(self.max, 1.0, 2),
gtk.Label(_(' Steps: ')),
gtk.SpinButton(self.steps, 1, 0),
gtk.Label(' '),
force_apply])
self.force_box.hide()
# Now fill in the box for additional information in case the velocity is used.
self.velocity_label = gtk.Label("This should not be displayed!")
pack(self.velocity_box, [self.velocity_label])
velocity_apply = gtk.Button(_('Update'))
velocity_apply.connect('clicked', self.set_min_max_colors)
pack(self.velocity_box, [gtk.Label(_('Min: ')),
gtk.SpinButton(self.min, 1.0, 3),
gtk.Label(_(' Max: ')),
gtk.SpinButton(self.max, 1.0, 3),
gtk.Label(_(' Steps: ')),
gtk.SpinButton(self.steps, 1, 0),
gtk.Label(' '),
velocity_apply])
self.velocity_box.hide()
# Now fill in the box for additional information in case
# the charge is used.
self.charge_label = gtk.Label(_("This should not be displayed!"))
pack(self.charge_box, [self.charge_label])
charge_apply = gtk.Button(_('Update'))
charge_apply.connect('clicked', self.set_min_max_colors)
pack(self.charge_box, [gtk.Label(_('Min: ')),
gtk.SpinButton(self.min, 10.0, 2),
gtk.Label(_(' Max: ')),
gtk.SpinButton(self.max, 10.0, 2),
gtk.Label(_(' Steps: ')),
gtk.SpinButton(self.steps, 1, 0),
gtk.Label(' '),
charge_apply])
self.charge_box.hide()
# Now fill in the box for additional information in case
# the magnetic moment is used.
self.magnetic_moment_label = gtk.Label(_(
"This should not be displayed!"))
pack(self.magnetic_moment_box, [self.magnetic_moment_label])
magnetic_moment_apply = gtk.Button(_('Update'))
magnetic_moment_apply.connect('clicked', self.set_min_max_colors)
pack(self.magnetic_moment_box, [gtk.Label(_('Min: ')),
gtk.SpinButton(self.min, 10.0, 2),
gtk.Label(_(' Max: ')),
gtk.SpinButton(self.max, 10.0, 2),
gtk.Label(_(' Steps: ')),
gtk.SpinButton(self.steps, 1, 0),
gtk.Label(' '),
magnetic_moment_apply])
self.magnetic_moment_box.hide()
# Lower left: Create a color scale
pack(self.scalebox, gtk.Label(""))
lbl = gtk.Label(_('Create a color scale:'))
pack(self.scalebox, [lbl])
color_scales = (
_('Black - white'),
_('Black - red - yellow - white'),
_('Black - green - white'),
_('Black - blue - cyan'),
_('Blue - white - red'),
_('Hue'),
_('Named colors')
)
self.scaletype_created = None
self.scaletype = gtk.combo_box_new_text()
for s in color_scales:
self.scaletype.append_text(s)
self.createscale = gtk.Button(_("Create"))
pack(self.scalebox, [self.scaletype, self.createscale])
self.createscale.connect('clicked', self.create_color_scale)
# The actually colors are specified in a box possibly with scrollbars
self.colorwin = gtk.ScrolledWindow()
self.colorwin.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
self.colorwin.show()
self.colorbox.add(self.colorwin)
self.colorwin.add_with_viewport(gtk.VBox()) # Dummy contents
buts = cancel_apply_ok(cancel=lambda widget: self.destroy(),
apply=self.apply,
ok=self.ok)
pack(vbox, [buts], end=True, bottom=True)
# Make the initial setup of the colors
self.color_errors = {}
self.init_colors_from_gui()
self.show()
gui.register_vulnerable(self)
def notify_atoms_changed(self):
"Called by gui object when the atoms have changed."
self.destroy()
def init_colors_from_gui(self):
cm = self.gui.colormode
# Disallow methods if corresponding data is not available
if not self.gui.images.T.any():
self.radio_tag.set_sensitive(False)
if self.radio_tag.get_active() or cm == 'tag':
self.radio_jmol.set_active(True)
return
else:
self.radio_tag.set_sensitive(True)
if np.isnan(self.gui.images.F).any() or not self.gui.images.F.any():
self.radio_force.set_sensitive(False)
if self.radio_force.get_active() or cm == 'force':
self.radio_jmol.set_active(True)
return
else:
self.radio_force.set_sensitive(True)
if np.isnan(self.gui.images.V).any() or not self.gui.images.V.any():
self.radio_velocity.set_sensitive(False)
if self.radio_velocity.get_active() or cm == 'velocity':
self.radio_jmol.set_active(True)
return
else:
self.radio_velocity.set_sensitive(True)
if not self.gui.images.q.any():
self.radio_charge.set_sensitive(False)
else:
self.radio_charge.set_sensitive(True)
if not self.gui.images.M.any():
self.radio_magnetic_moment.set_sensitive(False)
else:
self.radio_magnetic_moment.set_sensitive(True)
self.radio_manual.set_sensitive(self.gui.images.natoms <= 1000)
# Now check what the current color mode is
if cm == 'jmol':
self.radio_jmol.set_active(True)
self.set_jmol_colors()
elif cm == 'atno':
self.radio_atno.set_active(True)
elif cm == 'tags':
self.radio_tag.set_active(True)
elif cm == 'force':
self.radio_force.set_active(True)
elif cm == 'velocity':
self.radio_velocity.set_active(True)
elif cm == 'charge':
self.radio_charge.set_active(True)
elif cm == 'magnetic moment':
self.radio_magnetic_moment.set_active(True)
elif cm == 'coordination':
self.radio_coordination.set_active(True)
elif cm == 'manual':
self.radio_manual.set_active(True)
elif cm == 'same':
self.radio_same.set_active(True)
def method_radio_changed(self, widget=None):
"Called when a radio button is changed."
self.scaletype_created = None
self.scaletype.set_active(-1)
if not widget.get_active():
# Ignore most events when a button is turned off.
if widget is self.radio_force:
self.force_box.hide()
if widget is self.radio_velocity:
self.velocity_box.hide()
return
if widget is self.radio_jmol:
self.set_jmol_colors()
elif widget is self.radio_atno:
self.set_atno_colors()
elif widget is self.radio_tag:
self.set_tag_colors()
elif widget is self.radio_force:
self.show_force_stuff()
self.set_min_max_colors('force')
elif widget is self.radio_velocity:
self.show_velocity_stuff()
self.set_min_max_colors('velocity')
elif widget is self.radio_charge:
self.show_charge_stuff()
self.set_min_max_colors('charge')
elif widget is self.radio_magnetic_moment:
self.show_magnetic_moment_stuff()
self.set_min_max_colors('magnetic moment')
elif widget is self.radio_coordination:
self.set_coordination_colors()
elif widget is self.radio_manual:
self.set_manual_colors()
elif widget is self.radio_same:
self.set_same_color()
else:
raise RuntimeError('Unknown widget in method_radio_changed')
def make_jmol_colors(self):
"Set the colors to the default jmol colors"
self.colordata_z = []
hasfound = {}
for z in self.gui.images.Z:
if z not in hasfound:
hasfound[z] = True
self.colordata_z.append([z, jmol_colors[z]])
def set_jmol_colors(self):
"We use the immutable jmol colors."
self.make_jmol_colors()
self.set_atno_colors()
for entry in self.color_entries:
entry.set_sensitive(False)
self.colormode = 'jmol'
def set_atno_colors(self):
"We use user-specified per-element colors."
if not hasattr(self, 'colordata_z'):
# No initial colors. Use jmol colors
self.make_jmol_colors()
self.actual_colordata = self.colordata_z
self.color_labels = ["%i (%s):" % (z, ase.data.chemical_symbols[z])
for z, col in self.colordata_z]
self.make_colorwin()
self.colormode = 'atno'
def set_tag_colors(self):
"We use per-tag colors."
# Find which tags are in use
tags = self.gui.images.T
existingtags = range(tags.min(), tags.max() + 1)
if not hasattr(self, 'colordata_tags') or len(self.colordata_tags) != len(existingtags):
colors = self.get_named_colors(len(existingtags))
self.colordata_tags = [[x, y] for x, y in
zip(existingtags, colors)]
self.actual_colordata = self.colordata_tags
self.color_labels = [str(x)+':' for x, y in self.colordata_tags]
self.make_colorwin()
self.colormode = 'tags'
def set_same_color(self):
"All atoms have the same color"
if not hasattr(self, 'colordata_same'):
try:
self.colordata_same = self.actual_colordata[0:1]
except AttributeError:
self.colordata_same = self.get_named_colors(1)
self.actual_colordata = self.colordata_same
self.actual_colordata[0][0] = 0
self.color_labels = ['all:']
self.make_colorwin()
self.colormode = 'same'
def set_min_max_colors(self, mode):
borders = np.linspace(self.min.value, self.max.value, self.steps.value,
endpoint=False)
if self.scaletype_created is None:
colors = self.new_color_scale([[0, [1,1,1]],
[1, [0,0,1]]], len(borders))
elif (mode not in self.colordata or
len(self.colordata[mode]) != len(borders)):
colors = self.get_color_scale(len(borders), self.scaletype_created)
else:
colors = [y for x, y in self.colordata[mode]]
self.colordata[mode] = [[x, y] for x, y in zip(borders, colors)]
self.actual_colordata = self.colordata[mode]
self.color_labels = ["%.2f:" % x for x, y in self.colordata[mode]]
self.make_colorwin()
self.colormode = mode
factor = self.steps.value / (self.max.value - self.min.value)
self.colormode_data = (self.min.value, factor)
def set_coordination_colors(self, *args):
"Use coordination as basis for the colors."
if not hasattr(self.gui, 'coordination'):
self.gui.toggle_show_bonds(None)
coords = self.gui.coordination
existing = range(0, coords.max() + 1)
if not hasattr(self, 'colordata_coordination'):
colors = self.get_named_colors(len(named_colors))
self.colordata_coordination = [[x, y] for x, y in
enumerate(colors)]
self.actual_colordata = self.colordata_coordination
self.color_labels = [(str(x) + ':')
for x, y in self.colordata_coordination]
self.make_colorwin()
self.colormode = 'coordination'
def set_manual_colors(self):
"Set colors of all atoms from the last selection."
# We cannot directly make np.arrays of the colors, as they may
# be sequences of the same length, causing creation of a 2D
# array of characters/numbers instead of a 1D array of
# objects.
colors = np.array([None] * self.gui.images.natoms)
if self.colormode in ['atno', 'jmol', 'tags']:
maxval = max([x for x, y in self.actual_colordata])
oldcolors = np.array([None] * (maxval+1))
for x, y in self.actual_colordata:
oldcolors[x] = y
if self.colormode == 'tags':
colors[:] = oldcolors[self.gui.images.T[self.gui.frame]]
else:
colors[:] = oldcolors[self.gui.images.Z]
elif self.colormode == 'force':
oldcolors = np.array([None] * len(self.actual_colordata))
oldcolors[:] = [y for x, y in self.actual_colordata]
F = self.gui.images.F[self.gui.frame]
F = np.sqrt((F * F).sum(axis=-1))
nF = (F - self.colormode_force_data[0]) * self.colormode_force_data[1]
nF = np.clip(nF.astype(int), 0, len(oldcolors)-1)
colors[:] = oldcolors[nF]
elif self.colormode == 'velocity':
oldcolors = np.array([None] * len(self.actual_colordata))
oldcolors[:] = [y for x, y in self.actual_colordata]
V = self.gui.images.V[self.gui.frame]
V = np.sqrt((V * V).sum(axis=-1))
nV = (V - self.colormode_velocity_data[0]) * self.colormode_velocity_data[1]
nV = np.clip(nV.astype(int), 0, len(oldcolors)-1)
colors[:] = oldcolors[nV]
elif self.colormode == 'charge':
oldcolors = np.array([None] * len(self.actual_colordata))
oldcolors[:] = [y for x, y in self.actual_colordata]
q = self.gui.images.q[self.gui.frame]
nq = ((q - self.colormode_charge_data[0]) *
self.colormode_charge_data[1])
nq = np.clip(nq.astype(int), 0, len(oldcolors)-1)
colors[:] = oldcolors[nq]
elif self.colormode == 'magnetic moment':
oldcolors = np.array([None] * len(self.actual_colordata))
oldcolors[:] = [y for x, y in self.actual_colordata]
q = self.gui.images.q[self.gui.frame]
nq = ((q - self.colormode_magnetic_moment_data[0]) *
self.colormode_magnetic_moment_data[1])
nq = np.clip(nq.astype(int), 0, len(oldcolors)-1)
colors[:] = oldcolors[nq]
elif self.colormode == 'coordination':
oldcolors = np.array([None] * len(self.actual_colordata))
oldcolors[:] = [y for x, y in self.actual_colordata]
print(self.gui.images.bonds)
elif self.colormode == 'same':
oldcolor = self.actual_colordata[0][1]
if len(colors) == len(oldcolor):
# Direct assignment would be e.g. one letter per atom. :-(
colors[:] = [oldcolor] * len(colors)
else:
colors[:] = oldcolor
elif self.colormode == 'manual':
if self.actual_colordata is None: # import colors from gui, if they don't exist already
colors = [y for x,y in self.gui.colordata]
self.color_labels = ["%d:" % i for i in range(len(colors))]
self.actual_colordata = [[i, x] for i, x in enumerate(colors)]
self.make_colorwin()
self.colormode = 'manual'
def get_min_max_text(self, mode, vmin, vmax, min_frame, max_frame):
nimages = self.gui.images.nimages
txt = 'Max {0}: {1:.2f}'.format(mode, vmax)
if nimages > 1:
txt += '(all frames), {0:.2f} (this frame)'.format(max_frame)
self.max.value = vmax
if vmin is None:
self.min.value = 0.
else:
txt += ', Min {0:.2f}'.format(vmin)
if nimages > 1:
txt += '(all frames), {0:.2f} (this frame)'.format(min_frame)
self.min.value = vmin
return txt
def show_force_stuff(self):
"Show and update widgets needed for selecting the force scale."
self.force_box.show()
# XXX is this projected on some axis ? XXX
F = np.sqrt(((self.gui.images.F *
self.gui.images.dynamic[:,np.newaxis])**2).sum(axis=-1))
txt = self.get_min_max_text(
'force', None, F.max(),
None, self.gui.images.F[self.gui.frame].max())
self.force_label.set_text(_(txt))
def show_velocity_stuff(self):
"Show and update widgets needed for selecting the velocity scale."
self.velocity_box.show()
V = np.sqrt((self.gui.images.V * self.gui.images.V).sum(axis=-1))
Vframe = np.sqrt((self.gui.images.V[self.gui.frame] *
self.gui.images.V[self.gui.frame]).sum(axis=-1))
txt = self.get_min_max_text(
'velocity', None, V.max(), None, Vframe.max())
self.velocity_label.set_text(_(txt))
def show_charge_stuff(self):
"Show and update widgets needed for selecting the charge scale."
self.charge_box.show()
txt = self.get_min_max_text(
'charge', self.gui.images.q.min(), self.gui.images.q.max(),
self.gui.images.q[self.gui.frame].min(),
self.gui.images.q[self.gui.frame].max())
self.charge_label.set_text(_(txt))
def show_magnetic_moment_stuff(self):
"Show and update widgets needed for selecting the magn. mom. scale."
self.magnetic_moment_box.show()
txt = self.get_min_max_text(
'magnetic moment', self.gui.images.M.min(), self.gui.images.M.max(),
self.gui.images.M[self.gui.frame].min(),
self.gui.images.M[self.gui.frame].max())
self.magnetic_moment_label.set_text(_(txt))
def make_colorwin(self):
"""Make the list of editable color entries.
Uses self.actual_colordata and self.color_labels. Produces self.color_entries.
"""
assert len(self.actual_colordata) == len(self.color_labels)
self.color_entries = []
old = self.colorwin.get_child()
self.colorwin.remove(old)
del old
table = gtk.Table(len(self.actual_colordata)+1, 4)
self.colorwin.add_with_viewport(table)
table.show()
self.color_display = []
for i in range(len(self.actual_colordata)):
lbl = gtk.Label(self.color_labels[i])
entry = gtk.Entry(max=20)
val = self.actual_colordata[i][1]
error = False
if not isinstance(val, str):
assert len(val) == 3
intval = tuple(np.round(65535*np.array(val)).astype(int))
val = "%.3f, %.3f, %.3f" % tuple(val)
clr = gtk.gdk.Color(*intval)
else:
try:
clr = gtk.gdk.color_parse(val)
except ValueError:
error = True
entry.set_text(val)
blob = gtk.EventBox()
space = gtk.Label
space = gtk.Label(" ")
space.show()
blob.add(space)
if error:
space.set_text(_("ERROR"))
else:
blob.modify_bg(gtk.STATE_NORMAL, clr)
table.attach(lbl, 0, 1, i, i+1, yoptions=0)
table.attach(entry, 1, 2, i, i+1, yoptions=0)
table.attach(blob, 2, 3, i, i+1, yoptions=0)
lbl.show()
entry.show()
blob.show()
entry.connect('changed', self.entry_changed, i)
self.color_display.append(blob)
self.color_entries.append(entry)
def entry_changed(self, widget, index):
"""The user has changed a color."""
txt = widget.get_text()
txtfields = txt.split(',')
if len(txtfields) == 3:
self.actual_colordata[index][1] = [float(x) for x in txtfields]
val = tuple([int(65535*float(x)) for x in txtfields])
clr = gtk.gdk.Color(*val)
else:
self.actual_colordata[index][1] = txt
try:
clr = gtk.gdk.color_parse(txt)
except ValueError:
# Cannot parse the color
displ = self.color_display[index]
displ.modify_bg(gtk.STATE_NORMAL, gtk.gdk.color_parse('white'))
displ.get_child().set_text(_("ERR"))
self.color_errors[index] = (self.color_labels[index], txt)
return
self.color_display[index].get_child().set_text(" ") # Clear error message
self.color_errors.pop(index, None)
self.color_display[index].modify_bg(gtk.STATE_NORMAL, clr)
def create_color_scale(self, *args):
if self.radio_jmol.get_active():
self.radio_atno.set_active(1)
n = len(self.color_entries)
s = self.scaletype.get_active()
scale = self.get_color_scale(n, s)
self.scaletype_created = s
for i in range(n):
if isinstance(scale[i], str):
self.color_entries[i].set_text(scale[i])
else:
s = "%.3f, %.3f, %.3f" % tuple(scale[i])
self.color_entries[i].set_text(s)
self.color_entries[i].activate()
def get_color_scale(self, n, s):
if s == 0:
# Black - White
scale = self.new_color_scale([[0, [0,0,0]],
[1, [1,1,1]]], n)
elif s == 1:
# Black - Red - Yellow - White (STM colors)
scale = self.new_color_scale([[0, [0,0,0]],
[0.33, [1,0,0]],
[0.67, [1,1,0]],
[1, [1,1,1]]], n)
elif s == 2:
# Black - Green - White
scale = self.new_color_scale([[0, [0,0,0]],
[0.5, [0,0.9,0]],
[0.75, [0.2,1.0,0.2]],
[1, [1,1,1]]], n)
elif s == 3:
# Black - Blue - Cyan
scale = self.new_color_scale([[0, [0,0,0]],
[0.5, [0,0,1]],
[1, [0,1,1]]], n)
elif s == 4:
# Blue - White - Red
scale = self.new_color_scale([[0, [0,0,1]],
[0.5, [1,1,1]],
[2, [1,0,0]]], n)
elif s == 5:
# Hues
hues = np.linspace(0.0, 1.0, n, endpoint=False)
scale = ["%.3f, %.3f, %.3f" % colorsys.hls_to_rgb(h, 0.5, 1)
for h in hues]
elif s == 6:
# Named colors
scale = self.get_named_colors(n)
else:
scale = None
return scale
def new_color_scale(self, fixpoints, n):
"Create a homogeneous color scale."
x = np.array([a[0] for a in fixpoints], float)
y = np.array([a[1] for a in fixpoints], float)
assert y.shape[1] == 3
res = []
for a in np.linspace(0.0, 1.0, n, endpoint=True):
n = x.searchsorted(a)
if n == 0:
v = y[0] # Before the start
elif n == len(x):
v = x[-1] # After the end
else:
x0 = x[n-1]
x1 = x[n]
y0 = y[n-1]
y1 = y[n]
v = y0 + (y1 - y0) / (x1 - x0) * (a - x0)
res.append(v)
return res
def get_named_colors(self, n):
if n <= len(named_colors):
return named_colors[:n]
else:
return named_colors + ('Black',) * (n - len(named_colors))
def apply(self, *args):
#if self.colormode in ['atno', 'jmol', 'tags']:
# Color atoms according to an integer value number
if self.color_errors:
oops(_("Incorrect color specification"),
"%s: %s" % self.color_errors.values()[0])
return False
colordata = self.actual_colordata
if self.colormode in [
'force', 'velocity', 'charge', 'magnetic moment']:
# Use integers instead for border values
colordata = [[i, x[1]] for i, x in enumerate(self.actual_colordata)]
self.gui.colormode_data = self.colormode_data
maxval = max([x for x, y in colordata])
self.gui.colors = [None] * (maxval + 1)
new = self.gui.drawing_area.window.new_gc
alloc = self.gui.colormap.alloc_color
for z, val in colordata:
if isinstance(val, str):
self.gui.colors[z] = new(alloc(val))
else:
clr = tuple([int(65535*x) for x in val])
assert len(clr) == 3
self.gui.colors[z] = new(alloc(*clr))
self.gui.colormode = self.colormode
self.gui.colordata = colordata
self.gui.draw()
return True
def cancel(self, *args):
self.destroy()
def ok(self, *args):
if self.apply():
self.destroy()
| gpl-2.0 |
garethsion/UCL_RSD_Assessment_1 | greengraph/map.py | 1 | 1604 | #!/usr/bin/env python
import numpy as np
from io import BytesIO
from matplotlib import image as img
import requests
class Map(object):
    """A satellite map image fetched from the Google Static Maps API.

    The downloaded PNG is kept both as raw bytes (``self.image``) and as a
    numpy pixel array (``self.pixels``) so the amount of vegetation
    ("green" pixels) can be analysed.
    """

    def __init__(self, lat, long, satellite=True, zoom=10,
                 size=(400,400), sensor=False):
        """Download the map tile centred on (lat, long).

        :param lat: latitude of the map centre.
        :param long: longitude of the map centre.
        :param satellite: request satellite imagery instead of the road map.
        :param zoom: Google Maps zoom level.
        :param size: (width, height) of the requested image in pixels.
        :param sensor: forwarded to the API's ``sensor`` parameter.
        """
        base="http://maps.googleapis.com/maps/api/staticmap?"
        params=dict(
            sensor= str(sensor).lower(),
            zoom= zoom,
            size= "x".join(map(str, size)),
            center= ",".join(map(str, (lat, long) )),
            style="feature:all|element:labels|visibility:off"
        )
        if satellite:
            params["maptype"]="satellite"
        # Fetch our PNG image data.
        self.image = requests.get(base, params=params).content
        # Parse our PNG image as a numpy array.
        self.pixels = img.imread(BytesIO(self.image))

    def green(self, threshold):
        """Return a boolean array marking pixels judged to be 'green'.

        A pixel counts as green when its green channel exceeds both the
        red and the blue channel by the factor ``threshold``.
        """
        greener_than_red = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 0]
        greener_than_blue = self.pixels[:, :, 1] > threshold * self.pixels[:, :, 2]
        return np.logical_and(greener_than_red, greener_than_blue)

    def count_green(self, threshold=1.1):
        """Return the number of green pixels in the image."""
        return np.sum(self.green(threshold))

    def show_green(self, threshold=1.1):
        """Render the green mask as PNG bytes (green pixels on black)."""
        green = self.green(threshold)
        out = green[:, :, np.newaxis] * np.array([0, 1, 0])[np.newaxis, np.newaxis, :]
        png_buffer = BytesIO()
        # imsave writes the PNG into the buffer and returns None, so the
        # dead ``result =`` binding the original had is dropped.
        img.imsave(png_buffer, out, format='png')
        return png_buffer.getvalue()
| mit |
kjw0106/boto | tests/integration/s3/test_pool.py | 114 | 8255 | # Copyright (c) 2011 Brian Beach
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some multi-threading tests of boto in a greenlet environment.
"""
from __future__ import print_function
import boto
import time
import uuid
from threading import Thread
def spawn(function, *args, **kwargs):
    """Start *function* in a new thread and return the running Thread.

    The call signature mirrors gevent.greenlet.Greenlet.spawn.
    """
    worker = Thread(target=function, args=args, kwargs=kwargs)
    worker.start()
    return worker
def put_object(bucket, name):
    """Store an object in *bucket* whose key and contents are both *name*."""
    key = bucket.new_key(name)
    key.set_contents_from_string(name)
def get_object(bucket, name):
    """Fetch *name* from *bucket* and verify its contents round-tripped."""
    contents = bucket.get_key(name).get_contents_as_string().decode('utf-8')
    assert contents == name
def test_close_connections():
    """
    A test that exposes the problem where connections are returned to the
    connection pool (and closed) before the caller reads the response.

    I couldn't think of a way to test it without greenlets, so this test
    doesn't run as part of the standard test suite.  That way, no more
    dependencies are added to the test suite.
    """
    print("Running test_close_connections")

    # Connect to S3
    s3 = boto.connect_s3()

    # Clean previous tests.
    for b in s3.get_all_buckets():
        if b.name.startswith('test-'):
            for key in b.get_all_keys():
                key.delete()
            b.delete()

    # Make a test bucket
    bucket = s3.create_bucket('test-%d' % int(time.time()))

    # Create 30 threads that each create an object in S3. The number
    # 30 is chosen because it is larger than the connection pool size
    # (20).
    # Bug fix: uuid.uuid4 must be *called*.  The original stringified the
    # function object itself, so all 30 "unique" names were identical and
    # every thread wrote the same key.  (The companion test at
    # test_reuse_connections already used uuid.uuid4() correctly.)
    names = [str(uuid.uuid4()) for _ in range(30)]

    threads = [
        spawn(put_object, bucket, name)
        for name in names
    ]
    for t in threads:
        t.join()

    # Create 30 threads to read the contents of the new objects. This
    # is where closing the connection early is a problem, because
    # there is a response that needs to be read, and it can't be read
    # if the connection has already been closed.
    threads = [
        spawn(get_object, bucket, name)
        for name in names
    ]
    for t in threads:
        t.join()
# test_reuse_connections needs to read a file that is big enough that
# one read() call on the socket won't read the whole thing.
BIG_SIZE = 10000
class WriteAndCount(object):
    """A write-only file-like object that just tallies characters written."""

    def __init__(self):
        # Running total of characters passed to write().
        self.size = 0

    def write(self, data):
        self.size = self.size + len(data)
        time.sleep(0) # yield to other threads
def read_big_object(s3, bucket, name, count):
    """Fetch *name* from *bucket* *count* times, checking its size each time."""
    for _ in range(count):
        counter = WriteAndCount()
        bucket.get_key(name).get_contents_to_file(counter)
        if counter.size != BIG_SIZE:
            print(counter.size, BIG_SIZE)
        assert counter.size == BIG_SIZE
        print("    pool size:", s3._pool.size())
class LittleQuerier(object):
    """
    An object that manages a thread that keeps pulling down small
    objects from S3 and checking the answers until told to stop.
    """
    def __init__(self, bucket, small_names):
        # Flag polled by the worker loop; cleared by stop().
        self.running = True
        self.bucket = bucket
        self.small_names = small_names
        # Start the worker thread immediately.
        self.thread = spawn(self.run)
    def stop(self):
        """Ask the worker thread to finish and wait for it to exit."""
        self.running = False
        self.thread.join()
    def run(self):
        """Worker loop: repeatedly fetch the four small objects in turn."""
        count = 0
        while self.running:
            # Cycle through the four small objects.
            i = count % 4
            key = self.bucket.get_key(self.small_names[i])
            # Object i was created with contents str(i).
            expected = str(i)
            rh = { 'response-content-type' : 'small/' + str(i) }
            actual = key.get_contents_as_string(response_headers = rh).decode('utf-8')
            if expected != actual:
                print("AHA:", repr(expected), repr(actual))
            assert expected == actual
            count += 1
def test_reuse_connections():
    """
    This test is an attempt to expose problems because of the fact
    that boto returns connections to the connection pool before
    reading the response.  The strategy is to start a couple big reads
    from S3, where it will take time to read the response, and then
    start other requests that will reuse the same connection from the
    pool while the big response is still being read.

    The test passes because of an interesting combination of factors.
    I was expecting that it would fail because two threads would be
    reading the same connection at the same time.  That doesn't happen
    because httplib catches the problem before it happens and raises
    an exception.

    Here's the sequence of events:

       - Thread 1: Send a request to read a big S3 object.
       - Thread 1: Returns connection to pool.
       - Thread 1: Start reading the body of the response.
       - Thread 2: Get the same connection from the pool.
       - Thread 2: Send another request on the same connection.
       - Thread 2: Try to read the response, but
                   HTTPConnection.get_response notices that the
                   previous response isn't done reading yet, and
                   raises a ResponseNotReady exception.
       - Thread 2: _mexe catches the exception, does not return the
                   connection to the pool, gets a new connection, and
                   retries.
       - Thread 1: Finish reading the body of its response.
       - Server:   Gets the second request on the connection, and
                   sends a response.  This response is ignored because
                   the connection has been dropped on the client end.

    If you add a print statement in HTTPConnection.get_response at the
    point where it raises ResponseNotReady, and then run this test,
    you can see that it's happening.
    """
    print("Running test_reuse_connections")

    # Connect to S3
    s3 = boto.connect_s3()

    # Make a test bucket
    bucket = s3.create_bucket('test-%d' % int(time.time()))

    # Create some small objects in S3.
    small_names = [str(uuid.uuid4()) for _ in range(4)]
    for (i, name) in enumerate(small_names):
        bucket.new_key(name).set_contents_from_string(str(i))

    # Wait, clean the connection pool, and make sure it's empty.
    # (STALE_DURATION is how long an idle connection may sit in the pool.)
    print("    waiting for all connections to become stale")
    time.sleep(s3._pool.STALE_DURATION + 1)
    s3._pool.clean()
    assert s3._pool.size() == 0
    print("    pool is empty")

    # Create a big object in S3.
    big_name = str(uuid.uuid4())
    contents = "-" * BIG_SIZE
    bucket.new_key(big_name).set_contents_from_string(contents)

    # Start some threads to read it and check that they are reading
    # the correct thing. Each thread will read the object 20 times.
    threads = [
        spawn(read_big_object, s3, bucket, big_name, 20)
        for _ in range(5)
    ]

    # Do some other things that may (incorrectly) re-use the same
    # connections while the big objects are being read.
    queriers = [
        LittleQuerier(bucket, small_names)
        for _ in range(5)
    ]

    # Clean up.
    for t in threads:
        t.join()
    for q in queriers:
        q.stop()
def main():
    """Run both connection-pool stress tests in sequence."""
    test_close_connections()
    test_reuse_connections()
if __name__ == '__main__':
main()
| mit |
hhbyyh/spark | examples/src/main/python/transitive_closure.py | 128 | 2408 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
from random import Random
from pyspark.sql import SparkSession
numEdges = 200
numVertices = 100
rand = Random(42)
def generateGraph(num_edges=None, num_vertices=None, rng=None):
    """Generate a random directed graph as a set of (src, dst) edges.

    Self-loops are excluded and duplicate draws collapse into the set, so
    the loop keeps drawing until exactly the requested number of distinct
    edges exists.  Precondition: num_edges must not exceed
    num_vertices * (num_vertices - 1), or the loop cannot terminate.

    The parameters generalize the previously hard-coded module globals and
    default to them, so existing zero-argument calls behave identically.

    :param num_edges: number of distinct edges to create
        (defaults to the module-level ``numEdges``).
    :param num_vertices: vertices are numbered 0 .. num_vertices - 1
        (defaults to the module-level ``numVertices``).
    :param rng: source of randomness with a ``randrange`` method
        (defaults to the module-level ``rand``).
    :returns: a set of (src, dst) tuples.
    """
    if num_edges is None:
        num_edges = numEdges
    if num_vertices is None:
        num_vertices = numVertices
    if rng is None:
        rng = rand
    edges = set()
    while len(edges) < num_edges:
        src = rng.randrange(0, num_vertices)
        dst = rng.randrange(0, num_vertices)
        if src != dst:
            edges.add((src, dst))
    return edges
if __name__ == "__main__":
    """
    Usage: transitive_closure [partitions]
    """
    spark = SparkSession\
        .builder\
        .appName("PythonTransitiveClosure")\
        .getOrCreate()
    # Number of RDD partitions; taken from the command line, default 2.
    partitions = int(sys.argv[1]) if len(sys.argv) > 1 else 2
    tc = spark.sparkContext.parallelize(generateGraph(), partitions).cache()
    # Linear transitive closure: each round grows paths by one edge,
    # by joining the graph's edges with the already-discovered paths.
    # e.g. join the path (y, z) from the TC with the edge (x, y) from
    # the graph to obtain the path (x, z).
    # Because join() joins on keys, the edges are stored in reversed order.
    edges = tc.map(lambda x_y: (x_y[1], x_y[0]))
    oldCount = 0
    nextCount = tc.count()
    # Iterate to a fixed point: stop when a round adds no new paths.
    while True:
        oldCount = nextCount
        # Perform the join, obtaining an RDD of (y, (z, x)) pairs,
        # then project the result to obtain the new (x, z) paths.
        new_edges = tc.join(edges).map(lambda __a_b: (__a_b[1][1], __a_b[1][0]))
        tc = tc.union(new_edges).distinct().cache()
        nextCount = tc.count()
        if nextCount == oldCount:
            break
    print("TC has %i edges" % tc.count())
    spark.stop()
| apache-2.0 |
corvorepack/REPOIVAN | plugin.video.movie.ultra.7k/resources/tools/media_analyzer.py | 2 | 30555 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# Analizador de medios de PalcoTV
# Version 0.1 (11/12/2015)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Gracias a Juarrox
#------------------------------------------------------------
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import urllib2
import HTMLParser
import urllib,urlparse
from BeautifulSoup import BeautifulSoup as bs
from resources.tools.resolvers import *
import json
from __main__ import *
# Addon metadata read from Kodi's addon.xml; used mainly in log prefixes below.
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
def plugin_analyzer(data, title, plot, datamovie, thumbnail, fanart):
    """Route a ``plugin://`` URL to the matching Kodi list entry.

    Inspects the URL scheme of ``data`` and adds a list item (or launches the
    target addon) with the decorated ``title``; ``plot``/``datamovie``/
    ``thumbnail``/``fanart`` are forwarded as item metadata.  Unrecognized
    plugin URLs fall through to a generic playable "[Addon]" entry.
    """
    plugintools.log("[%s %s] Analizando plugin... %s " % (addonName, addonVersion, fanart))
    if data.startswith("plugin://plugin.video.SportsDevil/"):
        url = data.strip()
        plugintools.add_item(action="play", title='[COLOR white]' + title + '[COLOR lightyellow][I] [SportsDevil][/I][/COLOR]', url=url, info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
    elif data.startswith("plugin://plugin.video.f4mTester"):
        plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [F4M][/I][/COLOR]', plot=plot, url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
    elif data.startswith("plugin://plugin.video.youtube"):
        if data.startswith("plugin://plugin.video.youtube/channel/"):
            plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [You[B]Tube[/B] Channel][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
        elif data.startswith("plugin://plugin.video.youtube/user/"):
            plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [You[B]Tube[/B] User][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
        elif data.startswith("plugin://plugin.video.youtube/playlist/"):
            plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [You[B]Tube[/B] Playlist][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
        elif data.startswith("plugin://plugin.video.youtube/play/?playlist_id"):
            plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [You[B]Tube[/B] Playlist][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
        else:
            # NOTE(review): runAddon() receives add_item-style keyword args here
            # (and in two branches below) — looks like a copy/paste of add_item;
            # confirm against plugintools before changing.
            plugintools.runAddon(action="play", title='[COLOR white]' + title + '[COLOR lightyellow][I] [You[B]Tube[/B] Video][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
    elif data.startswith("plugin://plugin.video.p2p-streams"):
        # BUG FIX: the original tested `data.find("plugin.video.p2p-streams") == True`,
        # i.e. substring-index == 1.  For these URLs the substring starts at
        # index 9, so the branch never matched and every p2p-streams entry fell
        # through to the generic "else" below.  Use startswith() like the
        # other scheme tests.
        if data.find("mode=1") >= 0:  # Acestream
            plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [Acestream][/I][/COLOR]', plot=plot, url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
        elif data.find("mode=2") >= 0:  # Sopcast
            plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [Sopcast][/I][/COLOR]', plot=plot, url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=True)
        elif data.find("mode=401") >= 0:  # P2P-Streams parser
            plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [p2p-streams][/I][/COLOR]', plot=plot, url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
    elif data.startswith("plugin://plugin.video.p2psport"):
        plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [P2P Sport][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
    elif data.startswith("plugin://plugin.video.live.streamspro"):
        # Pseudo-parsers and list makers from LiveStreamsPro open as folders.
        if data.strip().find("mode=1&name=") >= 0 or data.strip().find("makelist") >= 0:
            plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [LiveStreams][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
        else:
            plugintools.runAddon(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [LiveStreams][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=False)
    elif data.startswith("plugin://plugin.video.stalker"):
        # Substitute the placeholder MAC with the one fetched remotely.
        mac = plugintools.read('https://copy.com/HuEtREKgnvlc9XrS')  # Mac Arena+
        data = data.replace("MAC_STALKER", mac).strip()
        plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [Stalker][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=False)
    elif data.startswith("plugin://plugin.video.dailymotion_com"):  # Dailymotion (2.1.5)
        if data.find("mode=showPlaylist") >= 0:
            plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [Dailymotion Playlist][/I][/COLOR]', url=data, info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
        else:
            plugintools.runAddon(action="play", title='[COLOR white]' + title + '[COLOR lightyellow][I] [Dailymotion Video][/I][/COLOR]', url=data, info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=False, isPlayable=False)
    elif data.startswith("plugin://plugin.video.ArenaDevil"):  # ArenaDevil modules
        plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [ArenaDevil][/I][/COLOR]', url=data, info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
    elif data.startswith("plugin://plugin.video.videodevil"):  # VideoDevil modules
        plugintools.add_item(action="play", title='[COLOR white]' + title + '[COLOR lightyellow][I] [VideoDevil][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
    elif data.startswith("plugin://plugin.video.pelisalacarta"):  # Pelisalacarta
        plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [Pelisalacarta][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
    elif data.startswith("plugin://script.extendedinfo"):
        plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [ExtendedInfo][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
    elif data.startswith("plugin://plugin.video.pulsar/movie/"):
        plugintools.add_item(action="runPlugin", title='[COLOR white]' + title + '[COLOR lightyellow][I] [Pulsar][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
    else:
        # Unknown plugin URL: expose it as a generic playable entry.
        plugintools.add_item(action="play", title='[COLOR white]' + title + '[COLOR lightyellow][I] [Addon][/I][/COLOR]', url=data.strip(), info_labels=datamovie, thumbnail=thumbnail, fanart=fanart, folder=True, isPlayable=False)
def p2p_builder_url(url, title_fixed, p2p):
    """Build the ``plugin://`` launch URL for a P2P resource.

    :param url: raw resource locator (acestream id, sopcast url, torrent url
        or magnet link).
    :param title_fixed: display name forwarded to the launcher addon.
    :param p2p: kind of resource: "ace" | "sop" | "torrent" | "magnet".
    :return: the rewritten plugin:// URL (or ``url`` unchanged when no
        launcher matches the current settings).
    """
    if p2p == "ace":
        p2p_launcher = plugintools.get_setting("p2p_launcher")
        plugintools.log("p2p_launcher= "+p2p_launcher)
        if p2p_launcher == "0":  # Plexus
            url = 'plugin://program.plexus/?url='+url+'&mode=1&name='+title_fixed
        else:  # P2P-Streams
            url = 'plugin://plugin.video.p2p-streams/?url='+url+'&mode=1&name='+title_fixed
    elif p2p == "sop":
        p2p_launcher = plugintools.get_setting("p2p_launcher")
        plugintools.log("p2p_launcher= "+p2p_launcher)
        if p2p_launcher == "0":  # Plexus
            url = 'plugin://program.plexus/?url='+url+'&mode=2&name='+title_fixed
        else:  # P2P-Streams
            url = 'plugin://plugin.video.p2p-streams/?url='+url+'&mode=2&name='+title_fixed
    elif p2p == "torrent":
        url = urllib.quote_plus(url)
        # NOTE(review): this setting is compared against display names while
        # "addon_magnet" below uses numeric indices — confirm both match the
        # addon's settings.xml definitions.
        addon_torrent = plugintools.get_setting("addon_torrent")
        if addon_torrent == "Stream":  # Stream (por defecto)
            url = 'plugin://plugin.video.stream/play/'+url
        elif addon_torrent == "Pulsar":  # Pulsar
            url = 'plugin://plugin.video.pulsar/play?uri=' + url
        elif addon_torrent == "XBMCtorrent":  # XBMCtorrent
            # BUG FIX: this branch previously built a Quasar URL (copy/paste
            # from the Quasar branch), so choosing XBMCtorrent silently
            # launched Quasar.  Point it at xbmctorrent, matching the magnet
            # branch below and launch_torrent().
            url = 'plugin://plugin.video.xbmctorrent/play?uri=' + url
        elif addon_torrent == "Plexus":  # Plexus
            url = 'plugin://program.plexus/?url=' + url
        elif addon_torrent == "Quasar":  # Quasar
            url = 'plugin://plugin.video.quasar/play?uri=' + url
        elif addon_torrent == "YATP":  # YATP
            url = 'plugin://plugin.video.yatp/play?uri=' + url
    elif p2p == "magnet":
        addon_magnet = plugintools.get_setting("addon_magnet")
        if addon_magnet == "0":  # Stream (por defecto)
            url = 'plugin://plugin.video.stream/play/'+url
        elif addon_magnet == "1":  # Pulsar
            url = 'plugin://plugin.video.pulsar/play?uri=' + url
        elif addon_magnet == "2":  # Kmediatorrent
            url = 'plugin://plugin.video.kmediatorrent/play/'+url
        elif addon_magnet == "3":  # XBMCtorrent
            url = 'plugin://plugin.video.xbmctorrent/play?uri=' + url
        elif addon_magnet == "4":  # Quasar
            url = 'plugin://plugin.video.quasar/play?uri=' + url
        elif addon_magnet == "5":  # YATP
            url = 'plugin://plugin.video.yatp/play?uri=' + url
    plugintools.log("[%s %s] Creando llamada para URL P2P... %s " % (addonName, addonVersion, url))
    return url
# Ordered (substring, server-id) table for video_analyzer().  Order matters:
# the FIRST matching substring wins, mirroring the original elif chain, so a
# more specific name must precede any pattern it contains.
_VIDEO_SERVER_PATTERNS = [
    ("allmyvideos", "allmyvideos"),
    ("vidspot", "vidspot"),
    ("played", "playedto"),
    ("streamin.to", "streaminto"),
    ("streamcloud", "streamcloud"),
    ("nowvideo", "nowvideo"),
    ("veehd", "veehd"),
    ("vk", "vk"),
    ("lidplay", "vk"),
    ("tumi", "tumi"),
    ("novamov", "novamov"),
    ("moevideos", "moevideos"),
    ("gamovideo", "gamovideo"),
    ("movshare", "movshare"),
    ("powvideo", "powvideo"),
    ("mail.ru", "mailru"),
    ("mediafire", "mediafire"),
    ("netu", "netu"),
    ("waaw", "waaw"),
    ("movreel", "movreel"),
    ("videobam", "videobam"),
    ("vimeo/videos", "vimeo"),
    ("vimeo/channels", "vimeo_pl"),
    ("veetle", "veetle"),
    ("videoweed", "videoweed"),
    ("streamable", "streamable"),
    ("rocvideo", "rocvideo"),
    ("realvid", "realvid"),
    ("videomega", "videomega"),
    ("video.tt", "videott"),
    ("flashx", "flashx"),
    ("openload", "openload"),
    ("turbovideos", "turbovideos"),
    ("ok.ru", "okru"),
    # BUG FIX: "vidtodo" must be tested before "vidto"; the original chain
    # checked "vidto" first, so every vidtodo URL was misclassified as
    # vidtome (server_analyzer() already handled this correctly).
    ("vidtodo", "vidtodo"),
    ("vidto", "vidtome"),
    ("playwire", "playwire"),
    ("copiapop", "copiapop"),
    ("vimple", "vimple"),
    ("vidgg", "vidggto"),
    ("uptostream", "uptostream"),
    ("youwatch", "youwatch"),
    ("idowatch", "idowatch"),
    ("cloudtime", "cloudtime"),
    ("allvid", "allvid"),
    ("vodlocker", "vodlocker"),
    ("vidzi", "vidzitv"),
    ("streame", "streamenet"),
    ("myvideoz", "myvideoz"),
    ("streamplay", "streamplay"),
    ("watchonline", "watchonline"),
    ("rutube", "rutube"),
    ("dailymotion", "dailymotion"),
    ("auroravid", "auroravid"),
    ("wholecloud", "wholecloud"),
    ("bitvid", "bitvid"),
    ("spruto", "spruto"),
    ("stormo", "stormo"),
    ("myvi.ru", "myviru"),
    ("youtube", "youtube"),
    ("filmon", "filmon"),
    ("thevideo.me", "thevideome"),
    ("videowood", "videowood"),
    ("neodrive", "neodrive"),
    ("cloudzilla", "cloudzilla"),
    ("thevideobee", "thevideobee"),
    ("fileshow", "fileshow"),
    ("vid.ag", "vid"),
    ("vidxtreme", "vidxtreme"),
    ("vidup", "vidup"),
    ("watchvideo", "watchvideo"),
    ("speedvid", "speedvid"),
    ("chefti.info", "exashare"),
    ("ajihezo.info", "exashare"),
    ("erd9x4.info", "exashare"),
    ("bojem3a.info", "exashare"),
    ("vodbeast", "vodbeast"),
    ("nosvideo", "nosvideo"),
    ("noslocker", "noslocker"),
    ("up2stream", "up2stream"),
    ("diskokosmiko", "diskokosmiko"),
    ("smartvid", "smartvid"),
    ("greevid", "greevid"),
    ("letwatch", "letwatch"),
    ("yourupload", "yourupload"),
    ("zalaa", "zalaa"),
    ("uploadc", "uploadc"),
    ("mp4upload", "mp4upload"),
    ("rapidvideo", "rapidvideo"),
    ("yourvideohost", "yourvideohost"),
    ("watchers", "watchers"),
    ("izanagi", "izanagi"),
    ("yotta", "yotta"),
    ("kami", "kami"),
    ("touchfile", "touchfile"),
    ("zstream", "zstream"),
    ("vodlock", "vodlock"),
    ("goodvideohost", "goodvideohost"),
    ("happystreams", "happystreams"),
]


def video_analyzer(url):
    """Return the internal server identifier for a hoster URL.

    Scans ``url`` against the ordered substring table above and returns the
    first matching server id, or ``'unknown'`` when nothing matches.
    """
    plugintools.log("[%s %s] Análisis de URL de vídeo... " % (addonName, addonVersion))
    for pattern, server in _VIDEO_SERVER_PATTERNS:
        if pattern in url:
            return server
    return 'unknown'
def server_analyzer(params):
    """Dispatch ``params["url"]`` to the matching resolver from resources.tools.resolvers.

    Each branch tests the URL for a hoster substring and hands ``params`` to
    that hoster's resolver function (star-imported at the top of the file).
    Order matters: the first matching substring wins, so more specific
    patterns appear before their prefixes (e.g. "vidto.me" before "vidtodo").
    NOTE(review): the per-branch ``params["url"]=url_final`` re-assignment
    looks like a no-op (the value was just read from that key) — presumably a
    leftover from an older signature; confirm before simplifying.
    Falls through silently (returns None) when no hoster matches.
    """
    plugintools.log("[%s %s] Análisis de Servidores de vídeo... " % (addonName, addonVersion))
    url_final = params.get("url")
    plugintools.log(">>>>> Analizando Servidor Para la Url= "+ url_final)
    if url_final.find("allmyvideos") >= 0: params["url"]=url_final; allmyvideos(params)
    elif url_final.find("vidspot") >= 0: params["url"]=url_final; vidspot(params)
    elif url_final.find("played.to") >= 0: params["url"]=url_final; playedto(params)
    elif url_final.find("streamin.to") >= 0: params["url"]=url_final; streaminto(params)
    elif url_final.find("streamcloud") >= 0: params["url"]=url_final; streamcloud(params)
    elif url_final.find("nowvideo.sx") >= 0: params["url"]=url_final; nowvideo(params)
    elif url_final.find("veehd") >= 0: params["url"]=url_final; veehd(params)
    elif url_final.find("vk") >= 0: params["url"]=url_final; vk(params)
    elif url_final.find("lidplay") >= 0: params["url"]=url_final; vk(params)
    elif url_final.find("tumi.tv") >= 0: params["url"]=url_final; tumi(params)
    elif url_final.find("novamov") >= 0: params["url"]=url_final; novamov(params)
    elif url_final.find("moevideos") >= 0: params["url"]=url_final; moevideos(params)
    elif url_final.find("gamovideo") >= 0: params["url"]=url_final; gamovideo(params)
    elif url_final.find("movshare") >= 0: params["url"]=url_final; movshare(params)
    elif url_final.find("powvideo") >= 0: params["url"]=url_final; powvideo(params)
    elif url_final.find("mail.ru") >= 0: params["url"]=url_final; mailru(params)
    elif url_final.find("mediafire") >= 0: params["url"]=url_final; mediafire(params)
    elif url_final.find("netu") >= 0: params["url"]=url_final; netu(params)
    elif url_final.find("waaw") >= 0: params["url"]=url_final; waaw(params)
    elif url_final.find("movreel") >= 0: params["url"]=url_final; movreel(params)
    elif url_final.find("videobam") >= 0: params["url"]=url_final; videobam(params)
    elif url_final.find("vimeo/videos") >= 0: params["url"]=url_final; vimeo(params)
    elif url_final.find("vimeo/channels") >= 0: params["url"]=url_final; vimeo_pl(params)
    elif url_final.find("veetle") >= 0: params["url"]=url_final; veetle(params)
    elif url_final.find("videoweed") >= 0: params["url"]=url_final; videoweed(params)
    elif url_final.find("streamable") >= 0: params["url"]=url_final; streamable(params)
    elif url_final.find("rocvideo") >= 0: params["url"]=url_final; rocvideo(params)
    elif url_final.find("realvid") >= 0: params["url"]=url_final; realvid(params)
    elif url_final.find("videomega") >= 0: params["url"]=url_final; videomega(params)
    elif url_final.find("video.tt") >= 0: params["url"]=url_final; videott(params)
    elif url_final.find("flashx") >= 0: params["url"]=url_final; flashx(params)
    elif url_final.find("openload") >= 0: params["url"]=url_final; openload(params)
    elif url_final.find("turbovideos") >= 0: params["url"]=url_final; turbovideos(params)
    elif url_final.find("ok.ru") >= 0: params["url"]=url_final; okru(params)
    elif url_final.find("vidto.me") >= 0: params["url"]=url_final; vidtome(params)
    elif url_final.find("playwire") >= 0: params["url"]=url_final; playwire(params)
    elif url_final.find("copiapop") >= 0: params["url"]=url_final; copiapop(params)
    elif url_final.find("vimple.ru") >= 0: params["url"]=url_final; vimple(params)
    elif url_final.find("vidgg") >= 0: params["url"]=url_final; vidggto(params)
    elif url_final.find("uptostream.com") >= 0: params["url"]=url_final; uptostream(params)
    elif url_final.find("youwatch") >= 0: params["url"]=url_final; youwatch(params)
    elif url_final.find("smed79") >= 0: params["url"]=url_final; youwatch(params)
    elif url_final.find("idowatch") >= 0: params["url"]=url_final; idowatch(params)
    elif url_final.find("cloudtime") >= 0: params["url"]=url_final; cloudtime(params)
    elif url_final.find("allvid") >= 0: params["url"]=url_final; allvid(params)
    elif url_final.find("vodlocker") >= 0: params["url"]=url_final; vodlocker(params)
    elif url_final.find("vidzi.tv") >= 0: params["url"]=url_final; vidzitv(params)
    elif url_final.find("streame.net") >= 0: params["url"]=url_final; streamenet(params)
    elif url_final.find("myvideoz") >= 0: params["url"]=url_final; myvideoz(params)
    elif url_final.find("streamplay") >= 0: params["url"]=url_final; streamplay(params)
    elif url_final.find("watchonline") >= 0: params["url"]=url_final; watchonline(params)
    elif url_final.find("rutube") >= 0: params["url"]=url_final; rutube(params)
    elif url_final.find("dailymotion") >= 0: params["url"]=url_final; dailymotion(params)
    elif url_final.find("auroravid") >= 0: params["url"]=url_final; auroravid(params)
    elif url_final.find("wholecloud") >= 0: params["url"]=url_final; wholecloud(params)
    elif url_final.find("bitvid") >= 0: params["url"]=url_final; bitvid(params)
    elif url_final.find("spruto") >= 0: params["url"]=url_final; spruto(params)
    elif url_final.find("stormo") >= 0: params["url"]=url_final; stormo(params)
    elif url_final.find("myvi.ru") >= 0: params["url"]=url_final; myviru(params)
    elif url_final.find("youtube.com") >= 0: params["url"]=url_final; youtube(params)
    elif url_final.find("filmon.com") >= 0: params["url"]=url_final; filmon(params)
    elif url_final.find("thevideo.me") >= 0: params["url"]=url_final; thevideome(params)
    elif url_final.find("videowood.tv") >= 0: params["url"]=url_final; videowood(params)
    elif url_final.find("neodrive.co") >= 0: params["url"]=url_final; neodrive(params)
    elif url_final.find("cloudzilla") >= 0: params["url"]=url_final; cloudzilla(params)
    elif url_final.find("thevideobee.to") >= 0: params["url"]=url_final; thevideobee(params)
    elif url_final.find("fileshow.tv") >= 0: params["url"]=url_final; fileshow(params)
    elif url_final.find("vid.ag") >= 0: params["url"]=url_final; vid(params)
    elif url_final.find("vidxtreme.to") >= 0: params["url"]=url_final; vidxtreme(params)
    elif url_final.find("vidup") >= 0: params["url"]=url_final; vidup(params)
    elif url_final.find("watchvideo") >= 0: params["url"]=url_final; watchvideo(params)
    elif url_final.find("speedvid") >= 0: params["url"]=url_final; speedvid(params)
    elif url_final.find("chefti.info") >= 0: params["url"]=url_final; exashare(params)
    elif url_final.find("ajihezo.info") >= 0: params["url"]=url_final; exashare(params)
    elif url_final.find("bojem3a.info") >= 0: params["url"]=url_final; exashare(params)
    elif url_final.find("erd9x4.info") >= 0: params["url"]=url_final; exashare(params)
    elif url_final.find("vodbeast") >= 0: params["url"]=url_final; vodbeast(params)
    elif url_final.find("nosvideo") >= 0: params["url"]=url_final; nosvideo(params)
    elif url_final.find("noslocker") >= 0: params["url"]=url_final; noslocker(params)
    elif url_final.find("up2stream") >= 0: params["url"]=url_final; up2stream(params)
    elif url_final.find("diskokosmiko") >= 0: params["url"]=url_final; diskokosmiko(params)
    elif url_final.find("smartvid") >= 0: params["url"]=url_final; smartvid(params)
    elif url_final.find("greevid") >= 0: params["url"]=url_final; greevid(params)
    elif url_final.find("letwatch") >= 0: params["url"]=url_final; letwatch(params)
    elif url_final.find("yourupload") >= 0: params["url"]=url_final; yourupload(params)
    elif url_final.find("zalaa") >= 0: params["url"]=url_final; zalaa(params)
    elif url_final.find("uploadc") >= 0: params["url"]=url_final; uploadc(params)
    elif url_final.find("mp4upload") >= 0: params["url"]=url_final; mp4upload(params)
    elif url_final.find("rapidvideo") >= 0: params["url"]=url_final; rapidvideo(params)
    elif url_final.find("yourvideohost") >= 0: params["url"]=url_final; yourvideohost(params)
    elif url_final.find("watchers") >= 0: params["url"]=url_final; watchers(params)
    elif url_final.find("vidtodo") >= 0: params["url"]=url_final; vidtodo(params)
    elif url_final.find("izanagi") >= 0: params["url"]=url_final; izanagi(params)
    elif url_final.find("yotta") >= 0: params["url"]=url_final; yotta(params)
    elif url_final.find("kami") >= 0: params["url"]=url_final; kami(params)
    elif url_final.find("touchfile") >= 0: params["url"]=url_final; touchfile(params)
    elif url_final.find("zstream") >= 0: params["url"]=url_final; zstream(params)
    elif url_final.find("vodlock") >= 0: params["url"]=url_final; vodlock(params)
    elif url_final.find("goodvideohost") >= 0: params["url"]=url_final; goodvideohost(params)
    elif url_final.find("happystreams") >= 0: params["url"]=url_final; happystreams(params)
def parser_title(title):
    """Strip Kodi colour/formatting markup and label tags from *title*.

    Returns a cleaned string safe to use as a filename: every "[...]" tag is
    removed, stray "/", "[", "]" characters are dropped, HTML-escaped quotes
    are restored, and a trailing " .plx" is normalised to ".plx".
    """
    cyd = title
    # Remove every bracketed tag ([COLOR ...], [B], [I], [Lista M3U], ...).
    patcolor = plugintools.find_multiple_matches(cyd, '\[([^\]]+)')
    for entry in patcolor:
        cyd = cyd.replace('[' + entry + ']', "")
    # Drop leftover slashes/brackets so the result is a valid filename, and
    # restore escaped double quotes.
    # NOTE(review): the "&quot;" entity here is reconstructed — the original
    # source was mangled at this replace; confirm against version control.
    cyd = cyd.replace("/", "").replace("[", "").replace("]", "").replace("&quot;", '"')
    # The original then chained ~20 more .replace() calls for literals such as
    # "[/COLOR]", " [Lista M3U]", " [CBZ]", " [Multilink]"...  Every one of
    # those patterns contains "[", "]" or "/", all of which were just removed
    # above, so none could ever match: they were provable no-ops and have
    # been dropped.
    # BUG FIX: the ".plx" normalisation used to modify the local `title`
    # variable and then return `cyd`, so it never had any effect.  Apply it
    # to the value actually returned.
    if cyd.endswith(" .plx"):
        cyd = cyd.replace(" .plx", ".plx")
    return cyd
def launch_magnet(params):
    """Launch a magnet link through the torrent addon selected in settings."""
    plugintools.log('[%s %s] launch_magnet... %s' % (addonName, addonVersion, repr(params)))
    url = params.get("url")
    addon_magnet = plugintools.get_setting("addon_magnet")
    plugintools.log("Addon para ejecutar archivo Magnet: "+addon_magnet)
    # url = urllib.quote_plus(url)
    # Setting index -> (log label, plugin URL prefix).  An unrecognised index
    # (e.g. "5"/YATP, which had no branch originally either) leaves the URL
    # untouched.
    launchers = {
        "0": ("Stream", 'plugin://plugin.video.stream/play/'),          # default
        "1": ("Pulsar", 'plugin://plugin.video.pulsar/play?uri='),
        "2": ("KMediaTorrent", 'plugin://plugin.video.kmediatorrent/play/'),
        "3": ("XBMCtorrent", 'plugin://plugin.video.xbmctorrent/play/'),
        "4": ("Quasar", 'plugin://plugin.video.quasar/play?uri='),
    }
    if addon_magnet in launchers:
        label, prefix = launchers[addon_magnet]
        url = prefix + url
        plugintools.log("Iniciando " + label + "... " + url)
    plugintools.log("Magnet URL= "+url)
    plugintools.play_resolved_url(url)
def launch_torrent(params):
    """Launch a .torrent URL through the torrent addon selected in settings."""
    plugintools.log('[%s %s] launch_torrent... %s' % (addonName, addonVersion, repr(params)))
    url = params.get("url")
    addon_torrent = plugintools.get_setting("addon_torrent")
    # url = urllib.quote_plus(url)
    # Simple prefix-style launchers: setting index -> (log label, URL prefix).
    simple = {
        "0": ("Stream", 'plugin://plugin.video.stream/play/'),   # default
        "1": ("Pulsar", 'plugin://plugin.video.pulsar/play?uri='),
        "2": ("XBMCtorrent", 'plugin://plugin.video.xbmctorrent/play/'),
        "4": ("Quasar", 'plugin://plugin.video.quasar/play?uri='),
    }
    if addon_torrent in simple:
        label, prefix = simple[addon_torrent]
        url = prefix + url
        plugintools.log("Iniciando " + label + "... " + url)
    elif addon_torrent == "3":
        # Plexus is special-cased: it logged the raw URL *before* rewriting
        # it, and wraps the target inside an "http://" query parameter.
        plugintools.log("Iniciando Plexus... " + url)
        url = 'plugin://plugin.program.plexus/?url=http://' + url + '&mode=1&name='
    plugintools.log("Torrent File= "+url)
    plugintools.play_resolved_url(url)
def devil_analyzer(url, ref):
    """Wrap a stream URL in a SportsDevil "catcher" call and run it in Kodi.

    :param url: stream URL to hand to SportsDevil's generic stream catcher.
    :param ref: HTTP referer the catcher must present when fetching the URL.

    BUG FIX: the body referenced an undefined name ``referer`` while the
    parameter is called ``ref``, so every call raised NameError.
    """
    url = 'plugin://plugin.video.SportsDevil/?mode=1&item=catcher%3dstreams%26url=' + url + '%26referer=' + ref
    xbmc.executebuiltin('XBMC.RunPlugin(' + url + ')')
| gpl-2.0 |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/aio/operations/_express_route_service_providers_operations.py | 1 | 5138 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
# Generic payload type, and the signature of the optional ``cls`` callback
# callers may pass to transform the raw pipeline response into their own type.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated code (see file header) — hand edits other than
# comments will be lost when the client is regenerated.
class ExpressRouteServiceProvidersOperations:
    """ExpressRouteServiceProvidersOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    def list(
        self,
        **kwargs
    ) -> AsyncIterable["_models.ExpressRouteServiceProviderListResult"]:
        """Gets all the available express route service providers.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteServiceProviderListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_07_01.models.ExpressRouteServiceProviderListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteServiceProviderListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"

        # Builds the GET request: the first page targets the operation URL
        # with query parameters, follow-up pages hit the service-supplied
        # next_link verbatim.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        # Deserializes one page into (next_link, items), applying the optional
        # caller-supplied `cls` transform to the item list.
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('ExpressRouteServiceProviderListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        # Fetches one page over the pipeline; anything but HTTP 200 becomes an
        # HttpResponseError (or a mapped auth/not-found/conflict error).
        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteServiceProviders'}  # type: ignore
| mit |
techtonik/readthedocs.org | readthedocs/core/utils/__init__.py | 23 | 3304 | import getpass
import logging
import os
from urlparse import urlparse
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import get_template
from readthedocs.builds.constants import LATEST
from readthedocs.builds.constants import LATEST_VERBOSE_NAME
from readthedocs.builds.models import Build
log = logging.getLogger(__name__)
SYNC_USER = getattr(settings, 'SYNC_USER', getpass.getuser())
def run_on_app_servers(command):
    """
    Run a shell command on every configured app server.

    If ``MULTIPLE_APP_SERVERS`` is set in the Django settings, *command* is
    executed on each listed server over SSH as ``SYNC_USER`` and the exit
    status of the last failing server (0 if all succeeded) is returned.
    Otherwise the command is run locally and its ``os.system`` status is
    returned.  (The old docstring claimed this copies a single file; it is
    a general remote-command helper.)
    """
    log.info("Running %s on app servers" % command)
    ret_val = 0
    if getattr(settings, "MULTIPLE_APP_SERVERS", None):
        for server in settings.MULTIPLE_APP_SERVERS:
            # Remember the last non-zero status but keep going on the
            # remaining servers.
            ret = os.system("ssh %s@%s %s" % (SYNC_USER, server, command))
            if ret != 0:
                ret_val = ret
        return ret_val
    else:
        ret = os.system(command)
        return ret
def clean_url(url):
    """Return the host portion of *url*.

    Accepts full URLs (``http://host/path``), scheme-relative URLs
    (``//host/path``) and bare host names; in the bare-host case the
    parsed ``path`` component is the host.
    """
    parts = urlparse(url)
    if parts.scheme or parts.netloc:
        return parts.netloc
    # No scheme and no netloc: urlparse put the whole host into `path`.
    return parts.path
def cname_to_slug(host):
    """
    Resolve *host*'s CNAME record and return the first label of the target
    domain (e.g. a CNAME pointing at ``myproject.readthedocs.org.`` would
    yield ``myproject``).

    Performs a live DNS query via the third-party ``dnspython`` package;
    raises if *host* has no CNAME record.
    """
    from dns import resolver
    # resolver.query may return several answers; only the first is used.
    answer = [ans for ans in resolver.query(host, 'CNAME')][0]
    domain = answer.target.to_unicode()
    slug = domain.split('.')[0]
    return slug
def trigger_build(project, version=None, record=True, force=False, basic=False):
    """
    An API to wrap the triggering of a build.

    Queues an ``update_docs`` task (via ``.delay``) for *project*.  When
    *version* is not given, the project's LATEST version is used.  When
    *record* is True a ``Build`` row is created in state 'triggered' and its
    pk is handed to the task.

    :returns: the created ``Build`` instance, or ``None`` when *record* is
        False or the project has ``skip`` set (in which case nothing is
        queued at all).
    """
    # Avoid circular import
    from readthedocs.projects.tasks import update_docs

    if project.skip:
        return None

    if not version:
        version = project.versions.get(slug=LATEST)

    if record:
        build = Build.objects.create(
            project=project,
            version=version,
            type='html',
            state='triggered',
            success=True,
        )
        update_docs.delay(pk=project.pk, version_pk=version.pk, record=record,
                          force=force, basic=basic, build_pk=build.pk)
    else:
        build = None
        update_docs.delay(pk=project.pk, version_pk=version.pk, record=record,
                          force=force, basic=basic)

    return build
def send_email(recipient, subject, template, template_html, context=None,
               request=None):
    '''
    Send multipart email

    recipient
        Email recipient address

    subject
        Email subject header

    template
        Plain text template to send

    template_html
        HTML template to send as new message part

    context
        A dictionary to pass into the template calls; may be omitted

    request
        Request object for determining absolute URL
    '''
    # Bug fix: the original did `ctx.update(context)`, which raised
    # TypeError when the documented default context=None was used, and it
    # wrote 'uri' into the *caller's* dict.  Work on a copy instead.
    ctx = dict(context) if context else {}
    if request:
        scheme = 'https' if request.is_secure() else 'http'
        ctx['uri'] = '{scheme}://{host}'.format(scheme=scheme,
                                                host=request.get_host())
    msg = EmailMultiAlternatives(
        subject,
        get_template(template).render(ctx),
        settings.DEFAULT_FROM_EMAIL,
        [recipient]
    )
    msg.attach_alternative(get_template(template_html).render(ctx), 'text/html')
    msg.send()
| mit |
nielsvanoch/django | tests/model_regress/models.py | 40 | 2244 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Integer choices for Article.status (value stored, label displayed).
CHOICES = (
    (1, 'first'),
    (2, 'second'),
)


@python_2_unicode_compatible
class Article(models.Model):
    # Regression-test model: exercises defaults, nullable choice fields,
    # blank fields and a non-ASCII verbose_name.
    headline = models.CharField(max_length=100, default='Default headline')
    pub_date = models.DateTimeField()
    status = models.IntegerField(blank=True, null=True, choices=CHOICES)
    misc_data = models.CharField(max_length=100, blank=True)
    article_text = models.TextField()

    class Meta:
        ordering = ('pub_date', 'headline')
        # A utf-8 verbose name (Ångström's Articles) to test they are valid.
        verbose_name = "\xc3\x85ngstr\xc3\xb6m's Articles"

    def __str__(self):
        return self.headline
class Movie(models.Model):
    #5218: Test models with non-default primary keys / AutoFields
    movie_id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=60)


class Party(models.Model):
    # Nullable date field (date-only, no time component).
    when = models.DateField(null=True)


class Event(models.Model):
    when = models.DateTimeField()


@python_2_unicode_compatible
class Department(models.Model):
    # Manually-assigned integer primary key (no AutoField).
    id = models.PositiveIntegerField(primary_key=True)
    name = models.CharField(max_length=200)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Worker(models.Model):
    department = models.ForeignKey(Department)
    name = models.CharField(max_length=200)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class BrokenUnicodeMethod(models.Model):
    name = models.CharField(max_length=7)

    def __str__(self):
        # Intentionally broken (invalid start byte in byte string): used to
        # test error handling when __str__ itself raises.
        return b'Name\xff: %s'.decode() % self.name


class NonAutoPK(models.Model):
    # String primary key instead of an implicit AutoField.
    name = models.CharField(max_length=10, primary_key=True)
#18432: Chained foreign keys with to_field produce incorrect query
# Model3 -> Model2 -> Model1, each FK targeting a non-pk unique column.
class Model1(models.Model):
    pkey = models.IntegerField(unique=True, db_index=True)


class Model2(models.Model):
    model1 = models.ForeignKey(Model1, unique=True, to_field='pkey')


class Model3(models.Model):
    model2 = models.ForeignKey(Model2, unique=True, to_field='model1')
| bsd-3-clause |
infinnovation/micropython | docs/conf.py | 2 | 11264 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MicroPython documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 21 11:42:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Work out the port to generate the docs for
from collections import OrderedDict
micropy_port = os.getenv('MICROPY_PORT') or 'pyboard'
tags.add('port_' + micropy_port)
ports = OrderedDict((
('unix', 'unix'),
('pyboard', 'the pyboard'),
('wipy', 'the WiPy'),
('esp8266', 'the ESP8266'),
))
# The members of the html_context dict are available inside topindex.html
micropy_version = os.getenv('MICROPY_VERSION') or 'latest'
micropy_all_versions = (os.getenv('MICROPY_ALL_VERSIONS') or 'latest').split(',')
url_pattern = '%s/en/%%s/%%s' % (os.getenv('MICROPY_URL_PREFIX') or '/',)
html_context = {
'port':micropy_port,
'port_name':ports[micropy_port],
'port_version':micropy_version,
'all_ports':[
(port_id, url_pattern % (micropy_version, port_id))
for port_id, port_name in ports.items()
],
'all_versions':[
(ver, url_pattern % (ver, micropy_port))
for ver in micropy_all_versions
],
'downloads':[
('PDF', url_pattern % (micropy_version, 'micropython-%s.pdf' % micropy_port)),
],
}
# Specify a custom master document based on the port name
master_doc = micropy_port + '_' + 'index'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx_selective_exclude.modindex_exclude',
'sphinx_selective_exclude.eager_only',
'sphinx_selective_exclude.search_auto_exclude',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#master_doc = 'index'
# General information about the project.
project = 'MicroPython'
copyright = '2014-2017, Damien P. George, Paul Sokolovsky, and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# We don't follow "The short X.Y version" vs "The full version, including alpha/beta/rc tags"
# breakdown, so use the same version identifier for both to avoid confusion.
version = release = '1.9.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', '.venv']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Global include files. Sphinx docs suggest using rst_epilog in preference
# of rst_prolog, so we follow. Absolute paths below mean "from the base
# of the doctree".
rst_epilog = """
.. include:: /templates/replace.inc
"""
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
# True when building on readthedocs.org (RTD sets this env var itself).
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:  # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme
        html_theme = 'sphinx_rtd_theme'
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
    except ImportError:
        # Theme not installed locally: fall back to the builtin default.
        # (Bug fix: this was a bare `except:`, which also swallowed
        # unrelated errors raised during the import.)
        html_theme = 'default'
        html_theme_path = ['.']
else:
    # On RTD the platform injects its own theme; only the path is needed.
    html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '../../logo/trans-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "topindex.html"}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Include 3 levels of headers in PDF ToC
'preamble': '\setcounter{tocdepth}{2}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MicroPython.tex', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'micropython', 'MicroPython Documentation',
['Damien P. George, Paul Sokolovsky, and contributors'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MicroPython', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'MicroPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/3.5', None)}
# Append the other ports' specific folders/files to the exclude pattern
# Hide every other port's source tree from this port's build.
exclude_patterns.extend([port + '*' for port in ports if port != micropy_port])

# Modules that exist only on one specific port; entries for ports other
# than the one being built are dropped from the module index below.
modules_port_specific = {
    'pyboard': ['pyb'],
    'wipy': ['wipy'],
    'esp8266': ['esp'],
}

modindex_exclude = []
for p, l in modules_port_specific.items():
    if p != micropy_port:
        modindex_exclude += l

# Exclude extra modules per port
modindex_exclude += {
    'esp8266': ['cmath', 'select'],
    'wipy': ['cmath'],
}.get(micropy_port, [])
| mit |
caotianwei/django | django/utils/lorem_ipsum.py | 505 | 4960 | """
Utility functions for generating "lorem ipsum" Latin text.
"""
from __future__ import unicode_literals
import random
COMMON_P = (
'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod '
'tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim '
'veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea '
'commodo consequat. Duis aute irure dolor in reprehenderit in voluptate '
'velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint '
'occaecat cupidatat non proident, sunt in culpa qui officia deserunt '
'mollit anim id est laborum.'
)
WORDS = ('exercitationem', 'perferendis', 'perspiciatis', 'laborum', 'eveniet',
'sunt', 'iure', 'nam', 'nobis', 'eum', 'cum', 'officiis', 'excepturi',
'odio', 'consectetur', 'quasi', 'aut', 'quisquam', 'vel', 'eligendi',
'itaque', 'non', 'odit', 'tempore', 'quaerat', 'dignissimos',
'facilis', 'neque', 'nihil', 'expedita', 'vitae', 'vero', 'ipsum',
'nisi', 'animi', 'cumque', 'pariatur', 'velit', 'modi', 'natus',
'iusto', 'eaque', 'sequi', 'illo', 'sed', 'ex', 'et', 'voluptatibus',
'tempora', 'veritatis', 'ratione', 'assumenda', 'incidunt', 'nostrum',
'placeat', 'aliquid', 'fuga', 'provident', 'praesentium', 'rem',
'necessitatibus', 'suscipit', 'adipisci', 'quidem', 'possimus',
'voluptas', 'debitis', 'sint', 'accusantium', 'unde', 'sapiente',
'voluptate', 'qui', 'aspernatur', 'laudantium', 'soluta', 'amet',
'quo', 'aliquam', 'saepe', 'culpa', 'libero', 'ipsa', 'dicta',
'reiciendis', 'nesciunt', 'doloribus', 'autem', 'impedit', 'minima',
'maiores', 'repudiandae', 'ipsam', 'obcaecati', 'ullam', 'enim',
'totam', 'delectus', 'ducimus', 'quis', 'voluptates', 'dolores',
'molestiae', 'harum', 'dolorem', 'quia', 'voluptatem', 'molestias',
'magni', 'distinctio', 'omnis', 'illum', 'dolorum', 'voluptatum', 'ea',
'quas', 'quam', 'corporis', 'quae', 'blanditiis', 'atque', 'deserunt',
'laboriosam', 'earum', 'consequuntur', 'hic', 'cupiditate',
'quibusdam', 'accusamus', 'ut', 'rerum', 'error', 'minus', 'eius',
'ab', 'ad', 'nemo', 'fugit', 'officia', 'at', 'in', 'id', 'quos',
'reprehenderit', 'numquam', 'iste', 'fugiat', 'sit', 'inventore',
'beatae', 'repellendus', 'magnam', 'recusandae', 'quod', 'explicabo',
'doloremque', 'aperiam', 'consequatur', 'asperiores', 'commodi',
'optio', 'dolor', 'labore', 'temporibus', 'repellat', 'veniam',
'architecto', 'est', 'esse', 'mollitia', 'nulla', 'a', 'similique',
'eos', 'alias', 'dolore', 'tenetur', 'deleniti', 'porro', 'facere',
'maxime', 'corrupti')
COMMON_WORDS = ('lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur',
'adipisicing', 'elit', 'sed', 'do', 'eiusmod', 'tempor', 'incididunt',
'ut', 'labore', 'et', 'dolore', 'magna', 'aliqua')
def sentence():
    """Build one random lorem-ipsum sentence.

    The sentence is made of 1-5 comma-separated sections of 3-12 random
    words each; the first character is upper-cased and the sentence ends
    with either a period or a question mark.
    """
    section_count = random.randint(1, 5)
    sections = []
    for _ in range(section_count):
        word_count = random.randint(3, 12)
        sections.append(' '.join(random.sample(WORDS, word_count)))
    body = ', '.join(sections)
    # Sentence-case the text and pick the terminal punctuation.
    return '%s%s%s' % (body[0].upper(), body[1:], random.choice('?.'))
def paragraph():
    """Build one random lorem-ipsum paragraph of 1-4 sentences."""
    sentence_count = random.randint(1, 4)
    return ' '.join(sentence() for _ in range(sentence_count))
def paragraphs(count, common=True):
    """
    Returns a list of paragraphs as returned by paragraph().

    If `common` is True, then the first paragraph will be the standard
    'lorem ipsum' paragraph. Otherwise, the first paragraph will be random
    Latin text. Either way, subsequent paragraphs will be random Latin text.
    """
    # Idiom: build the list with a comprehension instead of a manual
    # append loop.
    return [COMMON_P if common and i == 0 else paragraph()
            for i in range(count)]
def words(count, common=True):
    """
    Returns a string of `count` lorem ipsum words separated by a single space.

    If `common` is True, then the first 19 words will be the standard
    'lorem ipsum' words. Otherwise, all words will be selected randomly.
    """
    result = list(COMMON_WORDS) if common else []
    if count <= len(result):
        # Enough common words already: trim to the requested length.
        del result[count:]
    else:
        # Top up with random samples; WORDS may need to be drawn from
        # several times when `count` exceeds its size.
        remaining = count - len(result)
        while remaining > 0:
            batch = min(remaining, len(WORDS))
            result.extend(random.sample(WORDS, batch))
            remaining -= batch
    return ' '.join(result)
| bsd-3-clause |
SurfasJones/icecream-info | icecream/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py | 374 | 3274 | # urllib3/exceptions.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
## Base Exceptions
class HTTPError(Exception):
    """Base exception used by this module."""
    pass


class PoolError(HTTPError):
    """Base exception for errors caused within a pool."""

    def __init__(self, pool, message):
        self.pool = pool
        HTTPError.__init__(self, "%s: %s" % (pool, message))

    def __reduce__(self):
        # Pickle without the (unpicklable) pool reference.
        return self.__class__, (None, None)


class RequestError(PoolError):
    """Base exception for PoolErrors that have associated URLs."""

    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)

    def __reduce__(self):
        # Pickle keeps the URL but drops the pool reference.
        return self.__class__, (None, self.url, None)


class SSLError(HTTPError):
    """Raised when SSL certificate fails in an HTTPS connection."""
    pass


class ProxyError(HTTPError):
    """Raised when the connection to a proxy fails."""
    pass


class DecodeError(HTTPError):
    """Raised when automatic decoding based on Content-Type fails."""
    pass


# -- Leaf exceptions ---------------------------------------------------------


class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded."""

    def __init__(self, pool, url, reason=None):
        self.reason = reason
        if reason:
            cause = " (Caused by %s: %s)" % (type(reason), reason)
        else:
            cause = " (Caused by redirect)"
        RequestError.__init__(
            self, pool, url, "Max retries exceeded with url: %s%s" % (url, cause))


class HostChangedError(RequestError):
    """Raised when an existing pool gets a request for a foreign host."""

    def __init__(self, pool, url, retries=3):
        RequestError.__init__(
            self, pool, url, "Tried to open a foreign host with url: %s" % url)
        self.retries = retries


class TimeoutStateError(HTTPError):
    """Raised when passing an invalid state to a timeout."""
    pass


class TimeoutError(HTTPError):
    """Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
    pass


class ReadTimeoutError(TimeoutError, RequestError):
    """Raised when a socket timeout occurs while receiving data from a server."""
    pass


# This timeout error does not have a URL attached and needs to inherit from
# the base HTTPError.
class ConnectTimeoutError(TimeoutError):
    """Raised when a socket timeout occurs while connecting to a server."""
    pass


class EmptyPoolError(PoolError):
    """Raised when a pool runs out of connections and no more are allowed."""
    pass


class ClosedPoolError(PoolError):
    """Raised when a request enters a pool after the pool has been closed."""
    pass


class LocationParseError(ValueError, HTTPError):
    """Raised when get_host or similar fails to parse the URL input."""

    def __init__(self, location):
        HTTPError.__init__(self, "Failed to parse: %s" % location)
        self.location = location
| mit |
marcosdiez/ansible-modules-extras | monitoring/boundary_meter.py | 26 | 8349 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to add boundary meters.
(c) 2013, curtis <curtis@serverascode.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
import datetime
import base64
import os
DOCUMENTATION = '''
module: boundary_meter
short_description: Manage boundary meters
description:
- This module manages boundary meters
version_added: "1.3"
author: "curtis (@ccollicutt)"
requirements:
- Boundary API access
- bprobe is required to send data, but not to register a meter
options:
name:
description:
- meter name
required: true
state:
description:
- Whether to create or remove the client from boundary
required: false
default: true
choices: ["present", "absent"]
apiid:
description:
- Organizations boundary API ID
required: true
apikey:
description:
- Organizations boundary API KEY
required: true
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
version_added: 1.5.1
notes:
- This module does not yet support boundary tags.
'''
EXAMPLES='''
- name: Create meter
boundary_meter: apiid=AAAAAA api_key=BBBBBB state=present name={{ inventory_hostname }}"
- name: Delete meter
boundary_meter: apiid=AAAAAA api_key=BBBBBB state=absent name={{ inventory_hostname }}"
'''
# Boundary API endpoint and the directory that holds the meter's
# certificate/key files.
api_host = "api.boundary.com"
config_directory = "/etc/bprobe"


def auth_encode(apikey):
    """Return the base64-encoded API key used for HTTP Basic auth.

    Bug fix: the original called ``auth.replace("\\n", "")`` but discarded
    the result (``str.replace`` returns a new string), so the stripped
    value was never returned.
    """
    auth = base64.standard_b64encode(apikey)
    return auth.replace("\n", "")


def build_url(name, apiid, action, meter_id=None, cert_type=None):
    """Construct the Boundary API URL for *action*.

    :param name: meter name (used only by the "search" action)
    :param apiid: organization API id (path component of every URL)
    :param meter_id: required for "certificates", "tags" and "delete"
    :param cert_type: "key" or "cert", for the "certificates" action
    :returns: the URL string, or ``None`` for an unknown action
    """
    if action == "create":
        return 'https://%s/%s/meters' % (api_host, apiid)
    elif action == "search":
        return "https://%s/%s/meters?name=%s" % (api_host, apiid, name)
    elif action == "certificates":
        return "https://%s/%s/meters/%s/%s.pem" % (api_host, apiid, meter_id, cert_type)
    elif action == "tags":
        return "https://%s/%s/meters/%s/tags" % (api_host, apiid, meter_id)
    elif action == "delete":
        return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id)
def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None):
    """Perform an authenticated request against the Boundary API.

    Builds the URL for *action* via build_url() and issues it through
    Ansible's fetch_url with Basic auth; returns the (response, info)
    pair that fetch_url produces.
    """
    if meter_id is None:
        url = build_url(name, apiid, action)
    elif cert_type is None:
        url = build_url(name, apiid, action, meter_id)
    else:
        url = build_url(name, apiid, action, meter_id, cert_type)

    headers = {
        "Authorization": "Basic %s" % auth_encode(apikey),
        "Content-Type": "application/json",
    }
    return fetch_url(module, url, data=data, headers=headers)
def create_meter(module, name, apiid, apikey):
    """Create the meter *name* unless it already exists, then ensure its
    certificate and key files are present in ``config_directory``.

    Exits the module via ``module.exit_json`` when the meter already exists
    and via ``module.fail_json`` on API/filesystem errors; returns
    ``(0, message)`` after a successful creation.
    """
    meters = search_meter(module, name, apiid, apikey)

    if len(meters) > 0:
        # If the meter already exists, do nothing
        module.exit_json(status="Meter " + name + " already exists",changed=False)
    else:
        # If it doesn't exist, create it
        body = '{"name":"' + name + '"}'
        response, info = http_request(module, name, apiid, apikey, data=body, action="create")
        if info['status'] != 200:
            module.fail_json(msg="Failed to connect to api host to create meter")

        # If the config directory doesn't exist, create it
        if not os.path.exists(config_directory):
            try:
                os.makedirs(config_directory)
            except:
                module.fail_json("Could not create " + config_directory)

        # Download both cert files from the api host
        types = ['key', 'cert']
        for cert_type in types:
            try:
                # If we can't open the file it's not there, so we should download it
                cert_file = open('%s/%s.pem' % (config_directory,cert_type))
            except IOError:
                # Now download the file...
                rc = download_request(module, name, apiid, apikey, cert_type)
                if rc == False:
                    module.fail_json("Download request for " + cert_type + ".pem failed")

        return 0, "Meter " + name + " created"
def search_meter(module, name, apiid, apikey):
    """Query the Boundary API for meters named *name*.

    Returns the decoded JSON list of matching meters (possibly empty);
    fails the module if the API does not answer with HTTP 200.
    """
    response, info = http_request(module, name, apiid, apikey, action="search")
    if info['status'] != 200:
        module.fail_json("Failed to connect to api host to search for meter")

    # Return meters
    return json.loads(response.read())
def get_meter_id(module, name, apiid, apikey):
    """Return the id of the meter named *name*, or None if it does not exist."""
    matches = search_meter(module, name, apiid, apikey)
    if not matches:
        return None
    return matches[0]['id']
def delete_meter(module, name, apiid, apikey):
    """Delete the meter named *name* and remove its local certificate files.

    :returns: ``(rc, message)``; rc 1 means the meter did not exist.
        Fails the module on API or filesystem errors.
    """
    meter_id = get_meter_id(module, name, apiid, apikey)

    if meter_id is None:
        return 1, "Meter does not exist, so can't delete it"

    # Bug fix: the original passed the undefined name `action` here, and
    # passed meter_id positionally into http_request's `data` parameter.
    response, info = http_request(module, name, apiid, apikey, "delete",
                                  meter_id=meter_id)
    if info['status'] != 200:
        module.fail_json("Failed to delete meter")

    # Each new meter gets a new key.pem and cert.pem file, so remove the
    # stale ones belonging to the deleted meter.
    for cert_type in ('cert', 'key'):
        cert_file = '%s/%s.pem' % (config_directory, cert_type)
        try:
            os.remove(cert_file)
        except OSError:
            module.fail_json("Failed to remove " + cert_type + ".pem file")

    return 0, "Meter " + name + " deleted"
def download_request(module, name, apiid, apikey, cert_type):
    """Download the meter's *cert_type* ("key" or "cert") PEM file into
    ``config_directory`` with owner-only permissions.

    :returns: True on success; fails the module otherwise.
    """
    meter_id = get_meter_id(module, name, apiid, apikey)
    if meter_id is None:
        module.fail_json("Could not get meter id")

    # Bug fixes: the original passed meter_id/cert_type positionally, which
    # shifted them into http_request's `data`/`meter_id` parameters, and it
    # then tested the undefined name `result`.
    response, info = http_request(module, name, apiid, apikey, "certificates",
                                  meter_id=meter_id, cert_type=cert_type)
    if info['status'] != 200:
        module.fail_json("Failed to connect to api host to download certificate")

    cert_file_path = '%s/%s.pem' % (config_directory, cert_type)
    try:
        body = response.read()
        cert_file = open(cert_file_path, 'w')
        cert_file.write(body)
        cert_file.close()
        # Private key material: restrict to owner read/write.
        os.chmod(cert_file_path, 0o600)
    except (IOError, OSError):
        module.fail_json("Could not write to certificate file")

    return True
def main():
    """Ansible entry point: create or delete the named Boundary meter."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=False),
            apikey=dict(required=True),
            apiid=dict(required=True),
            validate_certs=dict(default='yes', type='bool'),
        )
    )

    state = module.params['state']
    name = module.params['name']
    # Bug fix: the spec declares 'apikey'/'apiid', but the original read
    # module.params['api_key']/['api_id'], raising KeyError on every run.
    apikey = module.params['apikey']
    apiid = module.params['apiid']

    # `state` is required with exactly these two choices, so one branch runs.
    if state == "present":
        (rc, result) = create_meter(module, name, apiid, apikey)

    if state == "absent":
        (rc, result) = delete_meter(module, name, apiid, apikey)

    if rc != 0:
        module.fail_json(msg=result)

    module.exit_json(status=result, changed=True)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
rproepp/spykeutils | spykeutils/signal_processing.py | 1 | 19012 | import copy
import quantities as pq
import scipy as sp
import scipy.signal
import scipy.special
import tools
default_kernel_area_fraction = 0.99999
class Kernel(object):
""" Base class for kernels. """
    def __init__(self, kernel_size, normalize):
        """
        :param kernel_size: Parameter controlling the kernel size.
        :type kernel_size: Quantity 1D
        :param bool normalize: Whether to normalize the kernel to unit area.
        """
        # kernel_size is the default width used by __call__ when no
        # per-call size is given; normalize toggles the unit-area scaling.
        self.kernel_size = kernel_size
        self.normalize = normalize
def __call__(self, t, kernel_size=None):
""" Evaluates the kernel at all time points in the array `t`.
:param t: Time points to evaluate the kernel at.
:type t: Quantity 1D
:param kernel_size: If not `None` this overwrites the kernel size of
the `Kernel` instance.
:type kernel_size: Quantity scalar
:returns: The result of the kernel evaluations.
:rtype: Quantity 1D
"""
if kernel_size is None:
kernel_size = self.kernel_size
if self.normalize:
normalization = self.normalization_factor(kernel_size)
else:
normalization = 1.0 * pq.dimensionless
return self._evaluate(t, kernel_size) * normalization
def _evaluate(self, t, kernel_size):
""" Evaluates the kernel.
:param t: Time points to evaluate the kernel at.
:type t: Quantity 1D
:param kernel_size: Controls the width of the kernel.
:type kernel_size: Quantity scalar
:returns: The result of the kernel evaluations.
:rtype: Quantity 1D
"""
raise NotImplementedError()
def normalization_factor(self, kernel_size):
""" Returns the factor needed to normalize the kernel to unit area.
:param kernel_size: Controls the width of the kernel.
:type kernel_size: Quantity scalar
:returns: Factor to normalize the kernel to unit width.
:rtype: Quantity scalar
"""
raise NotImplementedError()
def boundary_enclosing_at_least(self, fraction):
""" Calculates the boundary :math:`b` so that the integral from
:math:`-b` to :math:`b` encloses at least a certain fraction of the
integral over the complete kernel.
:param float fraction: Fraction of the whole area which at least has to
be enclosed.
:returns: boundary
:rtype: Quantity scalar
"""
raise NotImplementedError()
def is_symmetric(self):
""" Should return `True` if the kernel is symmetric. """
return False
def summed_dist_matrix(self, vectors, presorted=False):
""" Calculates the sum of all element pair distances for each
pair of vectors.
If :math:`(a_1, \\dots, a_n)` and :math:`(b_1, \\dots, b_m)` are the
:math:`u`-th and :math:`v`-th vector from `vectors` and :math:`K` the
kernel, the resulting entry in the 2D array will be :math:`D_{uv}
= \\sum_{i=1}^{n} \\sum_{j=1}^{m} K(a_i - b_j)`.
:param sequence vectors: A sequence of Quantity 1D to calculate the
summed distances for each pair. The required units depend on the
kernel. Usually it will be the inverse unit of the kernel size.
:param bool presorted: Some optimized specializations of this function
may need sorted vectors. Set `presorted` to `True` if you know that
the passed vectors are already sorted to skip the sorting and thus
increase performance.
:rtype: Quantity 2D
"""
D = sp.empty((len(vectors), len(vectors)))
if len(vectors) > 0:
might_have_units = self(vectors[0])
if hasattr(might_have_units, 'units'):
D = D * might_have_units.units
else:
D = D * pq.dimensionless
for i, j in sp.ndindex(len(vectors), len(vectors)):
D[i, j] = sp.sum(self(
(vectors[i] - sp.atleast_2d(vectors[j]).T).flatten()))
return D
class KernelFromFunction(Kernel):
    """ Wraps a plain function as a (non-normalized) kernel.

    Please note that not all methods for such a kernel are implemented.
    """

    def __init__(self, kernel_func, kernel_size):
        # An arbitrary function has no known normalization factor, hence
        # normalization is disabled.
        Kernel.__init__(self, kernel_size, normalize=False)
        # Shadow the base class' abstract _evaluate() with the callable.
        self._evaluate = kernel_func

    def is_symmetric(self):
        # A generic function cannot be assumed to be symmetric.
        return False
def as_kernel_of_size(obj, kernel_size):
    """ Returns a kernel of desired size.

    :param obj: Either an existing kernel or a kernel function. A kernel
        function takes two arguments. First a `Quantity 1D` of evaluation time
        points and second a kernel size.
    :type obj: Kernel or func
    :param kernel_size: Desired size of the kernel.
    :type kernel_size: Quantity 1D
    :returns: A :class:`Kernel` with the desired kernel size. If `obj` is
        already a :class:`Kernel` instance, a shallow copy of this instance
        with changed kernel size will be returned. If `obj` is a function it
        will be wrapped in a :class:`Kernel` instance.
    :rtype: :class:`Kernel`
    """
    if not isinstance(obj, Kernel):
        # Bare callables get wrapped so they expose the Kernel interface.
        return KernelFromFunction(obj, kernel_size)
    # Shallow-copy the existing kernel and merely swap out its size.
    resized = copy.copy(obj)
    resized.kernel_size = kernel_size
    return resized
class SymmetricKernel(Kernel):
    """ Base class for kernels which are symmetric around zero. """

    def __init__(self, kernel_size, normalize):
        """
        :param kernel_size: Parameter controlling the kernel size.
        :type kernel_size: Quantity 1D
        :param bool normalize: Whether to normalize the kernel to unit area.
        """
        Kernel.__init__(self, kernel_size, normalize)

    def is_symmetric(self):
        return True

    def summed_dist_matrix(self, vectors, presorted=False):
        num = len(vectors)
        D = sp.empty((num, num))
        if num > 0:
            # Attach units to the result when the kernel returns a Quantity.
            probe = self(vectors[0])
            if hasattr(probe, 'units'):
                D = D * probe.units
        # Symmetry lets us evaluate only the upper triangle (including the
        # diagonal) and mirror each entry.
        for row in xrange(num):
            for col in xrange(row, num):
                total = sp.sum(self(
                    (vectors[row] - sp.atleast_2d(vectors[col]).T).flatten()))
                D[row, col] = D[col, row] = total
        return D
class CausalDecayingExpKernel(Kernel):
    r""" Causal exponential decay kernel.

    Unnormalized: :math:`K(t) = \exp(-\frac{t}{\tau}) \Theta(t)` with
    :math:`\Theta(t) = \left\{\begin{array}{ll}0, & x < 0\\ 1, & x \geq
    0\end{array}\right.` and kernel size :math:`\tau`.
    Normalized to unit area: :math:`K'(t) = \frac{1}{\tau} K(t)`
    """

    def __init__(self, kernel_size=1.0 * pq.s, normalize=True):
        Kernel.__init__(self, kernel_size, normalize)

    @staticmethod
    def evaluate(t, kernel_size):
        # The step function enforces causality: zero before t = 0,
        # exponential decay with time constant `kernel_size` afterwards.
        return sp.piecewise(
            t, [t < 0, t >= 0], [
                lambda t: 0,
                lambda t: sp.exp(
                    (-t * pq.dimensionless / kernel_size).simplified)])

    def _evaluate(self, t, kernel_size):
        return self.evaluate(t, kernel_size)

    def normalization_factor(self, kernel_size):
        # The integral of exp(-t / tau) over [0, inf) equals tau.
        return 1.0 / kernel_size

    def boundary_enclosing_at_least(self, fraction):
        # Inverse of the CDF 1 - exp(-b / tau) of the normalized kernel.
        return -self.kernel_size * sp.log(1.0 - fraction)
class GaussianKernel(SymmetricKernel):
    r""" Gaussian-shaped kernel.

    Unnormalized: :math:`K(t) = \exp(-\frac{t^2}{2 \sigma^2})` with kernel
    size :math:`\sigma` (corresponds to the standard deviation of a Gaussian
    distribution).
    Normalized to unit area: :math:`K'(t) = \frac{1}{\sigma \sqrt{2 \pi}} K(t)`
    """

    def __init__(self, kernel_size=1.0 * pq.s, normalize=True):
        Kernel.__init__(self, kernel_size, normalize)

    @staticmethod
    def evaluate(t, kernel_size):
        return sp.exp(
            -0.5 * (t * pq.dimensionless / kernel_size).simplified ** 2)

    def _evaluate(self, t, kernel_size):
        return self.evaluate(t, kernel_size)

    def normalization_factor(self, kernel_size):
        # The usual Gaussian normalization 1 / (sigma * sqrt(2 pi)).
        return 1.0 / (sp.sqrt(2.0 * sp.pi) * kernel_size)

    def boundary_enclosing_at_least(self, fraction):
        # Gaussian quantile expressed via the inverse error function.
        return self.kernel_size * sp.sqrt(2.0) * \
            scipy.special.erfinv(fraction + scipy.special.erf(0.0))
class LaplacianKernel(SymmetricKernel):
    r""" Unnormalized: :math:`K(t) = \exp(-|\frac{t}{\tau}|)` with kernel size
    :math:`\tau`.
    Normalized to unit area: :math:`K'(t) = \frac{1}{2 \tau} K(t)`
    """
    @staticmethod
    def evaluate(t, kernel_size):
        return sp.exp(
            -(sp.absolute(t) * pq.dimensionless / kernel_size).simplified)
    def _evaluate(self, t, kernel_size):
        return self.evaluate(t, kernel_size)
    def normalization_factor(self, kernel_size):
        # Area of exp(-|t / tau|) over the whole real line is 2 * tau.
        return 0.5 / kernel_size
    def __init__(self, kernel_size=1.0 * pq.s, normalize=True):
        Kernel.__init__(self, kernel_size, normalize)
    def boundary_enclosing_at_least(self, fraction):
        return -self.kernel_size * sp.log(1.0 - fraction)
    def summed_dist_matrix(self, vectors, presorted=False):
        # Optimized specialization of Kernel.summed_dist_matrix for the
        # Laplacian kernel.  This implementation is based on
        #
        # Houghton, C., & Kreuz, T. (2012). On the efficient calculation of van
        # Rossum distances. Network: Computation in Neural Systems, 23(1-2),
        # 48-58.
        #
        # Note that the cited paper contains some errors: In formula (9) the
        # left side of the equation should be divided by two and in the last
        # sum in this equation it should say `j|v_i >= u_i` instead of
        # `j|v_i > u_i`. Also, in equation (11) it should say `j|u_i >= v_i`
        # instead of `j|u_i > v_i`.
        #
        # Given N vectors with n entries on average the run-time complexity is
        # O(N^2 * n). O(N^2 + N * n) memory will be needed.
        if len(vectors) <= 0:
            return sp.zeros((0, 0))
        # The algorithm requires each vector to be sorted ascending.
        if not presorted:
            vectors = [v.copy() for v in vectors]
            for v in vectors:
                v.sort()
        sizes = sp.asarray([v.size for v in vectors])
        # `values[u]` holds the entries of vector u rescaled by the kernel
        # size; ragged rows are padded with NaN up to the longest vector.
        values = sp.empty((len(vectors), max(1, sizes.max())))
        values.fill(sp.nan)
        for i, v in enumerate(vectors):
            if v.size > 0:
                values[i, :v.size] = \
                    (v / self.kernel_size * pq.dimensionless).simplified
        # exp_diffs[u, i] = exp(values[u, i] - values[u, i + 1]): decay
        # between consecutive entries of the same vector.
        exp_diffs = sp.exp(values[:, :-1] - values[:, 1:])
        # markage[u, i] accumulates the decayed contribution of all earlier
        # entries of vector u to entry i (recurrence from the cited paper).
        markage = sp.zeros(values.shape)
        for u in xrange(len(vectors)):
            markage[u, 0] = 0
            for i in xrange(sizes[u] - 1):
                markage[u, i + 1] = (markage[u, i] + 1.0) * exp_diffs[u, i]
    # Same vector terms
        D = sp.empty((len(vectors), len(vectors)))
        D[sp.diag_indices_from(D)] = sizes + 2.0 * sp.sum(markage, axis=1)
    # Cross vector terms
        for u in xrange(D.shape[0]):
            all_ks = sp.searchsorted(values[u], values, 'left') - 1
            for v in xrange(u):
                # js/ks index, for each entry of one vector, the closest
                # preceding entry of the other vector.
                js = sp.searchsorted(values[v], values[u], 'right') - 1
                ks = all_ks[v]
                slice_j = sp.s_[sp.searchsorted(js, 0):sizes[u]]
                slice_k = sp.s_[sp.searchsorted(ks, 0):sizes[v]]
                D[u, v] = sp.sum(
                    sp.exp(values[v][js[slice_j]] - values[u][slice_j]) *
                    (1.0 + markage[v][js[slice_j]]))
                D[u, v] += sp.sum(
                    sp.exp(values[u][ks[slice_k]] - values[v][slice_k]) *
                    (1.0 + markage[u][ks[slice_k]]))
                # The matrix is symmetric by construction.
                D[v, u] = D[u, v]
        if self.normalize:
            normalization = self.normalization_factor(self.kernel_size)
        else:
            normalization = 1.0
        return normalization * D
class RectangularKernel(SymmetricKernel):
    r""" Box-car kernel.

    Unnormalized: :math:`K(t) = \left\{\begin{array}{ll}1, & |t| < \tau \\
    0, & |t| \geq \tau\end{array} \right.` with kernel size :math:`\tau`
    corresponding to the half width.
    Normalized to unit area: :math:`K'(t) = \frac{1}{2 \tau} K(t)`
    """

    def __init__(self, half_width=1.0 * pq.s, normalize=True):
        Kernel.__init__(self, half_width, normalize)

    @staticmethod
    def evaluate(t, half_width):
        # Boolean mask: one inside the open interval (-half_width, half_width).
        return sp.absolute(t) < half_width

    def _evaluate(self, t, kernel_size):
        return self.evaluate(t, kernel_size)

    def normalization_factor(self, half_width):
        # Total support width is 2 * half_width, hence the 0.5 factor.
        return 0.5 / half_width

    def boundary_enclosing_at_least(self, fraction):
        # Compact support: the half width always encloses the full area.
        return self.kernel_size
class TriangularKernel(SymmetricKernel):
    r""" Triangular kernel.

    Unnormalized: :math:`K(t) = \left\{ \begin{array}{ll}1
    - \frac{|t|}{\tau}, & |t| < \tau \\ 0, & |t| \geq \tau \end{array} \right.`
    with kernel size :math:`\tau` corresponding to the half width.
    Normalized to unit area: :math:`K'(t) = \frac{1}{\tau} K(t)`
    """

    def __init__(self, half_width=1.0 * pq.s, normalize=True):
        Kernel.__init__(self, half_width, normalize)

    @staticmethod
    def evaluate(t, half_width):
        # Linear ramp from 1 at t = 0 down to 0 at |t| = half_width,
        # clipped to zero outside the support.
        return sp.maximum(
            0.0,
            (1.0 - sp.absolute(t.rescale(half_width.units)) * pq.dimensionless /
             half_width).magnitude)

    def _evaluate(self, t, kernel_size):
        return self.evaluate(t, kernel_size)

    def normalization_factor(self, half_width):
        # Triangle area: base 2 * half_width, height 1 -> area half_width.
        return 1.0 / half_width

    def boundary_enclosing_at_least(self, fraction):
        # Compact support: the half width always encloses the full area.
        return self.kernel_size
def discretize_kernel(
        kernel, sampling_rate, area_fraction=default_kernel_area_fraction,
        num_bins=None, ensure_unit_area=False):
    """ Discretizes a kernel.

    :param kernel: The kernel or kernel function. If a kernel function is used
        it should take exactly one 1-D array as argument.
    :type kernel: :class:`Kernel` or function
    :param float area_fraction: Fraction between 0 and 1 (exclusive)
        of the integral of the kernel which will be at least covered by the
        discretization. Will be ignored if `num_bins` is not `None`. If
        `area_fraction` is used, the kernel has to provide a method
        :meth:`boundary_enclosing_at_least` (see
        :meth:`.Kernel.boundary_enclosing_at_least`).
    :param sampling_rate: Sampling rate for the discretization. The unit will
        typically be a frequency unit.
    :type sampling_rate: Quantity scalar
    :param int num_bins: Number of bins to use for the discretization.
    :param bool ensure_unit_area: If `True`, the area of the discretized
        kernel will be normalized to 1.0.
    :rtype: Quantity 1D
    """
    t_step = 1.0 / sampling_rate
    if num_bins is not None:
        # Explicit bin count: center the sampling window around zero.
        start = -num_bins // 2
        stop = num_bins // 2
    elif area_fraction is not None:
        # Derive the window from the kernel's own area boundary.
        boundary = kernel.boundary_enclosing_at_least(area_fraction)
        if hasattr(boundary, 'rescale'):
            boundary = boundary.rescale(t_step.units)
        start = sp.ceil(-boundary / t_step)
        stop = sp.floor(boundary / t_step) + 1
    else:
        raise ValueError(
            "One of area_fraction and num_bins must not be None.")
    discretized = kernel(sp.arange(start, stop) * t_step)
    if ensure_unit_area:
        # Renormalize so the sampled values integrate to one.
        discretized /= sp.sum(discretized) * t_step
    return discretized
def smooth(
        binned, kernel, sampling_rate, mode='same',
        **kernel_discretization_params):
    """ Smoothes a binned representation (e.g. of a spike train) by convolving
    with a kernel.

    :param binned: Bin array to smooth.
    :type binned: 1-D array
    :param kernel: The kernel instance to convolve with.
    :type kernel: :class:`Kernel`
    :param sampling_rate: The sampling rate which will be used to discretize
        the kernel. It should be equal to the sampling rate used to obtain
        `binned`. The unit will typically be a frequency unit.
    :type sampling_rate: Quantity scalar
    :param mode:
        * 'same': The default which returns an array of the same size as
          `binned`
        * 'full': Returns an array with a bin for each shift where `binned`
          and the discretized kernel overlap by at least one bin.
        * 'valid': Returns only the discretization bins where the discretized
          kernel and `binned` completely overlap.
        See also `numpy.convolve
        <http://docs.scipy.org/doc/numpy/reference/generated/numpy.convolve.html>`_.
    :type mode: {'same', 'full', 'valid'}
    :param dict kernel_discretization_params: Additional discretization
        arguments which will be passed to :func:`.discretize_kernel`.
    :returns: The smoothed representation of `binned`.
    :rtype: Quantity 1D
    """
    discretized = discretize_kernel(
        kernel, sampling_rate=sampling_rate, **kernel_discretization_params)
    # Convolution strips the units; re-attach those of the kernel samples.
    smoothed = scipy.signal.convolve(binned, discretized, mode)
    return smoothed * discretized.units
def st_convolve(
        train, kernel, sampling_rate, mode='same', binning_params=None,
        kernel_discretization_params=None):
    """ Convolves a :class:`neo.core.SpikeTrain` with a kernel.

    :param train: Spike train to convolve.
    :type train: :class:`neo.core.SpikeTrain`
    :param kernel: The kernel instance to convolve with.
    :type kernel: :class:`Kernel`
    :param sampling_rate: The sampling rate which will be used to bin
        the spike train. The unit will typically be a frequency unit.
    :type sampling_rate: Quantity scalar
    :param mode:
        * 'same': The default which returns an array covering the whole
          duration of the spike train `train`.
        * 'full': Returns an array with additional discretization bins in the
          beginning and end so that for each spike the whole discretized
          kernel is included.
        * 'valid': Returns only the discretization bins where the discretized
          kernel and spike train completely overlap.
        See also :func:`scipy.signal.convolve`.
    :type mode: {'same', 'full', 'valid'}
    :param dict binning_params: Additional discretization arguments which will
        be passed to :func:`.tools.bin_spike_trains`.
    :param dict kernel_discretization_params: Additional discretization
        arguments which will be passed to :func:`.discretize_kernel`.
    :returns: The convolved spike train, the boundaries of the discretization
        bins
    :rtype: (Quantity 1D, Quantity 1D with the inverse units of
        `sampling_rate`)
    """
    if binning_params is None:
        binning_params = {}
    if kernel_discretization_params is None:
        kernel_discretization_params = {}
    # bin_spike_trains takes/returns {unit: [arrays]}; we pass a single train
    # under key 0 and unwrap it again below.
    binned, bins = tools.bin_spike_trains(
        {0: [train]}, sampling_rate, **binning_params)
    binned = binned[0][0]
    #sampling_rate = binned.size / (bins[-1] - bins[0])
    result = smooth(
        binned, kernel, sampling_rate, mode, **kernel_discretization_params)
    # 'full'/'valid' modes add or remove the same number of bins at both
    # ends, so the surplus must be even and can be split equally.
    assert (result.size - binned.size) % 2 == 0
    num_additional_bins = (result.size - binned.size) // 2
    if len(binned):
        # Extend the bin edges symmetrically to cover the extra bins.
        bins = sp.linspace(
            bins[0] - num_additional_bins / sampling_rate,
            bins[-1] + num_additional_bins / sampling_rate,
            result.size + 1)
    else:
        # Empty train: multiplying an empty list by a unit yields an empty
        # Quantity array in seconds.
        bins = [] * pq.s
    return result, bins
| bsd-3-clause |
NormandyTechnologyInc/moka-icon-theme | render-bitmaps-hidpi.py | 1 | 6615 | #!/usr/bin/python3
#
import os
import sys
import xml.sax
import subprocess
INKSCAPE = '/usr/bin/inkscape'
OPTIPNG = '/usr/bin/optipng'
MAINDIR = 'Moka'
SOURCES = ('src/#', 'src/A', 'src/B', 'src/C', 'src/D', 'src/E', 'src/F', 'src/G', 'src/H', 'src/I', 'src/J', 'src/K', 'src/L', 'src/M', 'src/N', 'src/O', 'src/P', 'src/Q', 'src/R', 'src/S', 'src/T', 'src/U', 'src/V', 'src/W', 'src/web', 'src/X', 'src/Y', 'src/Z')
inkscape_process = None
def main(SRC):
    """Render PNGs for every 'Baseplate' slice rectangle in the SVGs of SRC.

    With no command-line arguments every ``*.svg`` in SRC is rendered;
    otherwise ``sys.argv[1]`` names a single SVG (without extension) and any
    further arguments restrict rendering to those icon names.  Output is
    written below MAINDIR as ``<width>x<height>@2x/<context>/<icon>.png``.
    """
    def optimize_png(png_file):
        # Losslessly recompress the rendered PNG when optipng is installed.
        if os.path.exists(OPTIPNG):
            process = subprocess.Popen([OPTIPNG, '-quiet', '-o7', png_file])
            process.wait()
    def wait_for_prompt(process, command=None):
        # Optionally send a command to inkscape's shell, then block until it
        # prints its '>' prompt again.
        if command is not None:
            process.stdin.write((command+'\n').encode('utf-8'))
        # This is kinda ugly ...
        # Wait for just a '>', or '\n>' if some other char appearead first
        output = process.stdout.read(1)
        if output == b'>':
            return
        output += process.stdout.read(1)
        while output != b'\n>':
            output += process.stdout.read(1)
        output = output[1:]
    def start_inkscape():
        # One long-lived `inkscape --shell` process serves all renders.
        process = subprocess.Popen([INKSCAPE, '--shell'], bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        wait_for_prompt(process)
        return process
    def inkscape_render_rect(icon_file, rect, output_file):
        # Render one slice rect at 180 dpi (2x scale), lazily starting the
        # shared module-level inkscape process on first use.
        global inkscape_process
        if inkscape_process is None:
            inkscape_process = start_inkscape()
        wait_for_prompt(inkscape_process, '--export-dpi=180 %s -i %s -e %s' % (icon_file, rect, output_file))
        optimize_png(output_file)
    class ContentHandler(xml.sax.ContentHandler):
        # SAX handler that walks an icon SVG looking for layers whose label
        # starts with 'Baseplate'; each such layer carries a 'context' text,
        # an 'icon-name' text and the slice rectangles to export.
        ROOT = 0
        SVG = 1
        LAYER = 2
        OTHER = 3
        TEXT = 4
        def __init__(self, path, force=False, filter=None):
            self.stack = [self.ROOT]
            self.inside = [self.ROOT]
            self.path = path
            self.rects = []
            self.state = self.ROOT
            self.chars = ""
            self.force = force
            self.filter = filter
        def endDocument(self):
            pass
        def startElement(self, name, attrs):
            if self.inside[-1] == self.ROOT:
                if name == "svg":
                    self.stack.append(self.SVG)
                    self.inside.append(self.SVG)
                return
            elif self.inside[-1] == self.SVG:
                if (name == "g" and ('inkscape:groupmode' in attrs) and ('inkscape:label' in attrs)
                   and attrs['inkscape:groupmode'] == 'layer' and attrs['inkscape:label'].startswith('Baseplate')):
                    self.stack.append(self.LAYER)
                    self.inside.append(self.LAYER)
                    self.context = None
                    self.icon_name = None
                    self.rects = []
                    return
            elif self.inside[-1] == self.LAYER:
                if name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'context':
                    self.stack.append(self.TEXT)
                    self.inside.append(self.TEXT)
                    self.text='context'
                    self.chars = ""
                    return
                elif name == "text" and ('inkscape:label' in attrs) and attrs['inkscape:label'] == 'icon-name':
                    self.stack.append(self.TEXT)
                    self.inside.append(self.TEXT)
                    self.text='icon-name'
                    self.chars = ""
                    return
                elif name == "rect":
                    self.rects.append(attrs)
            self.stack.append(self.OTHER)
        def endElement(self, name):
            stacked = self.stack.pop()
            if self.inside[-1] == stacked:
                self.inside.pop()
            if stacked == self.TEXT and self.text is not None:
                assert self.text in ['context', 'icon-name']
                if self.text == 'context':
                    self.context = self.chars
                elif self.text == 'icon-name':
                    self.icon_name = self.chars
                self.text = None
            elif stacked == self.LAYER:
                assert self.icon_name
                assert self.context
                # Honor an optional icon-name filter (single-SVG mode).
                if self.filter is not None and not self.icon_name in self.filter:
                    return
                print (self.context, self.icon_name)
                for rect in self.rects:
                    width = rect['width']
                    height = rect['height']
                    id = rect['id']
                    dir = os.path.join(MAINDIR, "%sx%s@2x" % (width, height), self.context)
                    outfile = os.path.join(dir, self.icon_name+'.png')
                    if not os.path.exists(dir):
                        os.makedirs(dir)
                    # Do a time based check!
                    if self.force or not os.path.exists(outfile):
                        inkscape_render_rect(self.path, id, outfile)
                        sys.stdout.write('.')
                    else:
                        stat_in = os.stat(self.path)
                        stat_out = os.stat(outfile)
                        # Re-render only when the SVG is newer than the PNG.
                        if stat_in.st_mtime > stat_out.st_mtime:
                            inkscape_render_rect(self.path, id, outfile)
                            sys.stdout.write('.')
                        else:
                            sys.stdout.write('-')
                    sys.stdout.flush()
                sys.stdout.write('\n')
                sys.stdout.flush()
        def characters(self, chars):
            self.chars += chars.strip()
    if len(sys.argv) == 1:
        # No arguments: render every SVG in the source directory.
        if not os.path.exists(MAINDIR):
            os.mkdir(MAINDIR)
        print ('')
        print ('Rendering from SVGs in', SRC)
        print ('')
        for file in os.listdir(SRC):
            if file[-4:] == '.svg':
                file = os.path.join(SRC, file)
                handler = ContentHandler(file)
                xml.sax.parse(open(file), handler)
        print ('')
    else:
        # Single-SVG mode: force a re-render, optionally only some icons.
        file = os.path.join(SRC, sys.argv[1] + '.svg')
        if len(sys.argv) > 2:
            icons = sys.argv[2:]
        else:
            icons = None
        if os.path.exists(os.path.join(file)):
            handler = ContentHandler(file, True, filter=icons)
            xml.sax.parse(open(file), handler)
        else:
            print ("Error: No such file", file)
            sys.exit(1)
# Render each source directory in turn; main() inspects sys.argv itself to
# decide between render-everything and single-SVG mode.
for source in SOURCES:
    SRC = os.path.join('.', source)
    main(SRC)
jscn/django | tests/gis_tests/gdal_tests/test_ds.py | 15 | 11594 | import os
import unittest
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from ..test_data import TEST_DATA, TestDS, get_ds_file
if HAS_GDAL:
from django.contrib.gis.gdal import DataSource, Envelope, OGRGeometry, GDALException, OGRIndexError, GDAL_VERSION
from django.contrib.gis.gdal.field import OFTReal, OFTInteger, OFTString
# List of acceptable data sources.
# Each TestDS bundles a fixture name with the expected metadata (feature
# count, field types, extent, SRS, values) that DataSourceTest asserts on.
ds_list = (
    TestDS(
        'test_point', nfeat=5, nfld=3, geom='POINT', gtype=1, driver='ESRI Shapefile',
        fields={'dbl': OFTReal, 'int': OFTInteger, 'str': OFTString},
        extent=(-1.35011, 0.166623, -0.524093, 0.824508),  # Got extent from QGIS
        srs_wkt=(
            'GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",'
            '6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",'
            '0.017453292519943295]]'
        ),
        field_values={
            'dbl': [float(i) for i in range(1, 6)],
            'int': list(range(1, 6)),
            'str': [str(i) for i in range(1, 6)],
        },
        fids=range(5)
    ),
    TestDS(
        'test_vrt', ext='vrt', nfeat=3, nfld=3, geom='POINT', gtype='Point25D',
        driver='OGR_VRT' if GDAL_VERSION >= (2, 0) else 'VRT',
        fields={
            'POINT_X': OFTString,
            'POINT_Y': OFTString,
            'NUM': OFTString,
        },  # VRT uses CSV, which all types are OFTString.
        extent=(1.0, 2.0, 100.0, 523.5),  # Min/Max from CSV
        field_values={
            'POINT_X': ['1.0', '5.0', '100.0'],
            'POINT_Y': ['2.0', '23.0', '523.5'],
            'NUM': ['5', '17', '23'],
        },
        fids=range(1, 4)
    ),
    TestDS(
        'test_poly', nfeat=3, nfld=3, geom='POLYGON', gtype=3,
        driver='ESRI Shapefile',
        fields={'float': OFTReal, 'int': OFTInteger, 'str': OFTString},
        extent=(-1.01513, -0.558245, 0.161876, 0.839637),  # Got extent from QGIS
        srs_wkt=(
            'GEOGCS["GCS_WGS_1984",DATUM["WGS_1984",SPHEROID["WGS_1984",'
            '6378137,298.257223563]],PRIMEM["Greenwich",0],UNIT["Degree",'
            '0.017453292519943295]]'
        ),
    )
)
# Nonexistent data source used to exercise the error path.
bad_ds = (TestDS('foo'),)
@skipUnless(HAS_GDAL, "GDAL is required")
class DataSourceTest(unittest.TestCase):
    """Tests for the GDAL DataSource wrapper, driven by the SHP/VRT
    fixtures described by ``ds_list`` and ``bad_ds`` above."""
    def test01_valid_shp(self):
        "Testing valid SHP Data Source files."
        for source in ds_list:
            # Loading up the data source
            ds = DataSource(source.ds)
            # Making sure the layer count is what's expected (only 1 layer in a SHP file)
            self.assertEqual(1, len(ds))
            # Making sure GetName works
            self.assertEqual(source.ds, ds.name)
            # Making sure the driver name matches up
            self.assertEqual(source.driver, str(ds.driver))
            # Making sure indexing works
            try:
                ds[len(ds)]
            except OGRIndexError:
                pass
            else:
                self.fail('Expected an IndexError!')
    def test02_invalid_shp(self):
        "Testing invalid SHP files for the Data Source."
        for source in bad_ds:
            with self.assertRaises(GDALException):
                DataSource(source.ds)
    def test03a_layers(self):
        "Testing Data Source Layers."
        for source in ds_list:
            ds = DataSource(source.ds)
            # Incrementing through each layer, this tests DataSource.__iter__
            for layer in ds:
                # Making sure we get the number of features we expect
                self.assertEqual(len(layer), source.nfeat)
                # Making sure we get the number of fields we expect
                self.assertEqual(source.nfld, layer.num_fields)
                self.assertEqual(source.nfld, len(layer.fields))
                # Testing the layer's extent (an Envelope), and its properties
                if source.driver == 'VRT' and (GDAL_VERSION >= (1, 7, 0) and GDAL_VERSION < (1, 7, 3)):
                    # There's a known GDAL regression with retrieving the extent
                    # of a VRT layer in versions 1.7.0-1.7.2:
                    #  http://trac.osgeo.org/gdal/ticket/3783
                    pass
                else:
                    self.assertIsInstance(layer.extent, Envelope)
                    self.assertAlmostEqual(source.extent[0], layer.extent.min_x, 5)
                    self.assertAlmostEqual(source.extent[1], layer.extent.min_y, 5)
                    self.assertAlmostEqual(source.extent[2], layer.extent.max_x, 5)
                    self.assertAlmostEqual(source.extent[3], layer.extent.max_y, 5)
                # Now checking the field names.
                flds = layer.fields
                for f in flds:
                    self.assertIn(f, source.fields)
                # Negative FIDs are not allowed.
                with self.assertRaises(OGRIndexError):
                    layer.__getitem__(-1)
                with self.assertRaises(OGRIndexError):
                    layer.__getitem__(50000)
                if hasattr(source, 'field_values'):
                    fld_names = source.field_values.keys()
                    # Testing `Layer.get_fields` (which uses Layer.__iter__)
                    for fld_name in fld_names:
                        self.assertEqual(source.field_values[fld_name], layer.get_fields(fld_name))
                    # Testing `Layer.__getitem__`.
                    for i, fid in enumerate(source.fids):
                        feat = layer[fid]
                        self.assertEqual(fid, feat.fid)
                        # Maybe this should be in the test below, but we might as well test
                        # the feature values here while in this loop.
                        for fld_name in fld_names:
                            self.assertEqual(source.field_values[fld_name][i], feat.get(fld_name))
    def test03b_layer_slice(self):
        "Test indexing and slicing on Layers."
        # Using the first data-source because the same slice
        # can be used for both the layer and the control values.
        source = ds_list[0]
        ds = DataSource(source.ds)
        sl = slice(1, 3)
        feats = ds[0][sl]
        for fld_name in ds[0].fields:
            test_vals = [feat.get(fld_name) for feat in feats]
            control_vals = source.field_values[fld_name][sl]
            self.assertEqual(control_vals, test_vals)
    def test03c_layer_references(self):
        """
        Ensure OGR objects keep references to the objects they belong to.
        """
        source = ds_list[0]
        # See ticket #9448.
        def get_layer():
            # This DataSource object is not accessible outside this
            # scope.  However, a reference should still be kept alive
            # on the `Layer` returned.
            ds = DataSource(source.ds)
            return ds[0]
        # Making sure we can call OGR routines on the Layer returned.
        lyr = get_layer()
        self.assertEqual(source.nfeat, len(lyr))
        self.assertEqual(source.gtype, lyr.geom_type.num)
        # Same issue for Feature/Field objects, see #18640
        self.assertEqual(str(lyr[0]['str']), "1")
    def test04_features(self):
        "Testing Data Source Features."
        for source in ds_list:
            ds = DataSource(source.ds)
            # Incrementing through each layer
            for layer in ds:
                # Incrementing through each feature in the layer
                for feat in layer:
                    # Making sure the number of fields, and the geometry type
                    # are what's expected.
                    self.assertEqual(source.nfld, len(list(feat)))
                    self.assertEqual(source.gtype, feat.geom_type)
                    # Making sure the fields match to an appropriate OFT type.
                    for k, v in source.fields.items():
                        # Making sure we get the proper OGR Field instance, using
                        # a string value index for the feature.
                        self.assertIsInstance(feat[k], v)
                    # Testing Feature.__iter__
                    for fld in feat:
                        self.assertIn(fld.name, source.fields.keys())
    def test05_geometries(self):
        "Testing Geometries from Data Source Features."
        for source in ds_list:
            ds = DataSource(source.ds)
            # Incrementing through each layer and feature.
            for layer in ds:
                for feat in layer:
                    g = feat.geom
                    # Making sure we get the right Geometry name & type
                    self.assertEqual(source.geom, g.geom_name)
                    self.assertEqual(source.gtype, g.geom_type)
                    # Making sure the SpatialReference is as expected.
                    if hasattr(source, 'srs_wkt'):
                        self.assertEqual(
                            source.srs_wkt,
                            # Depending on lib versions, WGS_84 might be WGS_1984
                            g.srs.wkt.replace('SPHEROID["WGS_84"', 'SPHEROID["WGS_1984"')
                        )
    def test06_spatial_filter(self):
        "Testing the Layer.spatial_filter property."
        ds = DataSource(get_ds_file('cities', 'shp'))
        lyr = ds[0]
        # When not set, it should be None.
        self.assertIsNone(lyr.spatial_filter)
        # Must be set a/an OGRGeometry or 4-tuple.
        with self.assertRaises(TypeError):
            lyr._set_spatial_filter('foo')
        # Setting the spatial filter with a tuple/list with the extent of
        # a buffer centering around Pueblo.
        with self.assertRaises(ValueError):
            lyr._set_spatial_filter(list(range(5)))
        filter_extent = (-105.609252, 37.255001, -103.609252, 39.255001)
        lyr.spatial_filter = (-105.609252, 37.255001, -103.609252, 39.255001)
        self.assertEqual(OGRGeometry.from_bbox(filter_extent), lyr.spatial_filter)
        feats = [feat for feat in lyr]
        self.assertEqual(1, len(feats))
        self.assertEqual('Pueblo', feats[0].get('Name'))
        # Setting the spatial filter with an OGRGeometry for buffer centering
        # around Houston.
        filter_geom = OGRGeometry(
            'POLYGON((-96.363151 28.763374,-94.363151 28.763374,'
            '-94.363151 30.763374,-96.363151 30.763374,-96.363151 28.763374))'
        )
        lyr.spatial_filter = filter_geom
        self.assertEqual(filter_geom, lyr.spatial_filter)
        feats = [feat for feat in lyr]
        self.assertEqual(1, len(feats))
        self.assertEqual('Houston', feats[0].get('Name'))
        # Clearing the spatial filter by setting it to None.  Now
        # should indicate that there are 3 features in the Layer.
        lyr.spatial_filter = None
        self.assertEqual(3, len(lyr))
    def test07_integer_overflow(self):
        "Testing that OFTReal fields, treated as OFTInteger, do not overflow."
        # Using *.dbf from Census 2010 TIGER Shapefile for Texas,
        # which has land area ('ALAND10') stored in a Real field
        # with no precision.
        ds = DataSource(os.path.join(TEST_DATA, 'texas.dbf'))
        feat = ds[0][0]
        # Reference value obtained using `ogrinfo`.
        self.assertEqual(676586997978, feat.get('ALAND10'))
| bsd-3-clause |
Teagan42/home-assistant | homeassistant/components/shell_command/__init__.py | 3 | 3285 | """Expose regular shell commands as services."""
import asyncio
import logging
import shlex
import voluptuous as vol
from homeassistant.core import ServiceCall
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
DOMAIN = "shell_command"
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.schema_with_slug_keys(cv.string)}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
    """Set up the shell_command component.

    Registers one service per configured command name; each service call
    renders the command's template arguments (if any) and executes the
    command, logging its output.
    """
    conf = config.get(DOMAIN, {})

    # Cache per command string: (program, raw args, compiled template).
    cache = {}

    async def async_service_handler(service: ServiceCall) -> None:
        """Execute a shell command service."""
        cmd = conf[service.service]

        if cmd in cache:
            prog, args, args_compiled = cache[cmd]
        elif " " not in cmd:
            # Bare program name: nothing to template.
            prog = cmd
            args = None
            args_compiled = None
            cache[cmd] = prog, args, args_compiled
        else:
            prog, args = cmd.split(" ", 1)
            args_compiled = template.Template(args, hass)
            cache[cmd] = prog, args, args_compiled

        if args_compiled:
            try:
                rendered_args = args_compiled.async_render(service.data)
            except TemplateError as ex:
                _LOGGER.exception("Error rendering command template: %s", ex)
                return
        else:
            rendered_args = None

        if rendered_args == args:
            # No template used. default behavior
            # pylint: disable=no-member
            create_process = asyncio.subprocess.create_subprocess_shell(
                cmd,
                loop=hass.loop,
                stdin=None,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
        else:
            # Template used. Break into list and use create_subprocess_exec
            # (which uses shell=False) for security
            shlexed_cmd = [prog] + shlex.split(rendered_args)
            # pylint: disable=no-member
            create_process = asyncio.subprocess.create_subprocess_exec(
                *shlexed_cmd,
                loop=hass.loop,
                stdin=None,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )

        process = await create_process
        stdout_data, stderr_data = await process.communicate()

        if stdout_data:
            _LOGGER.debug(
                "Stdout of command: `%s`, return code: %s:\n%s",
                cmd,
                process.returncode,
                stdout_data,
            )
        if stderr_data:
            _LOGGER.debug(
                "Stderr of command: `%s`, return code: %s:\n%s",
                cmd,
                process.returncode,
                stderr_data,
            )

        if process.returncode != 0:
            # BUG FIX: _LOGGER.exception outside an `except` block logs a
            # spurious "NoneType: None" traceback; there is no active
            # exception here, so log a plain error instead.
            _LOGGER.error(
                "Error running command: `%s`, return code: %s", cmd, process.returncode
            )

    for name in conf.keys():
        hass.services.async_register(DOMAIN, name, async_service_handler)
    return True
| apache-2.0 |
Alexander-M-Waldman/local_currency_site | lib/python2.7/site-packages/crispy_forms/templatetags/crispy_forms_tags.py | 6 | 10757 | # -*- coding: utf-8 -*-
from copy import copy
import django
from django.conf import settings
from django.forms.formsets import BaseFormSet
from django.template import Context
from django.template.loader import get_template
from django import template
from crispy_forms.helper import FormHelper
from crispy_forms.compatibility import lru_cache, string_types
register = template.Library()
# We import the filters, so they are available when doing load crispy_forms_tags
from crispy_forms.templatetags.crispy_forms_filters import *
from crispy_forms.utils import TEMPLATE_PACK, get_template_pack
class ForLoopSimulator(object):
    """
    Simulates a forloop tag, precisely::

        {% for form in formset.forms %}

    If `{% crispy %}` is rendering a formset with a helper, We inject a
    `ForLoopSimulator` object in the context as `forloop` so that formset
    forms can do things like::

        Fieldset("Item {{ forloop.counter }}", [...])
        HTML("{% if forloop.first %}First form text{% endif %}"
    """
    def __init__(self, formset):
        # Total number of forms the simulated loop iterates over.
        self.len_values = len(formset.forms)

        # Shortcuts for current loop iteration number (1-based and 0-based).
        self.counter = 1
        self.counter0 = 0
        # Reverse counter iteration numbers.
        self.revcounter = self.len_values
        self.revcounter0 = self.len_values - 1
        # Boolean values designating first and last times through loop.
        # `last` is True up front only when there is a single form.
        self.first = True
        self.last = (0 == self.len_values - 1)

    def iterate(self):
        """
        Updates values as if we had iterated over the for
        """
        self.counter += 1
        self.counter0 += 1
        self.revcounter -= 1
        self.revcounter0 -= 1
        self.first = False
        # BUG FIX: `last` must become True exactly on the final iteration,
        # i.e. when no items remain after the current one (revcounter0 == 0),
        # matching Django's built-in `forloop.last`. The previous expression
        # compared revcounter0 against `len_values - 1`, which can never be
        # True after iterate() has decremented revcounter0.
        self.last = (self.revcounter0 == 0)
def copy_context(context):
    """
    Return a shallow duplicate of a template ``Context``.

    Uses ``Context.__copy__`` when the object provides one (introduced in
    Django 1.3); otherwise falls back to ``copy.copy`` and clones the
    ``dicts`` list so the duplicate does not share its stack with the
    original.
    """
    copier = getattr(context, "__copy__", None)
    if copier is not None:
        return copier()
    clone = copy(context)
    clone.dicts = list(context.dicts)
    return clone
class BasicNode(template.Node):
    """
    Basic Node object that we can rely on for Node objects in normal
    template tags. I created this because most of the tags we'll be using
    will need both the form object and the helper string. This handles
    both the form object and parses out the helper string into attributes
    that templates can easily handle.
    """

    def __init__(self, form, helper, template_pack=None):
        # `form` and `helper` are stored as raw template-variable *names*;
        # they are only resolved against the context at render time.
        self.form = form
        if helper is not None:
            self.helper = helper
        else:
            self.helper = None
        # Fall back to the globally configured template pack when none given.
        self.template_pack = template_pack or get_template_pack()

    def get_render(self, context):
        """
        Returns a `Context` object with all the necessary stuff for rendering the form

        :param context: `django.template.Context` variable holding the context for the node

        `self.form` and `self.helper` are resolved into real Python objects resolving them
        from the `context`. The `actual_form` can be a form or a formset. If it's a formset
        `is_formset` is set to True. If the helper has a layout we use it, for rendering the
        form or the formset's forms.
        """
        # Nodes are not thread safe in multithreaded environments
        # https://docs.djangoproject.com/en/dev/howto/custom-template-tags/#thread-safety-considerations
        # The Variable objects are therefore cached per-render-context rather
        # than stored on self.
        if self not in context.render_context:
            context.render_context[self] = (
                template.Variable(self.form),
                template.Variable(self.helper) if self.helper else None
            )
        form, helper = context.render_context[self]
        actual_form = form.resolve(context)
        if self.helper is not None:
            helper = helper.resolve(context)
        else:
            # If the user names the helper within the form `helper` (standard), we use it
            # This allows us to have simplified tag syntax: {% crispy form %}
            helper = FormHelper() if not hasattr(actual_form, 'helper') else actual_form.helper

        # use template_pack from helper, if defined
        try:
            if helper.template_pack:
                self.template_pack = helper.template_pack
        except AttributeError:
            pass

        self.actual_helper = helper

        # We get the response dictionary
        is_formset = isinstance(actual_form, BaseFormSet)
        response_dict = self.get_response_dict(helper, context, is_formset)
        node_context = copy_context(context)
        node_context.update(response_dict)

        # If we have a helper's layout we use it, for the form or the formset's forms
        if helper and helper.layout:
            if not is_formset:
                actual_form.form_html = helper.render_layout(actual_form, node_context, template_pack=self.template_pack)
            else:
                # Inject a simulated `forloop` into the context so layouts can
                # reference counters while each formset form is rendered.
                forloop = ForLoopSimulator(actual_form)
                helper.render_hidden_fields = True
                for form in actual_form:
                    node_context.update({'forloop': forloop})
                    form.form_html = helper.render_layout(form, node_context, template_pack=self.template_pack)
                    forloop.iterate()
        if is_formset:
            response_dict.update({'formset': actual_form})
        else:
            response_dict.update({'form': actual_form})

        return Context(response_dict)

    def get_response_dict(self, helper, context, is_formset):
        """
        Returns a dictionary with all the parameters necessary to render the form/formset in a template.

        :param context: `django.template.Context` for the node
        :param is_formset: Boolean value. If set to True, indicates we are working with a formset.
        """
        if not isinstance(helper, FormHelper):
            raise TypeError('helper object provided to {% crispy %} tag must be a crispy.helper.FormHelper object.')

        attrs = helper.get_attributes(template_pack=self.template_pack)
        form_type = "form"
        if is_formset:
            form_type = "formset"

        # We take form/formset parameters from attrs if they are set, otherwise we use defaults
        response_dict = {
            'template_pack': self.template_pack,
            '%s_action' % form_type: attrs['attrs'].get("action", ''),
            '%s_method' % form_type: attrs.get("form_method", 'post'),
            '%s_tag' % form_type: attrs.get("form_tag", True),
            '%s_class' % form_type: attrs['attrs'].get("class", ''),
            '%s_id' % form_type: attrs['attrs'].get("id", ""),
            '%s_style' % form_type: attrs.get("form_style", None),
            'form_error_title': attrs.get("form_error_title", None),
            'formset_error_title': attrs.get("formset_error_title", None),
            'form_show_errors': attrs.get("form_show_errors", True),
            'help_text_inline': attrs.get("help_text_inline", False),
            'html5_required': attrs.get("html5_required", False),
            'form_show_labels': attrs.get("form_show_labels", True),
            'disable_csrf': attrs.get("disable_csrf", False),
            'inputs': attrs.get('inputs', []),
            'is_formset': is_formset,
            '%s_attrs' % form_type: attrs.get('attrs', ''),
            'flat_attrs': attrs.get('flat_attrs', ''),
            'error_text_inline': attrs.get('error_text_inline', True),
            'label_class': attrs.get('label_class', ''),
            'label_size': attrs.get('label_size', 0),
            'field_class': attrs.get('field_class', ''),
            'include_media': attrs.get('include_media', True),
        }

        # Handles custom attributes added to helpers
        for attribute_name, value in attrs.items():
            if attribute_name not in response_dict:
                response_dict[attribute_name] = value

        if 'csrf_token' in context:
            response_dict['csrf_token'] = context['csrf_token']

        return response_dict
@lru_cache()
def whole_uni_formset_template(template_pack=TEMPLATE_PACK):
    """Return (and memoize per pack) the whole-formset wrapper template."""
    template_name = '%s/whole_uni_formset.html' % template_pack
    return get_template(template_name)
@lru_cache()
def whole_uni_form_template(template_pack=TEMPLATE_PACK):
    """Return (and memoize per pack) the whole-form wrapper template."""
    template_name = '%s/whole_uni_form.html' % template_pack
    return get_template(template_name)
class CrispyFormNode(BasicNode):
    """Template node behind the ``{% crispy %}`` tag."""

    def render(self, context):
        """Resolve the form/formset and render it to HTML."""
        render_context = self.get_render(context)
        helper = self.actual_helper
        # A helper may name a custom template; otherwise fall back to the
        # cached default whole-form / whole-formset template of the pack.
        if helper is not None and getattr(helper, 'template', False):
            selected_template = get_template(helper.template)
        elif render_context['is_formset']:
            selected_template = whole_uni_formset_template(self.template_pack)
        else:
            selected_template = whole_uni_form_template(self.template_pack)
        # Django >= 1.8 template.render() expects a plain dict, not a Context.
        if django.VERSION >= (1, 8):
            render_context = render_context.flatten()
        return selected_template.render(render_context)
# {% crispy %} tag
@register.tag(name="crispy")
def do_uni_form(parser, token):
    """
    You need to pass in at least the form/formset object, and can also pass in the
    optional `crispy_forms.helpers.FormHelper` object.

    helper (optional): A `crispy_forms.helper.FormHelper` object.

    Usage::

        {% load crispy_tags %}
        {% crispy form form.helper %}

    You can also provide the template pack as the third argument::

        {% crispy form form.helper 'bootstrap' %}

    If the `FormHelper` attribute is named `helper` you can simply do::

        {% crispy form %}
        {% crispy form 'bootstrap' %}
    """
    bits = token.split_contents()
    form = bits.pop(1)
    helper = None
    template_pack = "'%s'" % get_template_pack()

    # Optional second argument: {% crispy form helper %}
    if len(bits) > 1:
        helper = bits.pop(1)

    # Optional third argument: {% crispy form helper 'bootstrap' %}
    if len(bits) > 1:
        template_pack = bits.pop(1)

    # Two-argument quoted form: {% crispy form 'bootstrap' %}. The second
    # token is quoted, so it was really the template pack, not a helper.
    if (
        helper is not None and
        isinstance(helper, string_types) and
        ("'" in helper or '"' in helper)
    ):
        template_pack = helper
        helper = None

    if template_pack is not None:
        # Strip the surrounding quote characters before validation.
        template_pack = template_pack[1:-1]
        ALLOWED_TEMPLATE_PACKS = getattr(
            settings,
            'CRISPY_ALLOWED_TEMPLATE_PACKS',
            ('bootstrap', 'uni_form', 'bootstrap3', 'bootstrap4')
        )
        if template_pack not in ALLOWED_TEMPLATE_PACKS:
            raise template.TemplateSyntaxError(
                "crispy tag's template_pack argument should be in %s" %
                str(ALLOWED_TEMPLATE_PACKS)
            )

    return CrispyFormNode(form, helper, template_pack=template_pack)
| gpl-3.0 |
deepsrijit1105/edx-platform | common/djangoapps/student/tests/test_views.py | 15 | 8209 | """
Test the student dashboard view.
"""
import unittest
import ddt
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import TestCase
from edx_oauth2_provider.constants import AUTHORIZED_CLIENTS_SESSION_KEY
from edx_oauth2_provider.tests.factories import ClientFactory, TrustedClientFactory
from mock import patch
from pyquery import PyQuery as pq
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from student.helpers import DISABLE_UNENROLL_CERT_STATES
from student.models import CourseEnrollment, LogoutViewConfiguration
from student.tests.factories import UserFactory, CourseEnrollmentFactory
PASSWORD = 'test'
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TestStudentDashboardUnenrollments(SharedModuleStoreTestCase):
    """
    Test to ensure that the student dashboard does not show the unenroll button for users with certificates.
    """
    # CSS selector for the first unenroll action on the dashboard page.
    UNENROLL_ELEMENT_ID = "#actions-item-unenroll-0"

    @classmethod
    def setUpClass(cls):
        super(TestStudentDashboardUnenrollments, cls).setUpClass()
        cls.course = CourseFactory.create()

    def setUp(self):
        """ Create a course and user, then log in. """
        super(TestStudentDashboardUnenrollments, self).setUp()
        self.user = UserFactory()
        CourseEnrollmentFactory(course_id=self.course.id, user=self.user)
        # Per-test certificate status consumed by mock_cert below.
        self.cert_status = None
        self.client.login(username=self.user.username, password=PASSWORD)

    def mock_cert(self, _user, _course_overview, _course_mode):
        """ Return a preset certificate status. """
        # Used as a side_effect for student.views.cert_info; unenrollment is
        # only allowed when the status is not in DISABLE_UNENROLL_CERT_STATES.
        if self.cert_status is not None:
            return {
                'status': self.cert_status,
                'can_unenroll': self.cert_status not in DISABLE_UNENROLL_CERT_STATES
            }
        else:
            return {}

    @ddt.data(
        ('notpassing', 1),
        ('restricted', 1),
        ('processing', 1),
        (None, 1),
        ('generating', 0),
        ('ready', 0),
    )
    @ddt.unpack
    def test_unenroll_available(self, cert_status, unenroll_action_count):
        """ Assert that the unenroll action is shown or not based on the cert status."""
        self.cert_status = cert_status
        with patch('student.views.cert_info', side_effect=self.mock_cert):
            response = self.client.get(reverse('dashboard'))
            # Count rendered unenroll actions in the dashboard HTML.
            self.assertEqual(pq(response.content)(self.UNENROLL_ELEMENT_ID).length, unenroll_action_count)

    @ddt.data(
        ('notpassing', 200),
        ('restricted', 200),
        ('processing', 200),
        (None, 200),
        ('generating', 400),
        ('ready', 400),
    )
    @ddt.unpack
    @patch.object(CourseEnrollment, 'unenroll')
    def test_unenroll_request(self, cert_status, status_code, course_enrollment):
        """ Assert that the unenroll method is called or not based on the cert status"""
        self.cert_status = cert_status
        with patch('student.views.cert_info', side_effect=self.mock_cert):
            response = self.client.post(
                reverse('change_enrollment'),
                {'enrollment_action': 'unenroll', 'course_id': self.course.id}
            )
            self.assertEqual(response.status_code, status_code)
            # The unenroll call must only happen when the server accepted the
            # request (HTTP 200); a 400 means unenrollment was disallowed.
            if status_code == 200:
                course_enrollment.assert_called_with(self.user, self.course.id)
            else:
                course_enrollment.assert_not_called()

    def test_no_cert_status(self):
        """ Assert that the dashboard loads when cert_status is None."""
        with patch('student.views.cert_info', return_value=None):
            response = self.client.get(reverse('dashboard'))
            self.assertEqual(response.status_code, 200)

    def test_cant_unenroll_status(self):
        """ Assert that the dashboard loads when cert_status does not allow for unenrollment"""
        with patch('certificates.models.certificate_status_for_student', return_value={'status': 'ready'}):
            response = self.client.get(reverse('dashboard'))
            self.assertEqual(response.status_code, 200)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class LogoutTests(TestCase):
    """ Tests for the logout functionality. """

    def setUp(self):
        """ Create a course and user, then log in. """
        super(LogoutTests, self).setUp()
        self.user = UserFactory()
        self.client.login(username=self.user.username, password=PASSWORD)
        # Enable the IDA logout view for these tests (disabled-case tested
        # explicitly in test_switch).
        LogoutViewConfiguration.objects.create(enabled=True)

    def create_oauth_client(self):
        """ Creates a trusted OAuth client. """
        client = ClientFactory(logout_uri='https://www.example.com/logout/')
        TrustedClientFactory(client=client)
        return client

    def assert_session_logged_out(self, oauth_client, **logout_headers):
        """ Authenticates a user via OAuth 2.0, logs out, and verifies the session is logged out. """
        self.authenticate_with_oauth(oauth_client)

        # Logging out should remove the session variables, and send a list of logout URLs to the template.
        # The template will handle loading those URLs and redirecting the user. That functionality is not tested here.
        response = self.client.get(reverse('logout'), **logout_headers)
        self.assertEqual(response.status_code, 200)
        self.assertNotIn(AUTHORIZED_CLIENTS_SESSION_KEY, self.client.session)

        return response

    def authenticate_with_oauth(self, oauth_client):
        """ Perform an OAuth authentication using the current web client.

        This should add an AUTHORIZED_CLIENTS_SESSION_KEY entry to the current session.
        """
        data = {
            'client_id': oauth_client.client_id,
            'client_secret': oauth_client.client_secret,
            'response_type': 'code'
        }
        # Authenticate with OAuth to set the appropriate session values
        self.client.post(reverse('oauth2:capture'), data, follow=True)
        self.assertListEqual(self.client.session[AUTHORIZED_CLIENTS_SESSION_KEY], [oauth_client.client_id])

    def assert_logout_redirects(self):
        """ Verify logging out redirects the user to the homepage. """
        response = self.client.get(reverse('logout'))
        self.assertRedirects(response, '/', fetch_redirect_response=False)

    def test_switch(self):
        """ Verify the IDA logout functionality is disabled if the associated switch is disabled. """
        LogoutViewConfiguration.objects.create(enabled=False)
        oauth_client = self.create_oauth_client()
        self.authenticate_with_oauth(oauth_client)
        self.assert_logout_redirects()

    def test_without_session_value(self):
        """ Verify logout works even if the session does not contain an entry with
        the authenticated OpenID Connect clients."""
        self.assert_logout_redirects()

    def test_client_logout(self):
        """ Verify the context includes a list of the logout URIs of the authenticated OpenID Connect clients.

        The list should only include URIs of the clients for which the user has been authenticated.
        """
        client = self.create_oauth_client()
        response = self.assert_session_logged_out(client)
        expected = {
            'logout_uris': [client.logout_uri + '?no_redirect=1'],  # pylint: disable=no-member
            'target': '/',
        }
        self.assertDictContainsSubset(expected, response.context_data)  # pylint: disable=no-member

    def test_filter_referring_service(self):
        """ Verify that, if the user is directed to the logout page from a service, that service's logout URL
        is not included in the context sent to the template.
        """
        client = self.create_oauth_client()
        response = self.assert_session_logged_out(client, HTTP_REFERER=client.logout_uri)  # pylint: disable=no-member
        expected = {
            'logout_uris': [],
            'target': '/',
        }
        self.assertDictContainsSubset(expected, response.context_data)  # pylint: disable=no-member
highweb-project/highweb-webcl-html5spec | build/android/pylib/results/flakiness_dashboard/json_results_generator_unittest.py | 40 | 7206 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Most of this file was ported over from Blink's
# webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
#
import unittest
import json
from pylib.results.flakiness_dashboard import json_results_generator
class JSONGeneratorTest(unittest.TestCase):
  """Tests JSON generation by JSONResultsGeneratorBase.

  Test names encode their expected result category via prefixes:
  DISABLED_, FLAKY_ and FAILS_; everything else counts as PASS.
  """

  def setUp(self):
    self.builder_name = 'DUMMY_BUILDER_NAME'
    self.build_name = 'DUMMY_BUILD_NAME'
    self.build_number = 'DUMMY_BUILDER_NUMBER'

    # For archived results.
    self._json = None
    self._num_runs = 0
    self._tests_set = set([])
    self._test_timings = {}
    self._failed_count_map = {}

    self._PASS_count = 0
    self._DISABLED_count = 0
    self._FLAKY_count = 0
    self._FAILS_count = 0
    self._fixable_count = 0

    # Stub out WriteJSON so tests never touch the filesystem; restored in
    # tearDown.
    self._orig_write_json = json_results_generator.WriteJSON

    # unused arguments ... pylint: disable=W0613
    def _WriteJSONStub(json_object, file_path, callback=None):
      pass

    json_results_generator.WriteJSON = _WriteJSONStub

  def tearDown(self):
    json_results_generator.WriteJSON = self._orig_write_json

  def _TestJSONGeneration(self, passed_tests_list, failed_tests_list):
    # Generates JSON for the given pass/fail lists and verifies the result.
    tests_set = set(passed_tests_list) | set(failed_tests_list)

    # Partition the tests by their name prefix into result categories.
    DISABLED_tests = set([t for t in tests_set
                          if t.startswith('DISABLED_')])
    FLAKY_tests = set([t for t in tests_set
                       if t.startswith('FLAKY_')])
    FAILS_tests = set([t for t in tests_set
                       if t.startswith('FAILS_')])
    PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests)

    # DISABLED tests are not counted as failures even if listed as failed.
    failed_tests = set(failed_tests_list) - DISABLED_tests
    failed_count_map = dict([(t, 1) for t in failed_tests])

    # Assign each test a distinct synthetic elapsed time.
    test_timings = {}
    i = 0
    for test in tests_set:
      test_timings[test] = float(self._num_runs * 100 + i)
      i += 1

    test_results_map = dict()
    for test in tests_set:
      test_results_map[test] = json_results_generator.TestResult(
          test, failed=(test in failed_tests),
          elapsed_time=test_timings[test])

    generator = json_results_generator.JSONResultsGeneratorBase(
        self.builder_name, self.build_name, self.build_number,
        '',
        None,  # don't fetch past json results archive
        test_results_map)

    failed_count_map = dict([(t, 1) for t in failed_tests])

    # Test incremental json results
    incremental_json = generator.GetJSON()
    self._VerifyJSONResults(
        tests_set,
        test_timings,
        failed_count_map,
        len(PASS_tests),
        len(DISABLED_tests),
        len(FLAKY_tests),
        len(DISABLED_tests | failed_tests),
        incremental_json,
        1)

    # We don't verify the results here, but at least we make sure the code
    # runs without errors.
    generator.GenerateJSONOutput()
    generator.GenerateTimesMSFile()

  def _VerifyJSONResults(self, tests_set, test_timings, failed_count_map,
                         PASS_count, DISABLED_count, FLAKY_count,
                         fixable_count, json_obj, num_runs):
    # Checks that json_obj contains the expected builder info, per-category
    # fixable counts, per-test failure counts, and timing entries.

    # Aliasing to a short name for better access to its constants.
    JRG = json_results_generator.JSONResultsGeneratorBase

    self.assertIn(JRG.VERSION_KEY, json_obj)
    self.assertIn(self.builder_name, json_obj)

    buildinfo = json_obj[self.builder_name]
    self.assertIn(JRG.FIXABLE, buildinfo)
    self.assertIn(JRG.TESTS, buildinfo)
    self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs)
    self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number)

    if tests_set or DISABLED_count:
      # Sum the per-run fixable dictionaries into one {result_type: count}.
      fixable = {}
      for fixable_items in buildinfo[JRG.FIXABLE]:
        for (result_type, count) in fixable_items.iteritems():
          if result_type in fixable:
            fixable[result_type] = fixable[result_type] + count
          else:
            fixable[result_type] = count

      if PASS_count:
        self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count)
      else:
        self.assertTrue(JRG.PASS_RESULT not in fixable or
                        fixable[JRG.PASS_RESULT] == 0)
      if DISABLED_count:
        self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count)
      else:
        self.assertTrue(JRG.SKIP_RESULT not in fixable or
                        fixable[JRG.SKIP_RESULT] == 0)
      if FLAKY_count:
        self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count)
      else:
        self.assertTrue(JRG.FLAKY_RESULT not in fixable or
                        fixable[JRG.FLAKY_RESULT] == 0)

    if failed_count_map:
      tests = buildinfo[JRG.TESTS]
      for test_name in failed_count_map.iterkeys():
        test = self._FindTestInTrie(test_name, tests)

        failed = 0
        for result in test[JRG.RESULTS]:
          if result[1] == JRG.FAIL_RESULT:
            failed += result[0]
        self.assertEqual(failed_count_map[test_name], failed)

        # Exactly one timing entry must match the recorded elapsed time.
        timing_count = 0
        for timings in test[JRG.TIMES]:
          if timings[1] == test_timings[test_name]:
            timing_count = timings[0]
        self.assertEqual(1, timing_count)

    if fixable_count:
      self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count)

  def _FindTestInTrie(self, path, trie):
    # Walks the hierarchical test trie ('foo/bar/baz.html') and returns the
    # leaf node for the given test path.
    nodes = path.split('/')
    sub_trie = trie
    for node in nodes:
      self.assertIn(node, sub_trie)
      sub_trie = sub_trie[node]
    return sub_trie

  def testJSONGeneration(self):
    self._TestJSONGeneration([], [])
    self._TestJSONGeneration(['A1', 'B1'], [])
    self._TestJSONGeneration([], ['FAILS_A2', 'FAILS_B2'])
    self._TestJSONGeneration(['DISABLED_A3', 'DISABLED_B3'], [])
    self._TestJSONGeneration(['A4'], ['B4', 'FAILS_C4'])
    self._TestJSONGeneration(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5'])
    self._TestJSONGeneration(
        ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'],
        ['FAILS_D6'])

    # Generate JSON with the same test sets. (Both incremental results and
    # archived results must be updated appropriately.)
    self._TestJSONGeneration(
        ['A', 'FLAKY_B', 'DISABLED_C'],
        ['FAILS_D', 'FLAKY_E'])
    self._TestJSONGeneration(
        ['A', 'DISABLED_C', 'FLAKY_E'],
        ['FLAKY_B', 'FAILS_D'])
    self._TestJSONGeneration(
        ['FLAKY_B', 'DISABLED_C', 'FAILS_D'],
        ['A', 'FLAKY_E'])

  def testHierarchicalJSNGeneration(self):
    # FIXME: Re-work tests to be more comprehensible and comprehensive.
    self._TestJSONGeneration(['foo/A'], ['foo/B', 'bar/C'])

  def testTestTimingsTrie(self):
    individual_test_timings = []
    individual_test_timings.append(
        json_results_generator.TestResult(
            'foo/bar/baz.html',
            elapsed_time=1.2))
    individual_test_timings.append(
        json_results_generator.TestResult('bar.html', elapsed_time=0.0001))
    trie = json_results_generator.TestTimingsTrie(individual_test_timings)

    # Times are stored in the trie as integer milliseconds.
    expected_trie = {
        'bar.html': 0,
        'foo': {
            'bar': {
                'baz.html': 1200,
            }
        }
    }

    self.assertEqual(json.dumps(trie), json.dumps(expected_trie))
| bsd-3-clause |
passiweinberger/nupic | src/nupic/frameworks/opf/opfenvironment.py | 40 | 4003 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file describes the interfaces for adapting OPFTaskDriver to specific
environments.
These interfaces encapsulate external specifics, such as
data source (e.g., .csv file or database, etc.), prediction sink (.csv file or
databse, etc.), report and serialization destination, etc.
"""
from abc import ABCMeta, abstractmethod
from collections import namedtuple
class PredictionLoggerIface(object):
  """ This class defines the interface for OPF prediction logger
  implementations. Concrete loggers persist model inputs, inferences and
  metrics to some output store (e.g. a .csv file or a database).
  """
  __metaclass__ = ABCMeta

  @abstractmethod
  def close(self):
    """ Closes the connection to the output store and cleans up any resources
    associated with writing.
    """

  @abstractmethod
  def writeRecord(self, modelResult):
    """ Emits a set of inputs data, inferences, and metrics from a model
    resulting from a single record.

    modelResult: An opfutils.ModelResult object that contains the model input
                 and output for the current timestep.
    """

  @abstractmethod
  def writeRecords(self, modelResults, progressCB=None):
    """ Same as writeRecord above, but emits multiple rows in one shot.

    modelResults: a list of opfutils.ModelResult objects, Each dictionary
                  represents one record.
    progressCB: an optional callback method that will be called after each
                batch of records is written.
    """

  @abstractmethod
  def setLoggedMetrics(self, metricNames):
    """ Sets which metrics should be written to the prediction log

    Parameters:
    -----------------------------------------------------------------------
    metricNames: A list of metric names that match the labels of the
                 metrics that should be written to the prediction log
    """

  @abstractmethod
  def checkpoint(self, checkpointSink, maxRows):
    """ Save a checkpoint of the prediction output stream. The checkpoint
    comprises up to maxRows of the most recent inference records.

    Parameters:
    ----------------------------------------------------------------------
    checkpointSink: A File-like object where predictions checkpoint data, if
                    any, will be stored.
    maxRows: Maximum number of most recent inference rows
             to checkpoint.
    """
#
# This named tuple class defines an element in the sequence of predictions
# that are passed to PredictionLoggerIface.emit()
#
# predictionKind: A PredictionKind constant representing this prediction
# predictionRow: A sequence (list, tuple, or nupic array) of field values
# comprising the prediction. The fields are in the order as
# described for the inputRecordSensorMappings arg of the
# PredictionLoggerIface.__call__ method
PredictionLoggingElement = namedtuple("PredictionLoggingElement",
("predictionKind", "predictionRow",
"classification"))
| agpl-3.0 |
tst-ahernandez/earthenterprise | earth_enterprise/src/server/wsgi/serve/publish/publish_manager_helper.py | 1 | 62019 | #!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Publish manager helper module.
Classes for handling publish requests on the low level (database, filesystem).
"""
import copy
import datetime
import json
import logging
import operator
import os
import re
import shutil
import subprocess
import tempfile
import urlparse
from common import exceptions
from common import utils
import psycopg2
from serve import basic_types
from serve import constants
from serve import http_io
from serve import serve_utils
from serve import stream_manager
from serve.push.search.core import search_manager
# Module-level logger shared by the publish manager helper.
logger = logging.getLogger("ge_stream_publisher")

# Fragment inserted into .htaccess to anchor rewrite rules at the site root.
HTACCESS_REWRITE_BASE = "\nRewriteBase /\n"

# Minimum portable globe size in MB.
GLOBE_SIZE_THRESHOLD = 1.0

# Header comment template identifying the target a rule group serves.
LINE0_TARGETDESCR = "\n# target: %s\n"

# Rewrite rule template for adding trailing slash.
LINE1_TRAILING_SLASH_REWRITERULE = "RewriteRule '^%s$' '%s/' [NC,R]\n"

# Rewrite rule template for POISearch serving.
LINE2_POISEARCH_REWRITERULE = "RewriteRule '^%s/%s(.*)' %s$1 [NC,PT]\n"

# Rewrite rule templates for WMS serving.
WMS_LINE0_REWRITERULE_R404 = "RewriteRule '^%s/wms' - [NC,R=404]\n"
WMS_LINE0_REWRITECOND = "RewriteCond %{QUERY_STRING} ^(.*)$\n"
WMS_LINE1_REWRITERULE = (
    "RewriteRule '^%s/wms' 'wms?%%1&TargetPath=%s' [NC,PT]\n")

# Rewrite rules templates for GE database requests serving.
GE_LINE0_REWRITERULE = "RewriteRule '^%s/+$' earth/earth_local.html [NC,PT]\n"
GE_LINE1_REWRITECOND = "RewriteCond %{QUERY_STRING} ^(.*)$\n"
GE_LINE2_REWRITERULE = (
    "RewriteRule '^%s/(.*)' '%s%s/db/$1?%%1&db_type=%s' [NC]\n")

# Rewrite rules templates for Map database requests serving.
MAP_LINE0_LOCAL_REWRITERULE = (
    "RewriteRule '^%s/+$' maps/maps_local.html [NC,PT]\n")
MAP_LINE0_GOOGLE_REWRITERULE = (
    "RewriteRule '^%s/+$' maps/maps_google.html [NC,PT]\n")
MAP_LINE1_REWRITERULE = (
    "RewriteRule '^%s/+maps/+mapfiles/(.*)$' maps/mapfiles/$1 [NC,PT]\n")
MAP_LINE2_REWRITECOND = "RewriteCond %{QUERY_STRING} ^(.*)$\n"
MAP_LINE3_REWRITERULE = (
    "RewriteRule '^%s/(.*)' '%s%s/db/$1?%%1&db_type=%s' [NC]\n")

# Rewrite rules templates for portable globes requests serving.
# GLB or 3d GLC
GLX_LINE0_REWRITERULE = (
    "RewriteRule '^%s/+$' portable/preview.html?%s [NC,PT]\n")
GLX_LINE1_REWRITECOND = "RewriteCond %{QUERY_STRING} ^(.*)$\n"
GLX_LINE2_REWRITERULE = (
    "RewriteRule '^%s/(.*)' '%s%s/db/$1?%%1&db_type=%s' [NC]\n")
class PublishManagerHelper(stream_manager.StreamManager):
"""Class for handling publish requests."""
VS_CONFIG_PATH = "/opt/google/gehttpd/conf.d/virtual_servers"
HTACCESS_PATH = "/opt/google/gehttpd/htdocs/.htaccess"
HTACCESS_TMP_PREFIX = "gepublish_htacces_"
HTACCESS_GE_PUBLISH_BEGIN = "### GE_PUBLISH BEGIN\n"
HTACCESS_GE_PUBLISH_END = "### GE_PUBLISH END\n"
PUBLISH_PATH_TEMPL = "{0}{1}"
TARGET_PATH_TEMPL = "{0}/targets{1}"
def __init__(self):
"""Inits publish manager helper."""
super(PublishManagerHelper, self).__init__()
self._search_manager = search_manager.SearchManager()
def BuildDbPublishPath(self, fusion_hostname, db_name):
"""Builds publish path for Fusion database.
Args:
fusion_hostname: Fusion hostname.
db_name: database name (assetroot path).
Returns:
The complete publish path of specified Fusion database.
"""
# Fusion hostname should be always defined for Fusion database.
assert fusion_hostname
return os.path.normpath(
PublishManagerHelper.PUBLISH_PATH_TEMPL.format(
self.GetFusionDbPublishPathPrefix(fusion_hostname), db_name))
def BuildTargetPublishPath(self, db_publish_path, target_path):
"""Builds complete publish path for target point of Fusion database.
Args:
db_publish_path: publish path of database.
target_path: target path.
Returns:
The complete publish path of specified target.
"""
return os.path.normpath(
PublishManagerHelper.TARGET_PATH_TEMPL.format(
db_publish_path, target_path))
  def HandleQueryRequest(self, request, response):
    """Handles query requests.

    Dispatches on the QUERY_CMD request parameter to one of the supported
    sub-commands (list DBs/assets/virtual hosts/targets, per-target details,
    published DB path) and writes the result into the response object.

    Args:
      request: request object.
      response: response object.
    Raises:
      psycopg2.Error/Warning, PublishServeException.
    """
    query_cmd = request.GetParameter(constants.QUERY_CMD)
    if not query_cmd:
      raise exceptions.PublishServeException("Missing Query Command.")
    # List all DBs registered on server.
    if query_cmd == constants.QUERY_CMD_LIST_DBS:
      self._GetDbsList(response)
    # TODO: Convert _GetAllAssets to _GetDbsList once
    # the front end is ready to receive the new response.
    elif query_cmd == constants.QUERY_CMD_LIST_ASSETS:
      self._GetAllAssets(response)
    # List all Virtual Hosts registered on server.
    elif query_cmd == constants.QUERY_CMD_LIST_VSS:
      results = self.QueryVhList()
      for vh_name, vh_url, vh_ssl in results:
        http_io.ResponseWriter.AddBodyElement(
            response, constants.HDR_VS_NAME, vh_name)
        http_io.ResponseWriter.AddBodyElement(
            response, constants.HDR_VS_URL,
            self._GetVhCompleteUrl(vh_url, vh_ssl))
      http_io.ResponseWriter.AddBodyElement(
          response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
    # Get Virtual Host details.
    elif query_cmd == constants.QUERY_CMD_VS_DETAILS:
      vh_name = request.GetParameter(constants.VS_NAME)
      if not vh_name:
        raise exceptions.PublishServeException("Missing virtual host name.")
      vh_url, vh_ssl = self.QueryVh(vh_name)
      vh_complete_url = self._GetVhCompleteUrl(vh_url, vh_ssl)
      if vh_complete_url:
        http_io.ResponseWriter.AddBodyElement(
            response, constants.HDR_VS_URL, vh_complete_url)
      http_io.ResponseWriter.AddBodyElement(
          response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
    # List all target paths serving published databases.
    elif query_cmd == constants.QUERY_CMD_LIST_TGS:
      query_string = (
          "SELECT target_path FROM target_table WHERE target_id IN ("
          "SELECT target_id FROM target_db_table)")
      results = self.DbQuery(query_string)
      for line in results:
        http_io.ResponseWriter.AddBodyElement(
            response, constants.HDR_TARGET_PATH, line)
      http_io.ResponseWriter.AddBodyElement(
          response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
    # Get target details.
    # TODO: consider to remove unnecessary details from response.
    # It might be QUERY_CMD_DB_DETAILS which return only DB info that we have
    # in postgres for specified target.
    # Note: in ListDbs we return all this information.
    elif query_cmd == constants.QUERY_CMD_PUBLISHED_DB_DETAILS:
      target_path = request.GetParameter(constants.TARGET_PATH)
      # NOTE(review): target_path is normalized before the presence check;
      # presumably NormalizeTargetPath tolerates a missing value — confirm.
      norm_target_path = serve_utils.NormalizeTargetPath(target_path)
      if not norm_target_path:
        raise exceptions.PublishServeException("Missing target path.")
      query_string = ("""
          SELECT db_table.host_name, db_table.db_name, db_table.db_pretty_name,
          db_table.db_timestamp AT TIME ZONE 'UTC', db_table.db_size,
          virtual_host_table.virtual_host_name,
          virtual_host_table.virtual_host_url,
          virtual_host_table.virtual_host_ssl,
          target_table.target_path, target_table.serve_wms
          FROM target_table, target_db_table, db_table, virtual_host_table
          WHERE target_table.target_path = %s AND
          target_table.target_id = target_db_table.target_id AND
          target_db_table.db_id = db_table.db_id AND
          target_db_table.virtual_host_id = virtual_host_table.virtual_host_id
          """)
      results = self.DbQuery(query_string, (norm_target_path,))
      if results:
        # A target path serves at most one database.
        assert isinstance(results, list) and len(results) == 1
        (r_host_name, r_db_path, r_db_name, r_db_timestamp, r_db_size,
         r_virtual_host_name, r_virtual_host_url, r_virtual_host_ssl,
         r_target_path, r_serve_wms) = results[0]
        db_info = basic_types.DbInfo()
        # TODO: make re-factoring - implement some Set function
        # to use it where it is needed. Maybe build an aux. dictionary and
        # pass as a parameter to that function.
        db_info.host = r_host_name  # db_table.host_name
        db_info.path = r_db_path  # db_table.db_name
        db_info.name = r_db_name  # db_table.db_pretty_name
        timestamp = r_db_timestamp  # db_table.db_timestamp
        if timestamp:
          assert isinstance(timestamp, datetime.datetime)
          db_info.timestamp = serve_utils.DatetimeNoTzToIsoFormatUtc(timestamp)
        db_info.size = r_db_size  # db_table.db_size
        db_info.description = r_db_name  # db_table.db_pretty_name
        db_info.virtual_host_name = r_virtual_host_name
        db_info.target_base_url = self.GetVhBaseUrl(r_virtual_host_url,
                                                    r_virtual_host_ssl)
        db_info.target_path = r_target_path
        db_info.serve_wms = r_serve_wms
        db_info.registered = True
        # Calculate database attributes.
        serve_utils.CalcDatabaseAttributes(db_info)
        # Check whether the Fusion database has been pushed from remote host
        # and set corresponding flag in DbInfo.
        if serve_utils.IsFusionDb(db_info.type):
          db_info.remote = self._IsFusionDbRemote(db_info)
        # Set whether database has POI search data.
        search_db_id = self._search_manager.QueryDbId(
            db_info.host, db_info.path)
        db_info.has_poi = search_db_id != 0
        http_io.ResponseWriter.AddBodyElement(
            response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
        http_io.ResponseWriter.AddBodyElement(
            response, constants.HDR_DATA,
            json.dumps(db_info, cls=basic_types.DbInfoJsonEncoder))
      else:
        raise exceptions.PublishServeException(
            "Target path %s does not exist." % target_path)
    # Gets published DB path by target path.
    elif query_cmd == constants.QUERY_CMD_GEDB_PATH:
      query_target_path = request.GetParameter(constants.TARGET_PATH)
      norm_target_path = serve_utils.NormalizeTargetPath(query_target_path)
      if not norm_target_path:
        raise exceptions.PublishServeException("Missing target path.")
      query_string = ("""
          SELECT db_table.host_name, db_table.db_name, target_table.target_path
          FROM target_table, target_db_table, db_table
          WHERE target_table.target_path = %s AND
          target_table.target_id = target_db_table.target_id AND
          target_db_table.db_id = db_table.db_id
          """)
      results = self.DbQuery(query_string, (norm_target_path,))
      if results:
        assert isinstance(results, list) and len(results) == 1
        (client_host_name, db_path, target_path) = results[0]
        gedb_path = self.BuildDbPublishPath(client_host_name, db_path)
        target_gedb_path = self.BuildTargetPublishPath(
            gedb_path, target_path)
        http_io.ResponseWriter.AddBodyElement(
            response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
        http_io.ResponseWriter.AddBodyElement(
            response, constants.HDR_DATA, target_gedb_path)
      else:
        raise exceptions.PublishServeException(
            "Target path '%s' does not exist." % query_target_path)
    elif query_cmd == constants.QUERY_CMD_TARGET_DETAILS:
      target_path_in = request.GetParameter(constants.TARGET_PATH)
      if not target_path_in:
        raise exceptions.PublishServeException(
            "Missing target path in the request.")
      target_path = serve_utils.NormalizeTargetPath(target_path_in)
      if not target_path:
        raise exceptions.PublishServeException(
            "Not a valid target path %s "
            "(path format is /sub_path1[/sub_path2]." % target_path)
      self.HandleTargetDetailsRequest(target_path, response)
    else:
      raise exceptions.PublishServeException(
          "Invalid Query Command: %s." % query_cmd)
  def HandlePublishRequest(self, db_id, publish_def):
    """Handles publish database request.

    Verifies that the virtual host exists and the database is fully pushed,
    then records the target point, its publish context, and the link between
    target, virtual host and database.

    Args:
      db_id: database ID.
      publish_def: The PublishDef object encapsulating
        set of the publish parameters.
    Raises:
      psycopg2.Error/Warning, PublishServeException.
    """
    target_path = publish_def.target_path
    virtual_host_name = publish_def.virtual_host_name
    db_type = publish_def.db_type
    client_host_name = publish_def.client_host_name
    serve_wms = publish_def.serve_wms
    snippets_set_name = publish_def.snippets_set_name
    search_defs = publish_def.search_tabs
    sup_search_defs = publish_def.sup_search_tabs
    poifederated = publish_def.poi_federated
    # Target path is expected normalized: leading slash, no trailing slash.
    assert target_path and target_path[0] == "/" and target_path[-1] != "/"
    # Check if the VS template exists.
    virtual_host_id = self._QueryVirtualHostId(virtual_host_name)
    if virtual_host_id == -1:
      raise exceptions.PublishServeException(
          "Virtual host %s does not exist." % virtual_host_name)
    # An empty list of files still to transfer means the push is complete.
    transfer_file_paths = self.SynchronizeDb(db_id, db_type, client_host_name)
    if not transfer_file_paths:
      # Add target point into target_table.
      target_id = self._AddTarget(target_path, serve_wms)
      # Insert publish context into 'publish_context_table' table.
      query_string = ("INSERT INTO publish_context_table"
                      " (snippets_set_name, search_def_names,"
                      " supplemental_search_def_names, poifederated)"
                      " VALUES(%s, %s, %s, %s) RETURNING"
                      " publish_context_id")
      result = self.DbModify(
          query_string,
          (snippets_set_name, search_defs, sup_search_defs, poifederated),
          returning=True)
      publish_context_id = 0
      if result:
        publish_context_id = result[0]
      # Note: target is not removed from target_table in case of
      # any exception below.
      # Link target point with VS template, database and publish context.
      query_string = ("INSERT INTO target_db_table"
                      " (target_id, virtual_host_id, db_id, publish_context_id)"
                      " VALUES(%s, %s, %s, %s)")
      self.DbModify(query_string,
                    (target_id, virtual_host_id, db_id, publish_context_id))
    else:
      raise exceptions.PublishServeException("Database is not pushed.")
def HandleUnpublishRequest(self, target_path):
"""Handles un-publish database request.
Deletes the entry in target_db_table, target_search_id_table,
publish_context_table, updates .htaccess file,
deletes target's publish directory.
Note: target is not removed from target_table
Args:
target_path: target path to un-publish.
Raises:
psycopg2.Error/Warning, PublishServeException.
"""
unused_host_name, db_name = self.DoUnpublish(target_path)
if not db_name:
raise exceptions.PublishServeException(
"There is no database associated with target path %s." % (
target_path))
  def DoUnpublish(self, target_path):
    """Do unpublish specified target path.

    Args:
      target_path: target path to un-publish.
    Raises:
      psycopg2.Error/Warning.
    Returns:
      (fusion_host_name, db_name): unpublished database info; (None, None)
      when the target path does not exist.
    """
    # Check if target exists.
    # Note: Here we have case-sensitive query for target from target_table.
    # It allows to keep target path as user have entered it. Client gets target
    # path to unpublish from Server.
    target_id = self._QueryTargetIdByPath(target_path)
    if target_id == -1:
      logger.warning(
          "HandleUnpublishRequest: The target path %s does not exist.",
          target_path)
      return None, None
    publish_context_id = self._QueryPublishContextId(target_id)
    # Only try delete from the publish_context_table for
    # a valid non zero publish_context_id.
    if publish_context_id != 0:
      # Delete the entry in 'publish_context_table' table.
      query_string = ("DELETE FROM publish_context_table "
                      "WHERE publish_context_id = %s")
      num_rows = self.DbModify(query_string, (publish_context_id,))
    # Get db_name before deleting a corresponding entry in the
    # target_db_table.
    (unused_virtual_host_url, db_name, fusion_host_name,
     unused_db_flags) = self._QueryTargetDetailsById(target_id)
    # Delete the entry in target_db_table.
    query_string = "DELETE FROM target_db_table WHERE target_id = %s"
    num_rows = self.DbModify(query_string, (target_id,))
    if num_rows:
      # Remove un-published target from .htaccess by updating .htaccess file.
      self.UpdateHtaccessFile()
      if db_name:
        # Delete target's publish directory.
        self.DeleteTargetPublishDir(target_path, fusion_host_name, db_name)
    return fusion_host_name, db_name
def IsTargetPathUsed(self, target_path):
"""Checks whether specific target path is in use.
Note: The check is case-insensitive, since we make target path(URL-path)
case insensitive. We do not allow to have two of the same published points,
while keeping a target path in database as user have entered it.
Args:
target_path: target path.
Returns:
whether target path is in use.
Raises:
psycopg2.Error/Warning.
"""
query_string = ("""
SELECT 1 FROM target_table, target_db_table
WHERE lower(target_table.target_path) = %s AND
target_table.target_id = target_db_table.target_id
LIMIT 1""")
result = self.DbQuery(query_string, (target_path.lower(),))
if result:
return True
return False
def DeleteTargetPublishDir(self, target_path, client_host_name, db_name):
"""Deletes target's publish directory.
Args:
target_path: target path.
client_host_name: client host name.
db_name: database name (assetroot path).
Raises:
PublishServeException.
"""
(norm_db_path, db_type) = serve_utils.IdentifyPublishedDb(db_name)
if serve_utils.IsFusionDb(db_type):
if not client_host_name:
raise exceptions.PublishServeException(
"Internal error - undefined host name for Fusion database %s." %
db_name)
gedb_path = self.BuildDbPublishPath(client_host_name, norm_db_path)
target_gedb_path = self.BuildTargetPublishPath(gedb_path, target_path)
try:
logger.debug("Delete DB publish directory: %s", target_gedb_path)
# Remove all the files/dirs under the publish db path (included).
shutil.rmtree(target_gedb_path)
except OSError as e:
logger.warning(
"HandleUnpublishRequest: Could not delete DB publish directory: %s"
", Error: %s", target_gedb_path, e)
try:
# Remove '..gedb/targets'- directory if it is empty.
os.rmdir(os.path.dirname(target_gedb_path))
except OSError:
pass
def Cleanup(self):
"""Cleans up publisher (publish info tables).
Un-publishes Fusion DBs, portable globes that do not exist on filesystem.
Returns:
list of unpublished Fusion DBs/portables [{host:, path:},..]
"""
# Get information about published DBs/globes.
query_string = (
"""SELECT db_table.host_name, db_table.db_name, db_table.db_pretty_name,
target_table.target_path
FROM target_db_table, db_table, target_table
WHERE target_table.target_id = target_db_table.target_id AND
db_table.db_id = target_db_table.db_id
""")
results = self.DbQuery(query_string)
unpublished_dbs = []
# Flag for whether globes directory is mounted and at least one portable
# globe exists. If not, don't remove Portables from postgres db.
is_globes_mounted = (
os.path.exists(constants.CUTTER_GLOBES_PATH) and
serve_utils.ExistsPortableInDir(
constants.CUTTER_GLOBES_PATH))
if not is_globes_mounted:
logger.warning(
"HandleCleanupRequest: No portable files in directory %s."
" Volume may not be mounted.",
constants.CUTTER_GLOBES_PATH)
logger.warning("Portable globe publish records have not been cleaned.")
for line in results:
# Get database type.
(db_path, db_type) = serve_utils.IdentifyPublishedDb(line[1])
do_clean_up = False
if serve_utils.IsFusionDb(db_type):
db_host = line[0]
publish_db_path = self.BuildDbPublishPath(db_host, db_path)
publish_db_path = "{0}/header.xml".format(publish_db_path)
db_name = serve_utils.GetFusionDbInfoName(line[2], db_type)
do_clean_up = True
else:
assert serve_utils.IsPortable(db_type)
if is_globes_mounted:
publish_db_path = "{0}{1}".format(
constants.CUTTER_GLOBES_PATH, db_path)
db_name = line[1]
db_host = ""
do_clean_up = True
else:
logger.warning("%s does not exist. Volume may not be mounted.",
PublishManagerHelper.CUTTER_GLOBES_PATH)
target_path = line[3]
if do_clean_up and not os.path.exists(publish_db_path):
self.DoUnpublish(target_path)
unpublished_dbs.append({"host": db_host, "path": db_path})
logger.warning(
"The database/portable globe '{0}' could not be found."
" The path '{1}' serving it has been un-published.".format(
db_name, target_path))
logger.info("Publish info cleanup is complete.")
return unpublished_dbs
def _QueryTargetDbDetailsByPath(self, target_path):
"""Queries target details by target path.
Args:
target_path: target path.
Raises:
psycopg2.Error/Warning.
Returns:
target details as tuple (virtual_host_url, db_name, host_name).
(None, None, None) tuple is returned in case of no DB published to
this target.
"""
assert target_path and target_path[0] == "/" and target_path[-1] != "/"
target_details = {}
query_string = ("""SELECT db_table.host_name, db_table.db_name,
virtual_host_table.virtual_host_name, target_table.serve_wms
FROM target_table, target_db_table, db_table, virtual_host_table
WHERE target_table.target_path = %s AND
target_table.target_id = target_db_table.target_id AND
target_db_table.db_id = db_table.db_id AND
target_db_table.virtual_host_id =
virtual_host_table.virtual_host_id""")
result = self.DbQuery(query_string, (target_path,))
if result:
assert isinstance(result[0], tuple)
(db_host_name, db_name, virtual_host_name, servewms) = result[0]
target_details.update({
"servewms": servewms,
"fusion_host": db_host_name,
"dbname": db_name,
"vhname": virtual_host_name,
})
return target_details
def _QueryPublishContextByTargetPath(self, target_path):
"""Queries gestream database to get publish_context for target path.
Args:
target_path : target path.
Raises:
psycopg2.Error/Warning.
Returns:
publish_context as dict with fields {snippetssetname:string,
searchdefs:[string,], supsearchdefs:[string,], poifederated:bool}.
"""
publish_context = {}
query_string = ("""SELECT publish_context_table.snippets_set_name,
publish_context_table.search_def_names,
publish_context_table.supplemental_search_def_names,
publish_context_table.poifederated
FROM target_table, target_db_table, publish_context_table
WHERE target_table.target_path = %s AND
target_table.target_id = target_db_table.target_id AND
target_db_table.publish_context_id =
publish_context_table.publish_context_id""")
result = self.DbQuery(query_string, (target_path,))
if result:
assert isinstance(result[0], tuple)
(snippets_set_name, search_def_names, sup_search_def_names,
poifederated) = result[0]
publish_context.update({
"snippetsetname": snippets_set_name,
"searchdefs": search_def_names,
"supsearchdefs": sup_search_def_names,
})
if "POISearch" in search_def_names:
publish_context["poifederated"] = poifederated
return publish_context
def _QueryPublishContextId(self, target_id):
"""Queries publish_context_id from target_db_table.
Args:
target_id: target path Id.
Raises:
psycopg2.Error/Warning.
Returns:
Publish context id.
"""
publish_context_id = 0
query_string = ("SELECT publish_context_id FROM target_db_table "
"WHERE target_id = %s")
result = self.DbQuery(query_string, (target_id,))
if result:
publish_context_id = int(result[0])
return publish_context_id
def _QueryTargetDetailsById(self, target_id):
"""Queries target details by target ID.
Args:
target_id: target ID.
Raises:
psycopg2.Error/Warning.
Returns:
target details as tuple (virtual_host_url, db_name, host_name).
(None, None, None) tuple is returned in case of no DB published to
this target.
"""
virtual_host_url = None
db_name = None
host_name = None
db_flags = None
query_string = ("""
SELECT virtual_host_table.virtual_host_url, db_table.db_name,
db_table.host_name, db_table.db_flags
FROM target_db_table, virtual_host_table, db_table
WHERE target_db_table.target_id = %s AND
virtual_host_table.virtual_host_id =
target_db_table.virtual_host_id AND
db_table.db_id = target_db_table.db_id""")
result = self.DbQuery(query_string, (target_id,))
if result:
assert isinstance(result[0], tuple)
(virtual_host_url, db_name, host_name, db_flags) = result[0]
return (virtual_host_url, db_name, host_name, db_flags)
def HandleAddVsRequest(self,
vs_name, vs_url, vs_ssl, vs_cache_level,
response):
"""Handles add virtual server request.
Args:
vs_name: the virtual server name.
vs_url: the virtual server URL.
vs_ssl: whether it is SSL virtual server.
vs_cache_level: the virtual server cache level.
response: the response object.
Raises:
psycopg2.Error/Warning, PublishServeException
"""
# Check if virtual host already exists.
if self._QueryVirtualHostId(vs_name) != -1:
raise exceptions.PublishServeException(
"HandleAddVsRequest: Virtual host %s already exists." % vs_name)
# We do not check if the corresponding config file exists. This because
# we don't know how our users might want to name that file.
# Add the virtual host entry.
query_string = (
"INSERT INTO virtual_host_table (virtual_host_name,"
" virtual_host_url, virtual_host_ssl, virtual_host_cache_level)"
" VALUES(%s, %s, %s, %s)")
self.DbModify(query_string, (vs_name, vs_url, vs_ssl, vs_cache_level))
# Create virtual server config file.
vs_url_complete = self._GetVhCompleteUrl(vs_url, vs_ssl)
self._CreateVsConfig(vs_name, vs_url_complete)
self._RestartServers()
http_io.ResponseWriter.AddBodyElement(
response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
def HandleDeleteVsRequest(self, vs_name, response):
"""Handles delete virtual server request.
Args:
vs_name: virtual host name.
response: response object.
Raises:
psycopg2.Error/Warning, PublishServeException
"""
# Check if virtual server exists and is disabled. There is no database
# published on this virtual server.
if self._QueryVsUsed(vs_name):
raise exceptions.PublishServeException(
"HandleDeleteVsRequest: Make sure the virtual host %s"
" exists and is currently not being used." % vs_name)
# Delete the entry in virtual_host_table.
query_string = "DELETE FROM virtual_host_table WHERE virtual_host_name = %s"
self.DbModify(query_string, [vs_name])
self._RestartServers()
http_io.ResponseWriter.AddBodyElement(
response, constants.HDR_STATUS_CODE, constants.STATUS_SUCCESS)
def GetPublishInfoList(self):
"""Gets publish info list.
Returns:
list of tuples (target_path, host_name, db_name).
"""
query_string = """
SELECT target_path, host_name, db_name
FROM target_table, db_table, target_db_table
WHERE
target_table.target_id = target_db_table.target_id AND
db_table.db_id = target_db_table.db_id"""
return self.DbQuery(query_string)
def GetSearchDefDetails(self, search_def_name):
return self._search_manager.GetSearchDefDetails(search_def_name)
def GetSearchDbId(self, client_host_name, db_name):
return self._search_manager.QueryDbId(client_host_name, db_name)
def GetVsUrlPathList(self):
query_string = (
"SELECT virtual_host_url FROM virtual_host_table")
results = self.DbQuery(query_string)
vh_list = []
for vh_url in results:
url_parse_res = urlparse.urlparse(vh_url)
vh_list.append(url_parse_res.path)
return vh_list
def GetCutSpecs(self):
"""Gets cut specifications.
Returns:
list of cut specifications.
"""
results = self.DbQuery("""
SELECT name, qtnodes, exclusion_qtnodes,
min_level, default_level, max_level
FROM cut_spec_table""")
return results
  def UpdateHtaccessFile(self):
    """Updates .htaccess file.

    Regenerates the GE_PUBLISH section of the .htaccess file from the
    current set of published target paths. The new content is written to a
    temporary file first and only copied over the live file on success.
    """
    # Get a list of (target_path, target_id) pairs from target_table.
    target_paths_list = self._ListTargetPaths()
    if not target_paths_list:
      return
    # Sort by target path in descending order.
    # Note: The order in which these rules are defined is
    # important - this is the order in which they will be applied at run-time.
    # Sorting in descending order is necessary to prevent usurping by shorter
    # paths that would match first.
    get_key = operator.itemgetter(0)
    target_paths_list.sort(key=lambda elem: get_key(elem).lower(), reverse=True)
    # Write publish content into .htaccess.
    # delete=False: the file must survive close() so it can be copied below.
    out_file = tempfile.NamedTemporaryFile(
        mode="w+",
        prefix=PublishManagerHelper.HTACCESS_TMP_PREFIX,
        delete=False)
    try:
      if os.path.exists(PublishManagerHelper.HTACCESS_PATH):
        is_publish_content_added = False
        with open(PublishManagerHelper.HTACCESS_PATH, mode="r") as in_file:
          in_publish_section = False
          # Copy lines through, replacing the GE_PUBLISH section (if any)
          # with freshly generated content.
          for line in in_file:
            if line == PublishManagerHelper.HTACCESS_GE_PUBLISH_BEGIN:
              in_publish_section = True
              self._WritePublishContentToHtaccessFile(
                  out_file, target_paths_list)
              is_publish_content_added = True
            elif line == PublishManagerHelper.HTACCESS_GE_PUBLISH_END:
              in_publish_section = False
              continue
            if not in_publish_section:
              out_file.write(line)
        # Existing file had no GE_PUBLISH section: append one.
        if not is_publish_content_added:
          self._WritePublishContentToHtaccessFile(out_file, target_paths_list)
      else:
        self._WritePublishContentToHtaccessFile(out_file, target_paths_list)
    except Exception:
      # Remove the temp file on any failure, then re-raise.
      out_file.close()
      os.unlink(out_file.name)
      raise
    else:
      # Copy temp htaccess file into htdocs rewriting existing .htaccess.
      out_file.close()
      shutil.copyfile(out_file.name, PublishManagerHelper.HTACCESS_PATH)
      os.unlink(out_file.name)
  def _AddTarget(self, target_path, serve_wms):
    """Adds target path into target_table and sets serve_wms flag.

    Args:
      target_path: target path.
      serve_wms: whether target point is servable through WMS.
    Raises:
      psycopg2.Error/Warning, PublishServeException.
      PublishServeException is raised in case of this target path is already
      in use.
    Returns:
      target_id: ID of added/existed target point.
    """
    # Target path is expected normalized: leading slash, no trailing slash.
    assert target_path and target_path[0] == "/" and target_path[-1] != "/"
    # Check if the target point already exists.
    # Note: Here we have case-sensitive query for target from target_table.
    # It allows to keep target path as user have entered it.
    target_id = self._QueryTargetIdByPath(target_path)
    if target_id != -1:
      # Check if the target point is currently used.
      if self._QueryIsTargetUsed(target_id):
        # Note: might be an assert since we check it before.
        raise exceptions.PublishServeException(
            "Target path %s is already in use. Note that paths are "
            "case insensitve. Input another path"
            " or un-publish database using this path." % target_path)
      # Sets serve_wms flag for existing path.
      query_string = ("UPDATE target_table SET serve_wms = %s"
                      " WHERE target_path = %s")
      self.DbModify(query_string, (serve_wms, target_path))
      return target_id
    # Add the target point entry.
    query_string = (
        "INSERT INTO target_table (target_path, serve_wms) VALUES(%s, %s)")
    self.DbModify(query_string, (target_path, serve_wms))
    # Re-query to obtain the ID the database assigned to the new row.
    target_id = self._QueryTargetIdByPath(target_path)
    return target_id
def _QueryVirtualHostId(self, virtual_host_name):
"""Queries Virtual Host ID by name.
Args:
virtual_host_name: name of Virtual Host.
Raises:
psycopg2.Error/Warning.
Returns:
ID of Virtual Host in case of it exists, otherwise -1.
"""
query_string = ("SELECT virtual_host_id FROM virtual_host_table"
" WHERE virtual_host_name = %s")
result = self.DbQuery(query_string, (virtual_host_name,))
virtual_host_id = -1
if result:
virtual_host_id = int(result[0])
return virtual_host_id
def _QueryVirtualHostIdAndDbId(self, target_id):
"""Queries Virtual Host ID and Db ID by target ID.
Args:
target_id: target ID.
Raises:
psycopg2.Error/Warning.
Returns:
tuple (virtual_host_id, db_id). If there is no DB published on
specified target then it returns tuple (None, None).
"""
query_string = ("SELECT virtual_host_id, db_id FROM target_db_table"
" WHERE target_id = %s")
result = self.DbQuery(query_string, (target_id,))
virtual_host_id = None
db_id = None
if result:
assert isinstance(result[0], tuple)
virtual_host_id = int(result[0][0])
db_id = int(result[0][1])
return (virtual_host_id, db_id)
def _QueryTargetIdByPath(self, target_path):
"""Queries target point ID by its path.
Note: query is case-sensitive since we keep target path in database as user
have entered it.
Args:
target_path: target point path.
Raises:
psycopg2.Error/Warning.
Returns:
ID of target point in case of it exists otherwise -1.
"""
query_string = "SELECT target_id FROM target_table WHERE target_path = %s"
result = self.DbQuery(query_string, (target_path,))
target_id = -1
if result:
target_id = int(result[0])
return target_id
def _QueryIsTargetUsed(self, target_id):
"""Queries whether target point is taken.
Args:
target_id: target point ID.
Raises:
psycopg2.Error/Warning.
Returns:
whether target point with specified target_id is used.
"""
is_target_used = False
query_string = "SELECT db_id FROM target_db_table WHERE target_id = %s"
result = self.DbQuery(query_string, (target_id,))
if result:
is_target_used = True
return is_target_used
def _QueryDbAndHostName(self, db_id):
"""Queries database name and host name by database ID.
Args:
db_id: database ID.
Raises:
psycopg2.Error/Warning.
Returns:
(db_name, host_name) tuple. Values are None in case database with
specified db_id does not exist.
"""
host_name = None
db_name = None
if db_id == 0:
return (db_name, host_name)
query_string = "SELECT db_name, host_name FROM db_table WHERE db_id = %s"
result = self.DbQuery(query_string, (db_id,))
if result:
assert isinstance(result[0], tuple)
db_name = result[0][0]
host_name = result[0][1]
return (db_name, host_name)
def _QueryVsUsed(self, vs_name):
"""Queries whether virtual server is used.
Virtual server is used - there is a database served with it.
Args:
vs_name: virtual server name.
Returns:
whether virtual server is used.
Raises:
psycopg2.Error/Warning
"""
query_string = (
"SELECT db_id FROM target_db_table WHERE virtual_host_id IN ("
"SELECT virtual_host_id FROM virtual_host_table"
" WHERE virtual_host_name = %s)")
results = self.DbQuery(query_string, (vs_name,))
if results:
return True
return False
def _GetDbsList(self, response):
"""Gets list of available databases.
Args:
response: response object.
Raises:
psycopg2.Error/Warning.
Returns:
list of DbInfo objects serialized into json response object.
"""
# TODO: try-except here is a temporary solution.
# Move to DoGet() when all responses are formatted in json.
try:
database_list, unused_set = self._GetDatabaseList()
http_io.ResponseWriter.AddJsonBody(
response, constants.STATUS_SUCCESS, database_list)
except exceptions.PublishServeException as e:
logger.error(e)
http_io.ResponseWriter.AddJsonFailureBody(response, str(e))
except psycopg2.Warning as w:
logger.error(w)
http_io.ResponseWriter.AddJsonFailureBody(response, str(e))
except psycopg2.Error as e:
logger.error(e)
http_io.ResponseWriter.AddJsonFailureBody(response, str(w))
except Exception as e:
logger.error(e)
http_io.ResponseWriter.AddJsonFailureBody(
response, "Server-side Internal Error: {0}".format(e))
def _GetAllAssets(self, response):
"""Gets list of available fusion databases and portables.
Args:
response: response object.
Raises:
psycopg2.Error/Warning.
Returns:
list of databases and portables serialized into json response object.
"""
try:
results, registered_portable_set = self._GetDatabaseList()
results.extend(self._GetPortableGlobesList(registered_portable_set))
http_io.ResponseWriter.AddJsonBody(
response, constants.STATUS_SUCCESS, results)
except exceptions.PublishServeException as e:
logger.error(e)
http_io.ResponseWriter.AddJsonFailureBody(response, str(e))
except psycopg2.Warning as w:
logger.error(w)
http_io.ResponseWriter.AddJsonFailureBody(response, str(e))
except psycopg2.Error as e:
logger.error(e)
http_io.ResponseWriter.AddJsonFailureBody(response, str(w))
except Exception as e:
logger.error(e)
http_io.ResponseWriter.AddJsonFailureBody(
response, "Server-side Internal Error: {0}".format(e))
  # TODO: add database description in Fusion and handle it here.
  def _GetDatabaseList(self):
    """Gets list of Fusion databases.

    Builds a DbInfo per db_table row, annotates each with POI-search and
    publish info, and duplicates entries for databases published to more
    than one target.

    Raises:
      psycopg2.Error/Warning.
    Returns:
      tuple (list of DbInfo objects, list of registered portable names).
    """
    # Get database information from db_table.
    query_string = ("SELECT db_id, host_name, db_name, db_pretty_name,"
                    " db_timestamp AT TIME ZONE 'UTC', db_size FROM db_table")
    results = self.DbQuery(query_string)
    # Holder for registered portable names.
    registered_portable_set = set()
    # Parsing results into DbInfo list.
    db_info_list = []
    db_id_list = []
    for line in results:
      db_id_list.append(int(line[0]))  # db_table.db_id
      db_info = basic_types.DbInfo()
      db_info.host = line[1]  # db_table.host_name
      db_info.path = line[2]  # db_table.db_name
      db_info.name = line[3]  # db_table.db_pretty_name
      timestamp = line[4]  # db_table.db_timestamp
      if timestamp:
        assert isinstance(timestamp, datetime.datetime)
        db_info.timestamp = serve_utils.DatetimeNoTzToIsoFormatUtc(timestamp)
      db_info.size = line[5]  # db_table.db_size
      db_info.description = line[3]  # db_table.db_pretty_name
      db_info.registered = True
      # Determine database features.
      serve_utils.CalcDatabaseAttributes(db_info)
      # Check whether the Fusion database has been pushed from remote host and
      # set corresponding flag in DbInfo.
      if serve_utils.IsFusionDb(db_info.type):
        db_info.remote = self._IsFusionDbRemote(db_info)
      # Store name of registered portables to avoid list duplicates.
      if serve_utils.IsPortable(db_info.type):
        storage_name = (db_info.name[1:] if db_info.name[0] == "/" else
                        db_info.name)
        registered_portable_set.add(storage_name)
      db_info_list.append(db_info)
    # Set whether database has POI search data.
    search_dbs_set = set(self._search_manager.QueryListDbs())
    if search_dbs_set:
      for db_info in db_info_list:
        db_info.has_poi = (db_info.host, db_info.path) in search_dbs_set
    # Get auxiliary dictionary mapping a database ID to a publish info list.
    db_to_publish_info = self._GetDbIdToPublishInfoDict()
    # Get auxiliary dictionary mapping a Virtual Host name to a base URL.
    vhname_to_baseurl = self._GetVhNameToBaseUrlDict()
    # For published databases in DbInfo list, set publish info:
    # virtual host name, target base URL, target path, serve_wms.
    # Note: we get additional db_info-s in case we have
    # databases that are published to more then one target.
    add_db_info_list = []
    for db_id, db_info in zip(db_id_list, db_info_list):
      if db_id in db_to_publish_info:
        publish_info_list = db_to_publish_info[db_id]
        # First publish point goes onto the original DbInfo ...
        publish_info = publish_info_list[0]
        db_info.virtual_host_name = publish_info[0]
        db_info.target_base_url = vhname_to_baseurl[db_info.virtual_host_name]
        db_info.target_path = publish_info[1]
        db_info.serve_wms = publish_info[2]
        # ... each additional publish point becomes a shallow copy of it.
        if len(publish_info_list) > 1:
          for vh_name, target_path, serve_wms in publish_info_list[1:]:
            add_db_info = copy.copy(db_info)
            add_db_info.virtual_host_name = vh_name
            add_db_info.target_base_url = vhname_to_baseurl[
                add_db_info.virtual_host_name]
            add_db_info.target_path = target_path
            add_db_info.serve_wms = serve_wms
            add_db_info_list.append(add_db_info)
    db_info_list.extend(add_db_info_list)
    return (db_info_list, registered_portable_set)
def _GetPortableGlobesList(self, registered_portable_set):
  """Builds the list of unregistered portable globes.

  Scans the cutter/globes directory for glx files, skipping any globe that
  is already registered (those were added in _GetDatabaseList()).

  Args:
    registered_portable_set: set of registered portable names.
  Returns:
    list of (unregistered) Portable globes.
  """
  unregistered_globes = []
  globes_dir = constants.CUTTER_GLOBES_PATH
  for file_name in os.listdir(globes_dir):
    # Registered globes were already reported by _GetDatabaseList().
    if file_name in registered_portable_set:
      continue
    globe_info = basic_types.DbInfo()
    globe_info.name = file_name
    # The portable type is encoded in the three-character file suffix.
    globe_info.type = globe_info.name[-3:]
    # Ignore files that are not Portables, eg .README.
    if not serve_utils.IsPortable(globe_info.type):
      continue
    serve_utils.GlxDetails(globe_info)
    if globe_info.size > GLOBE_SIZE_THRESHOLD:
      unregistered_globes.append(globe_info)
  return unregistered_globes
def _CreateVsConfig(self, vs_name, vs_url):
  """Writes the Apache virtual server config for the given virtual host.

  SSL hosts get a "_host.location_ssl" config file, plain HTTP hosts a
  "_host.location" one; both live under VS_CONFIG_PATH.

  Args:
    vs_name: virtual server name.
    vs_url: virtual server URL.
  """
  logger.debug("_CreateVsConfig...")
  parsed_url = urlparse.urlparse(vs_url)
  is_ssl = parsed_url.scheme == "https"
  suffix = "_host.location_ssl" if is_ssl else "_host.location"
  config_path = os.path.normpath(
      os.path.join(PublishManagerHelper.VS_CONFIG_PATH, vs_name + suffix))
  if is_ssl:
    self._WriteSslVsConfig(config_path, vs_name, parsed_url.path)
  else:
    self._WriteVsConfig(config_path, vs_name, parsed_url.path)
def _WriteVsConfig(self, vs_config_file_path, vs_name, vs_path):
"""Write default content to VS config file.
Args:
vs_config_file_path: config file path.
vs_name: virtual server name.
vs_path: virtual server path (location).
"""
with open(vs_config_file_path, "w") as f:
f.write("# The virtual host %s.\n" % vs_name)
f.write("RewriteEngine on\n\n")
f.write("<Location %s/>\n" % vs_path)
f.write(" SetHandler fdb-handler\n")
f.write("</Location>\n")
def _WriteSslVsConfig(self, vs_config_file_path, vs_name, vs_path):
"""Write default content to SSL VS config.
Args:
vs_config_file_path: config file path.
vs_name: virtual server name.
vs_path: virtual server path (location).
"""
with open(vs_config_file_path, "w") as f:
f.write("# The SSL virtual host %s.\n" % vs_name)
f.write("RewriteEngine on\n\n")
f.write("<Location %s/>\n" % vs_path)
f.write(" SetHandler fdb-handler\n")
f.write(" SSLRequireSSL\n")
f.write(" SSLVerifyClient none\n")
f.write("</Location>\n")
def _RestartServers(self):
  """Restarts the Earth Server by reloading the Apache configuration.

  Raises:
    PublishServeException: if launching the restart command fails.
  """
  logger.debug("_RestartServers...")
  try:
    # Reload Apache configs
    # NOTE(review): Popen is fire-and-forget -- the restart runs
    # asynchronously and its exit status is never checked.
    cmd_reload = "/opt/google/bin/gerestartapache"
    logger.info("Earth Server restarting...")
    # NOTE(review): the trailing empty-string argument looks
    # unintentional -- confirm whether gerestartapache expects it.
    subprocess.Popen([cmd_reload, ""])
  except Exception as e:
    raise exceptions.PublishServeException(e)
def _ListTargetPaths(self):
  """Lists the target paths that currently serve published databases.

  Only targets that appear in target_db_table (i.e. have a database
  attached) are returned.

  Raises:
    psycopg2.Error/Warning.
  Returns:
    list of tuples (target_path, target_id, serve_wms).
  """
  query = ("SELECT target_path, target_id, serve_wms FROM target_table"
           " WHERE target_id IN (SELECT target_id FROM target_db_table)")
  return self.DbQuery(query)
def _WritePublishContentToHtaccessFile(self, htaccess_file,
                                       target_paths_list):
  """Writes publish rewrite rules into the htaccess-file.

  For every target path serving a published database, looks up the publish
  details and emits the Apache rewrite rules appropriate for the database
  type (GE, map or portable).

  Args:
    htaccess_file: file descriptor for writing to.
    target_paths_list: list of (target_path, target_id, serve_wms) tuples.
  Raises:
    psycopg2.Error/Warning, PublishServeException.
  """
  # Write publish header to file.
  htaccess_file.write("%s" % PublishManagerHelper.HTACCESS_GE_PUBLISH_BEGIN)
  # Write RewriteBase to file.
  htaccess_file.write("%s" % HTACCESS_REWRITE_BASE)
  # Collects all the needed information for all the target paths based on
  # target ID and adds corresponding rewrite rules into htaccess-file.
  for (target_path, target_id, serve_wms) in target_paths_list:
    (virtual_host_url,
     db_name, host_name, db_flags) = self._QueryTargetDetailsById(target_id)
    if (not virtual_host_url) or (not db_name):
      continue  # no DB published on this target path.
    # Identify type of published DB.
    (unused_norm_db_path, db_type) = serve_utils.IdentifyPublishedDb(db_name)
    if serve_utils.IsFusionDb(db_type):
      # Fusion databases must record the Fusion host they were pushed from.
      if not host_name:
        raise exceptions.PublishServeException(
            "Internal Error - undefined host name for Fusion database %s." %
            db_name)
    else:
      assert serve_utils.IsPortable(db_type)
      # Portables are local files; a host name must not be present.
      if host_name:
        raise exceptions.PublishServeException(
            "Internal Error - host name is not empty for portable %s." %
            db_name)
    # Put the rules into htaccess file for current target.
    url_parse_res = urlparse.urlparse(virtual_host_url)
    virtual_host_path = url_parse_res.path
    relative_target_path = target_path[1:]
    # Common lines for all the databases, globes.
    htaccess_file.write(LINE0_TARGETDESCR % target_path)
    htaccess_file.write(LINE1_TRAILING_SLASH_REWRITERULE % (
        relative_target_path, relative_target_path))
    htaccess_file.write(LINE2_POISEARCH_REWRITERULE % (
        relative_target_path,
        constants.POI_SEARCH_SERVICE_NAME,
        constants.POI_SEARCH_SERVICE_NAME))
    if serve_wms:
      htaccess_file.write(WMS_LINE0_REWRITECOND)
      htaccess_file.write(WMS_LINE1_REWRITERULE % (
          relative_target_path, target_path))
    else:
      # WMS disabled for this target: answer WMS requests with a 404.
      htaccess_file.write(WMS_LINE0_REWRITERULE_R404 % (
          relative_target_path))
    # Content for Fusion earth (GE database).
    if db_type == basic_types.DbType.TYPE_GE:
      htaccess_file.write(GE_LINE0_REWRITERULE % relative_target_path)
      htaccess_file.write(GE_LINE1_REWRITECOND)
      htaccess_file.write(GE_LINE2_REWRITERULE % (
          relative_target_path, virtual_host_path, target_path, db_type))
    # Content for Fusion map (map database).
    elif db_type == basic_types.DbType.TYPE_MAP:
      assert isinstance(db_flags, int)
      if db_flags & basic_types.DbFlags.USE_GOOGLE_BASEMAP == 0:
        htaccess_file.write(MAP_LINE0_LOCAL_REWRITERULE %
                            relative_target_path)
      else:
        htaccess_file.write(MAP_LINE0_GOOGLE_REWRITERULE %
                            relative_target_path)
      htaccess_file.write(MAP_LINE1_REWRITERULE % relative_target_path)
      htaccess_file.write(MAP_LINE2_REWRITECOND)
      htaccess_file.write(MAP_LINE3_REWRITERULE % (
          relative_target_path, virtual_host_path, target_path, db_type))
    # Content for portable globes.
    elif serve_utils.IsPortable(db_type):
      htaccess_file.write(GLX_LINE0_REWRITERULE % (
          relative_target_path, target_path))
      htaccess_file.write(GLX_LINE1_REWRITECOND)
      htaccess_file.write(GLX_LINE2_REWRITERULE % (
          relative_target_path, virtual_host_path, target_path, db_type))
    else:
      # BUG FIX: the message was previously passed as ("...%s.", db_type),
      # so the exception carried an unformatted (format, arg) tuple.
      raise exceptions.PublishServeException(
          "Unsupported DB type %s." % db_type)
  # Write publish footer to file.
  htaccess_file.write("\n%s" % PublishManagerHelper.HTACCESS_GE_PUBLISH_END)
def _GetDbIdToPublishInfoDict(self):
  """Maps each published database ID to its publish info list.

  Returns:
    dictionary mapping a database ID to a publish info list -
    {db_id: [(vh_name, target_path, serve_wms),]}
  """
  # Fetch (db_id, virtual host name, target path, serve_wms) for every
  # published database.
  query = (
      "SELECT target_db_table.db_id,"
      " virtual_host_table.virtual_host_name,"
      " target_table.target_path, target_table.serve_wms"
      " FROM target_db_table, target_table, virtual_host_table"
      " WHERE target_table.target_id = target_db_table.target_id AND"
      " virtual_host_table.virtual_host_id = target_db_table.virtual_host_id")
  publish_info_by_db = {}
  for db_id, vh_name, target_path, serve_wms in self.DbQuery(query):
    publish_info_by_db.setdefault(db_id, []).append(
        (vh_name, target_path, serve_wms))
  return publish_info_by_db
def _GetVhNameToBaseUrlDict(self):
  """Maps each virtual host name to its base URL.

  Returns:
    dictionary mapping a virtual host name to a base URL
    (scheme://host[:port]) - {vh_name: vh_base_url}
  """
  return dict(
      (vh_name, self.GetVhBaseUrl(vh_url, vh_ssl))
      for (vh_name, vh_url, vh_ssl) in self.QueryVhList())
def GetVhBaseUrl(self, vh_url, vh_ssl):
  """Builds a Virtual Host base URL.

  If the vh_url is scheme://host:port/path, then it extracts
  scheme://host:port to build a base URL, otherwise (vh_url is a path,
  e.g. /public) it builds a base URL based on information in Apache config
  and FQDN.

  Args:
    vh_url: virtual host URL - /path or scheme://host:[port]/path.
    vh_ssl: whether virtual host is SSL.
  Raises:
    PublishServeException: if the Apache config yields no scheme/host/port.
  Returns:
    virtual host base URL - scheme://host[:port]
  """
  url_parse_res = urlparse.urlparse(vh_url)
  if url_parse_res.scheme and url_parse_res.netloc:
    # vh_url is already absolute: reuse its scheme and authority verbatim.
    return "{0}://{1}".format(url_parse_res.scheme, url_parse_res.netloc)
  else:
    # VH URL is specified as absolute path, then build VH base URL based on
    # information in Apache config.
    scheme_host_port = utils.GetApacheSchemeHostPort()
    if not scheme_host_port:
      raise exceptions.PublishServeException(
          "Unable to build Server URL based on Apache config.")
    else:
      assert len(scheme_host_port) == 3
      (scheme, host, port) = scheme_host_port
      assert scheme
      assert host
      assert port
      # Override scheme in accordance with VH properties.
      scheme = "https" if vh_ssl else "http"
      # "/local_host" is a special VH path that pins the host to localhost.
      host = "localhost" if vh_url == "/local_host" else host
      vh_base_url = "{0}://{1}".format(scheme, host)
      # Note: Do not pick up port from Apache config for SSL virtual host,
      # use default port if SSL virtual host specified with absolute path.
      if (not vh_ssl) and port and port != "80":
        # Get port number for not SSL virtual host from Apache config and
        # put it into URL if it is not default.
        vh_base_url += ":{0}".format(port)
      return vh_base_url
def _GetVhCompleteUrl(self, vh_url, vh_ssl):
  """Builds a Virtual Host complete URL (scheme://host[:port]/path).

  If vh_url is already absolute, urljoin leaves it unchanged; otherwise
  (vh_url is an absolute path, e.g. /public) it is resolved against the
  base URL derived from the Apache config and FQDN.

  Args:
    vh_url: virtual host URL - /path or scheme://host:[port]/path.
    vh_ssl: whether virtual host is SSL.
  Raises:
    PublishServeException.
  Returns:
    virtual host complete URL - scheme://host[:port]/path
  """
  return urlparse.urljoin(self.GetVhBaseUrl(vh_url, vh_ssl), vh_url)
def _IsFusionDbRemote(self, db_info):
  """Checks whether a Fusion database was pushed from a remote host.

  Args:
    db_info: database info; must describe a Fusion database.
  Returns:
    True if the database's Fusion host differs from this server's hostname.
  """
  assert serve_utils.IsFusionDb(db_info.type)
  is_local = db_info.host == self.server_hostname
  return not is_local
def GetTargetDetails(self, target_path):
  """Gets target details by target path.

  Args:
    target_path: target path.
  Raises:
    PublishServeException: if no details exist for the target path.
  Returns:
    dict with target path, serve_wms flag, fusion host, database name,
    virtual host name and, when available, the publish context.
  """
  # The previous version pre-initialized target_details, target_db_details
  # and publish_context to {} and immediately overwrote them; the dead
  # assignments have been removed.
  target_db_details = self._QueryTargetDbDetailsByPath(target_path)
  if not target_db_details:
    error_msg = (
        "GetTargetDetails: No target details found for target path %s. "
        "Make sure target path exists." % target_path)
    raise exceptions.PublishServeException(error_msg)
  target_details = {
      "targetpath": target_path,
      "servewms": target_db_details["servewms"],
      "fusion_host": target_db_details["fusion_host"],
      "dbname": target_db_details["dbname"],
      "vhname": target_db_details["vhname"],
  }
  # Publish context only exists for databases published with newer GEE
  # versions; omit the key when it is unavailable.
  publish_context = self._QueryPublishContextByTargetPath(target_path)
  if publish_context:
    target_details["publishcontext"] = publish_context
  return target_details
def HandleTargetDetailsRequest(self, target_path, response):
  """Handles 'targetdetails' request.

  Writes the target details into the response body, flattening the nested
  publish context (when present) into individual "key : value" lines.

  Args:
    target_path: target path.
    response: response object.
  Raises:
    PublishServeException.
  """
  target_details = self.GetTargetDetails(target_path)
  if not target_details:
    raise exceptions.PublishServeException(
        "HandleTargetDetailsRequest: The publish target %s does not exist." %
        target_path)
  http_io.ResponseWriter.AddBodyElement(response, constants.HDR_STATUS_CODE,
                                        constants.STATUS_SUCCESS)
  for key, value in target_details.iteritems():
    if key != "publishcontext":
      http_io.ResponseWriter.AddBodyElement(
          response, constants.HDR_DATA, "%s : %s" % (key, value))
      continue
    # The publish context is itself a dict; emit one line per entry.
    for ctx_key, ctx_value in value.iteritems():
      http_io.ResponseWriter.AddBodyElement(
          response, constants.HDR_DATA, "%s : %s" % (ctx_key, ctx_value))
def IsDatabasePushed(self, client_host_name, db_name):
  """Checks that the given database has been registered and fully pushed.

  Args:
    client_host_name: request originating host name.
    db_name: Fusion database name.
  Raises:
    PublishServeException: if the database is not registered or not pushed.
  Returns:
    True when the database is pushed.
  """
  (unused_db_path, db_type) = serve_utils.IdentifyPublishedDb(db_name)
  # The database must be registered (i.e. have an ID) before publishing.
  db_id = self.QueryDbId(client_host_name, db_name)
  if db_id == 0:
    raise exceptions.PublishServeException(
        "Database %s does not exist on server.\n"
        "It needs to be registered/pushed before publishing." % db_name)
  # NOTE(review): a truthy SynchronizeDb result is treated as "push did not
  # complete" -- confirm against SynchronizeDb's contract.
  if self.SynchronizeDb(db_id, db_type, client_host_name):
    message = ("Database %s does not exist on server. It needs to be "
               "registered/pushed before publishing." % db_name)
    logger.error(message)
    raise exceptions.PublishServeException(message)
  return True
def SwapTargets(self, target_path_a, target_path_b):
  """Validates two published targets and swaps their target paths.

  Args:
    target_path_a: first target path.
    target_path_b: second target path.
  Raises:
    PublishServeException: if the paths are equal, either target is not
      published, or either target lacks a publish context.
  Returns:
    tuple (target_details_a, target_details_b) with swapped target paths.
  """
  # Swapping a target with itself is meaningless.
  if target_path_a == target_path_b:
    raise exceptions.PublishServeException(
        "HandleSwapTargetsRequest: target paths %s and %s are same." %
        (target_path_a, target_path_b))
  # The two targets previously used diverging error texts (including a
  # "pubished" typo); validation is now shared and the messages unified.
  target_details_a = self._GetSwappableTargetDetails(target_path_a)
  target_details_b = self._GetSwappableTargetDetails(target_path_b)
  # Swap target paths.
  target_details_a["targetpath"], target_details_b["targetpath"] = (
      target_details_b["targetpath"], target_details_a["targetpath"])
  return (target_details_a, target_details_b)

def _GetSwappableTargetDetails(self, target_path):
  """Fetches target details for a swap, requiring a publish context.

  Args:
    target_path: target path to validate.
  Raises:
    PublishServeException: if the target is missing or has no publish
      context (databases published with GEE 5.1.2 or earlier).
  Returns:
    dict of target details for the target path.
  """
  target_details = self.GetTargetDetails(target_path)
  if not target_details:
    raise exceptions.PublishServeException(
        "HandleSwapTargetsRequest: Make sure the target path %s "
        "exists and is currently published." % target_path)
  if "publishcontext" not in target_details:
    raise exceptions.PublishServeException(
        "SwapTargets: publish context does not exist "
        "for target path %s. This command is not supported "
        "for databases published with GEE 5.1.2 or earlier." % target_path)
  return target_details
def AreDatabasesComparable(self, db_name1, host_name1, db_name2, host_name2):
  """Checks whether two published databases refer to the same database.

  Two databases are comparable when they were published from the same GEE
  host and their asset paths contain the same database name (the path
  component immediately above the *.kda directory).

  Args:
    db_name1: first database path.
    host_name1: GEE host where db_name1 is published.
    db_name2: second database path.
    host_name2: GEE host where db_name2 is published.
  Returns:
    True if the databases are comparable, False otherwise.
  """
  if host_name1 != host_name2:
    return False
  # Capture the directory component just above the *.kda directory.
  pattern = re.compile(r".*/(.*)/.*\.kda/.*")
  match1 = pattern.search(db_name1)
  if not match1:
    return False
  match2 = pattern.search(db_name2)
  if not match2:
    return False
  return match1.groups()[0] == match2.groups()[0]
def main():
  # Library module: nothing to do when executed directly.
  pass


if __name__ == "__main__":
  main()
| apache-2.0 |
cntnboys/410Lab6 | build/django/build/lib.linux-x86_64-2.7/django/utils/termcolors.py | 100 | 7492 | """
termcolors.py
"""
from django.utils import six
color_names = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
# ANSI SGR codes: foreground colors are 30-37, background colors 40-47.
foreground = dict((color_names[x], '3%s' % x) for x in range(8))
background = dict((color_names[x], '4%s' % x) for x in range(8))

RESET = '0'
opt_dict = {'bold': '1', 'underscore': '4', 'blink': '5', 'reverse': '7', 'conceal': '8'}


def colorize(text='', opts=(), **kwargs):
    """
    Returns your text, enclosed in ANSI graphics codes.

    Depends on the keyword arguments 'fg' and 'bg', and the contents of
    the opts tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print(colorize('first line', fg='red', opts=('noreset',)))
        print('this should be red too')
        print(colorize('and so should this'))
        print('this should not be red')
    """
    code_list = []
    if text == '' and len(opts) == 1 and opts[0] == 'reset':
        return '\x1b[%sm' % RESET
    # dict.items() iterates identically on Python 2 and 3, so the previous
    # six.iteritems() indirection was unnecessary.
    for k, v in kwargs.items():
        if k == 'fg':
            code_list.append(foreground[v])
        elif k == 'bg':
            code_list.append(background[v])
    for o in opts:
        if o in opt_dict:
            code_list.append(opt_dict[o])
    if 'noreset' not in opts:
        text = '%s\x1b[%sm' % (text or '', RESET)
    return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
def make_style(opts=(), **kwargs):
    """
    Returns a function that applies fixed colorize() parameters to its text.

    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        print(bold_red('hello'))

        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    def style(text):
        return colorize(text, opts, **kwargs)
    return style
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'

# Role -> style mappings for each named palette.  Every palette defines the
# same set of roles; 'nocolor' maps them all to empty styles and doubles as
# the canonical list of valid role names (see parse_color_setting).
PALETTES = {
    NOCOLOR_PALETTE: {
        'ERROR': {},
        'WARNING': {},
        'NOTICE': {},
        'SQL_FIELD': {},
        'SQL_COLTYPE': {},
        'SQL_KEYWORD': {},
        'SQL_TABLE': {},
        'HTTP_INFO': {},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {},
        'HTTP_NOT_MODIFIED': {},
        'HTTP_BAD_REQUEST': {},
        'HTTP_NOT_FOUND': {},
        'HTTP_SERVER_ERROR': {},
        'MIGRATE_HEADING': {},
        'MIGRATE_LABEL': {},
        'MIGRATE_SUCCESS': {},
        'MIGRATE_FAILURE': {},
    },
    # Styles tuned for dark terminal backgrounds.
    DARK_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'yellow'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green'},
        'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'yellow'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
        'MIGRATE_SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'MIGRATE_FAILURE': {'fg': 'red', 'opts': ('bold',)},
    },
    # Styles tuned for light terminal backgrounds.
    LIGHT_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'WARNING': {'fg': 'yellow', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'blue'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
        'HTTP_NOT_MODIFIED': {'fg': 'green'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'red'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
        'MIGRATE_HEADING': {'fg': 'cyan', 'opts': ('bold',)},
        'MIGRATE_LABEL': {'opts': ('bold',)},
        'MIGRATE_SUCCESS': {'fg': 'green', 'opts': ('bold',)},
        'MIGRATE_FAILURE': {'fg': 'red', 'opts': ('bold',)},
    }
}
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable to produce the system palette

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the individual
    definitions for each role. Any individual definitions following the palette
    definition will augment the base palette definition.

    Valid roles:
        'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword', 'sql_table',
        'http_info', 'http_success', 'http_redirect', 'http_bad_request',
        'http_not_found', 'http_server_error'

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal'
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]

    # Split the color configuration into parts
    parts = config_string.lower().split(';')
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette defining string
            definition = {}

            # Break the definition into the role,
            # plus the list of specific instructions.
            # The role must be in upper case
            # NOTE(review): a part containing more than one '=' makes this
            # split() raise ValueError -- confirm whether that is intended.
            role, instructions = part.split('=')
            role = role.upper()

            styles = instructions.split(',')
            styles.reverse()

            # The first instruction can contain a slash
            # to break apart fg/bg.
            colors = styles.pop().split('/')
            colors.reverse()
            fg = colors.pop()
            if fg in color_names:
                definition['fg'] = fg
            if colors and colors[-1] in color_names:
                definition['bg'] = colors[-1]

            # All remaining instructions are options
            opts = tuple(s for s in styles if s in opt_dict.keys())
            if opts:
                definition['opts'] = opts

            # The nocolor palette has all available roles.
            # Use that palette as the basis for determining
            # if the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition

    # If there are no colors specified, return the empty palette.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
| apache-2.0 |
jlward/django-authority | authority/widgets.py | 3 | 2809 | from django import forms
from django.conf import settings
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
generic_script = """
<script type="text/javascript">
function showGenericRelatedObjectLookupPopup(ct_select, triggering_link, url_base) {
var url = content_types[ct_select.options[ct_select.selectedIndex].value];
if (url != undefined) {
triggering_link.href = url_base + url;
return showRelatedObjectLookupPopup(triggering_link);
}
return false;
}
</script>
"""
class GenericForeignKeyRawIdWidget(ForeignKeyRawIdWidget):
    # Raw-id admin widget for a generic foreign key: renders the object-id
    # text input plus a lookup link whose popup URL is chosen at click time
    # from the currently selected content type.

    def __init__(self, ct_field, cts=[], attrs=None):
        # ct_field: name of the content-type form field driving the lookup.
        # cts: content types selectable through this widget.
        # NOTE(review): the mutable default `cts=[]` is shared across
        # instances -- harmless while never mutated, but fragile.
        self.ct_field = ct_field
        self.cts = cts
        forms.TextInput.__init__(self, attrs)

    def render(self, name, value, attrs=None):
        # Renders the text input, the lookup anchor, and a script that maps
        # each content-type id to its admin "app_label/model/" URL fragment.
        if attrs is None:
            attrs = {}
        related_url = "../../../"
        params = self.url_parameters()
        if params:
            url = "?" + "&".join(["%s=%s" % (k, v) for k, v in params.iteritems()])
        else:
            url = ""
        if "class" not in attrs:
            # The JavaScript looks for this hook.
            attrs["class"] = "vForeignKeyRawIdAdminField"
        output = [forms.TextInput.render(self, name, value, attrs)]
        output.append(
            """%(generic_script)s
<a href="%(related)s%(url)s"
class="related-lookup"
id="lookup_id_%(name)s"
onclick="return showGenericRelatedObjectLookupPopup(
document.getElementById('id_%(ct_field)s'), this, '%(related)s%(url)s');">
"""
            % {
                "generic_script": generic_script,
                "related": related_url,
                "url": url,
                "name": name,
                "ct_field": self.ct_field,
            }
        )
        output.append(
            '<img src="%s/admin/img/selector-search.gif" width="16" height="16" alt="%s" /></a>'
            % (settings.STATIC_URL, _("Lookup"))
        )
        # Deferred import to avoid import-time coupling with contenttypes.
        from django.contrib.contenttypes.models import ContentType

        # Map content-type ids to "app_label/model/" fragments for the JS.
        content_types = """
<script type="text/javascript">
var content_types = new Array();
%s
</script>
""" % (
            "\n".join(
                [
                    "content_types[%s] = '%s/%s/';"
                    % (
                        ContentType.objects.get_for_model(ct).id,
                        ct._meta.app_label,
                        ct._meta.object_name.lower(),
                    )
                    for ct in self.cts
                ]
            )
        )
        return mark_safe(u"".join(output) + content_types)

    def url_parameters(self):
        # Base implementation contributes no querystring parameters.
        return {}
| bsd-3-clause |
garoa/pingo | pingo/parts/button.py | 7 | 1937 | import time
import threading
class Switch(object):
    """Button-like component with two stable states (HIGH/LOW)."""

    def __init__(self, pin):
        """
        :param pin: A instance of DigitalPin
        """
        self.pin = pin
        self.pin.mode = 'IN'
        self.polling_task = None
        # Callbacks default to no-ops until configured.
        self._up_callback = lambda: None
        self._down_callback = lambda: None

    def set_callback_up(self, callback, *args, **kwargs):
        """Registers the callback fired on a LOW -> HIGH transition."""
        def invoke():
            return callback(*args, **kwargs)
        self._up_callback = invoke

    def set_callback_down(self, callback, *args, **kwargs):
        """Registers the callback fired on a HIGH -> LOW transition."""
        def invoke():
            return callback(*args, **kwargs)
        self._down_callback = invoke

    def stop(self):
        """Terminates the background polling task, if any."""
        task = self.polling_task
        if task is not None and task.active:
            task.terminate()
        self.polling_task = None

    def start(self):
        """Starts (or restarts) polling the pin in a background thread."""
        if self.polling_task is not None and self.polling_task.active:
            self.stop()
        self.polling_task = PollingTask(self)
        threading.Thread(target=self.polling_task.run).start()
class PollingTask(object):
    """Background job that polls a Switch's pin and fires its callbacks."""

    def __init__(self, switch):
        """
        :param switch: Switch instance to poll
        """
        self.switch = switch
        # Set True by run(); cleared by terminate() to stop the loop.
        self.active = False

    def terminate(self):
        # Signals run() to exit after its current iteration.
        self.active = False

    def run(self):
        # Poll loop: fire the switch's up/down callbacks on state changes.
        self.active = True
        last_state = self.switch.pin.state
        while self.active:
            current_state = self.switch.pin.state
            if current_state != last_state:
                # NOTE(review): states are compared against the strings
                # 'HIGH'/'LOW'; any other value leaves last_state unchanged
                # -- confirm against the pin API.
                if current_state == 'HIGH':
                    last_state = current_state
                    self.switch._up_callback()
                elif current_state == 'LOW':
                    last_state = current_state
                    self.switch._down_callback()
            # Poll at ~20 Hz.
            time.sleep(0.05)
| mit |
ntamas/python-selecta | selecta/__main__.py | 1 | 3608 | from __future__ import print_function
import argparse
import sys
from selecta.indexing import FuzzyIndex
from selecta.ui import DumbTerminalUI, SmartTerminalUI
from selecta.utils import identity
from selecta.terminal import reopened_terminal, Terminal
__version__ = "0.0.1"
KNOWN_UI_CLASSES = dict(
dumb=DumbTerminalUI,
smart=SmartTerminalUI
)
def main(args=None):
    """The main entry point of the command line application.

    Args:
        args (list of str): the command line arguments

    Returns:
        int: the exit code of the application (truthy when nothing was
            selected)
    """
    if args is None:
        args = sys.argv[1:]

    parser = create_command_line_parser()
    options = parser.parse_args(args)

    if options.show_version:
        print(__version__)
        return

    # Build the index from stdin before taking over the terminal.
    index = prepare_index()
    # Reopen the controlling terminal so the UI can interact with the user
    # even though stdin was consumed by the index input.
    with reopened_terminal():
        ui_factory = KNOWN_UI_CLASSES[options.ui]
        selection = process_input(index, options.initial_query,
                                  ui_factory=ui_factory)
    if selection is not None:
        print(selection)

    # Nonzero (True) exit status when nothing was selected.
    return selection is None
def create_command_line_parser():
    """Creates and returns the command line argument parser."""
    ui_names = sorted(KNOWN_UI_CLASSES.keys())
    parser = argparse.ArgumentParser(prog="selecta")
    parser.add_argument(
        "--version", dest="show_version", action="store_true", default=False,
        help="show the version number")
    parser.add_argument(
        "-s", "--search", dest="initial_query", metavar="SEARCH", default=None,
        help="specify an initial search string")
    parser.add_argument(
        "--ui", dest="ui", metavar="UI", default="smart", choices=ui_names,
        help="use the given user interface; valid choices "
             "are: {0!r}".format(ui_names))
    return parser
def prepare_index(strings=sys.stdin, transform=unicode.strip, encoding=None):
    """Prepares the index to be used by the application from strings coming
    from the given input stream or iterable.

    Args:
        strings (iterable of str): the strings to be included in the index
        transform (callable or None): a callable to call on each of the strings
            from the iterable before they are fed into the index
        encoding (str or None): the encoding of the strings in the iterable
            if they are not Unicode. ``None`` means to fall back to the
            ``encoding`` attribute of the ``strings`` iterable if there is
            such an attribute, or to ``sys.getdefaultencoding()``.

    Returns:
        selecta.indexing.Index: the prepared index
    """
    # NOTE: this module targets Python 2 -- `unicode` does not exist on
    # Python 3.
    transform = transform or identity
    encoding = encoding or getattr(strings, "encoding", None) or \
        sys.getdefaultencoding()

    index = FuzzyIndex()
    for string in strings:
        # Decode byte strings with the detected encoding before indexing.
        if not isinstance(string, unicode):
            string = string.decode(encoding)
        index.add(transform(string))
    return index
def process_input(index, initial_query=None, ui_factory=SmartTerminalUI):
    """Runs the interactive chooser over *index* and returns the selection.

    Args:
        index: the prepared index to search.
        initial_query (str or None): search string to pre-fill, if any.
        ui_factory: callable creating the UI from a Terminal.

    Returns:
        The matched object, or ``None`` if nothing was chosen.
    """
    # Note that we force the Terminal factory to assume that we are connected
    # to a TTY. This is intentional; we know that because we have reopened
    # /dev/tty (on Linux and Mac) or CON (on Windows) before.
    with Terminal.create(is_tty=True) as terminal:
        ui = ui_factory(terminal)
        with ui.use(index):
            match = ui.choose_item(initial_query)
            return match.matched_object if match else None
if __name__ == "__main__":
sys.exit(main())
| mit |
Big-B702/python-for-android | python3-alpha/python3-src/Lib/curses/has_key.py | 195 | 5634 |
#
# Emulation of has_key() function for platforms that don't use ncurses
#
import _curses
# Table mapping curses keys to the terminfo capability name
# Keys are curses KEY_* keycodes; values are the corresponding terminfo
# capability names, ordered alphabetically by KEY_* constant.
_capability_names = {
    _curses.KEY_A1: 'ka1',
    _curses.KEY_A3: 'ka3',
    _curses.KEY_B2: 'kb2',
    _curses.KEY_BACKSPACE: 'kbs',
    _curses.KEY_BEG: 'kbeg',
    _curses.KEY_BTAB: 'kcbt',
    _curses.KEY_C1: 'kc1',
    _curses.KEY_C3: 'kc3',
    _curses.KEY_CANCEL: 'kcan',
    _curses.KEY_CATAB: 'ktbc',
    _curses.KEY_CLEAR: 'kclr',
    _curses.KEY_CLOSE: 'kclo',
    _curses.KEY_COMMAND: 'kcmd',
    _curses.KEY_COPY: 'kcpy',
    _curses.KEY_CREATE: 'kcrt',
    _curses.KEY_CTAB: 'kctab',
    _curses.KEY_DC: 'kdch1',
    _curses.KEY_DL: 'kdl1',
    _curses.KEY_DOWN: 'kcud1',
    _curses.KEY_EIC: 'krmir',
    _curses.KEY_END: 'kend',
    _curses.KEY_ENTER: 'kent',
    _curses.KEY_EOL: 'kel',
    _curses.KEY_EOS: 'ked',
    _curses.KEY_EXIT: 'kext',
    _curses.KEY_F0: 'kf0',
    _curses.KEY_F1: 'kf1',
    _curses.KEY_F10: 'kf10',
    _curses.KEY_F11: 'kf11',
    _curses.KEY_F12: 'kf12',
    _curses.KEY_F13: 'kf13',
    _curses.KEY_F14: 'kf14',
    _curses.KEY_F15: 'kf15',
    _curses.KEY_F16: 'kf16',
    _curses.KEY_F17: 'kf17',
    _curses.KEY_F18: 'kf18',
    _curses.KEY_F19: 'kf19',
    _curses.KEY_F2: 'kf2',
    _curses.KEY_F20: 'kf20',
    _curses.KEY_F21: 'kf21',
    _curses.KEY_F22: 'kf22',
    _curses.KEY_F23: 'kf23',
    _curses.KEY_F24: 'kf24',
    _curses.KEY_F25: 'kf25',
    _curses.KEY_F26: 'kf26',
    _curses.KEY_F27: 'kf27',
    _curses.KEY_F28: 'kf28',
    _curses.KEY_F29: 'kf29',
    _curses.KEY_F3: 'kf3',
    _curses.KEY_F30: 'kf30',
    _curses.KEY_F31: 'kf31',
    _curses.KEY_F32: 'kf32',
    _curses.KEY_F33: 'kf33',
    _curses.KEY_F34: 'kf34',
    _curses.KEY_F35: 'kf35',
    _curses.KEY_F36: 'kf36',
    _curses.KEY_F37: 'kf37',
    _curses.KEY_F38: 'kf38',
    _curses.KEY_F39: 'kf39',
    _curses.KEY_F4: 'kf4',
    _curses.KEY_F40: 'kf40',
    _curses.KEY_F41: 'kf41',
    _curses.KEY_F42: 'kf42',
    _curses.KEY_F43: 'kf43',
    _curses.KEY_F44: 'kf44',
    _curses.KEY_F45: 'kf45',
    _curses.KEY_F46: 'kf46',
    _curses.KEY_F47: 'kf47',
    _curses.KEY_F48: 'kf48',
    _curses.KEY_F49: 'kf49',
    _curses.KEY_F5: 'kf5',
    _curses.KEY_F50: 'kf50',
    _curses.KEY_F51: 'kf51',
    _curses.KEY_F52: 'kf52',
    _curses.KEY_F53: 'kf53',
    _curses.KEY_F54: 'kf54',
    _curses.KEY_F55: 'kf55',
    _curses.KEY_F56: 'kf56',
    _curses.KEY_F57: 'kf57',
    _curses.KEY_F58: 'kf58',
    _curses.KEY_F59: 'kf59',
    _curses.KEY_F6: 'kf6',
    _curses.KEY_F60: 'kf60',
    _curses.KEY_F61: 'kf61',
    _curses.KEY_F62: 'kf62',
    _curses.KEY_F63: 'kf63',
    _curses.KEY_F7: 'kf7',
    _curses.KEY_F8: 'kf8',
    _curses.KEY_F9: 'kf9',
    _curses.KEY_FIND: 'kfnd',
    _curses.KEY_HELP: 'khlp',
    _curses.KEY_HOME: 'khome',
    _curses.KEY_IC: 'kich1',
    _curses.KEY_IL: 'kil1',
    _curses.KEY_LEFT: 'kcub1',
    _curses.KEY_LL: 'kll',
    _curses.KEY_MARK: 'kmrk',
    _curses.KEY_MESSAGE: 'kmsg',
    _curses.KEY_MOVE: 'kmov',
    _curses.KEY_NEXT: 'knxt',
    _curses.KEY_NPAGE: 'knp',
    _curses.KEY_OPEN: 'kopn',
    _curses.KEY_OPTIONS: 'kopt',
    _curses.KEY_PPAGE: 'kpp',
    _curses.KEY_PREVIOUS: 'kprv',
    _curses.KEY_PRINT: 'kprt',
    _curses.KEY_REDO: 'krdo',
    _curses.KEY_REFERENCE: 'kref',
    _curses.KEY_REFRESH: 'krfr',
    _curses.KEY_REPLACE: 'krpl',
    _curses.KEY_RESTART: 'krst',
    _curses.KEY_RESUME: 'kres',
    _curses.KEY_RIGHT: 'kcuf1',
    _curses.KEY_SAVE: 'ksav',
    _curses.KEY_SBEG: 'kBEG',
    _curses.KEY_SCANCEL: 'kCAN',
    _curses.KEY_SCOMMAND: 'kCMD',
    _curses.KEY_SCOPY: 'kCPY',
    _curses.KEY_SCREATE: 'kCRT',
    _curses.KEY_SDC: 'kDC',
    _curses.KEY_SDL: 'kDL',
    _curses.KEY_SELECT: 'kslt',
    _curses.KEY_SEND: 'kEND',
    _curses.KEY_SEOL: 'kEOL',
    _curses.KEY_SEXIT: 'kEXT',
    _curses.KEY_SF: 'kind',
    _curses.KEY_SFIND: 'kFND',
    _curses.KEY_SHELP: 'kHLP',
    _curses.KEY_SHOME: 'kHOM',
    _curses.KEY_SIC: 'kIC',
    _curses.KEY_SLEFT: 'kLFT',
    _curses.KEY_SMESSAGE: 'kMSG',
    _curses.KEY_SMOVE: 'kMOV',
    _curses.KEY_SNEXT: 'kNXT',
    _curses.KEY_SOPTIONS: 'kOPT',
    _curses.KEY_SPREVIOUS: 'kPRV',
    _curses.KEY_SPRINT: 'kPRT',
    _curses.KEY_SR: 'kri',
    _curses.KEY_SREDO: 'kRDO',
    _curses.KEY_SREPLACE: 'kRPL',
    _curses.KEY_SRIGHT: 'kRIT',
    _curses.KEY_SRSUME: 'kRES',
    _curses.KEY_SSAVE: 'kSAV',
    _curses.KEY_SSUSPEND: 'kSPD',
    _curses.KEY_STAB: 'khts',
    _curses.KEY_SUNDO: 'kUND',
    _curses.KEY_SUSPEND: 'kspd',
    _curses.KEY_UNDO: 'kund',
    _curses.KEY_UP: 'kcuu1'
}
def has_key(ch):
    """Return True if the current terminal type recognizes the key ``ch``.

    ``ch`` may be a one-character string or a curses ``KEY_*`` integer.
    """
    if isinstance(ch, str):
        ch = ord(ch)
    # Map the keycode to its terminfo capability name; keycodes we do
    # not know about cannot be supported.
    capability_name = _capability_names.get(ch)
    if capability_name is None:
        return False
    # The key is available exactly when the terminal description
    # defines the corresponding capability string.
    return bool(_curses.tigetstr(capability_name))
if __name__ == '__main__':
    # Compare the output of this implementation and the ncurses has_key,
    # on platforms where has_key is already available
    try:
        L = []
        # initscr() is required before _curses.has_key() can be queried;
        # it switches the terminal into curses mode, hence the try/finally.
        _curses.initscr()
        for key in _capability_names.keys():
            system = _curses.has_key(key)
            python = has_key(key)
            if system != python:
                L.append( 'Mismatch for key %s, system=%i, Python=%i'
                          % (_curses.keyname( key ), system, python) )
    finally:
        # Always restore the terminal, then report mismatches (printing
        # must happen after endwin() or curses would swallow the output).
        _curses.endwin()
        for i in L: print(i)
| apache-2.0 |
gnu3ra/PyBitmessage | src/socks/__init__.py | 15 | 16174 | """SocksiPy - Python SOCKS module.
Version 1.00
Copyright 2006 Dan-Haim. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of Dan Haim nor the names of his contributors may be used
to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
This module provides a standard socket-like interface for Python
for tunneling connections through SOCKS proxies.
"""
"""
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import socket
import struct
import sys
# Proxy protocol identifiers accepted by setproxy()/setdefaultproxy().
# The numeric values are part of the public API.
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3

# Module-wide default proxy tuple installed by setdefaultproxy();
# None means new sockets connect directly.
_defaultproxy = None
# Reference to the real socket class, kept before wrapmodule() can
# monkey-patch other modules' ``socket.socket``.
_orgsocket = socket.socket

# Exception hierarchy: ProxyError is the common base so callers can
# catch all proxy failures with one except clause.
class ProxyError(Exception): pass
class GeneralProxyError(ProxyError): pass
class Socks5AuthError(ProxyError): pass
class Socks5Error(ProxyError): pass
class Socks4Error(ProxyError): pass
class HTTPError(ProxyError): pass

# Human-readable messages, indexed by the error codes raised in the
# (code, message) tuples carried by the exceptions above.
_generalerrors = ("success",
    "invalid data",
    "not connected",
    "not available",
    "bad proxy type",
    "bad input")

# SOCKS5 reply codes 0-8 as defined by RFC 1928; index 9 is a fallback.
_socks5errors = ("succeeded",
    "general SOCKS server failure",
    "connection not allowed by ruleset",
    "Network unreachable",
    "Host unreachable",
    "Connection refused",
    "TTL expired",
    "Command not supported",
    "Address type not supported",
    "Unknown error")

_socks5autherrors = ("succeeded",
    "authentication is required",
    "all offered authentication methods were rejected",
    "unknown username or invalid password",
    "unknown error")

# SOCKS4 reply codes 90-93, shifted down by 90; index 4 is a fallback.
_socks4errors = ("request granted",
    "request rejected or failed",
    "request rejected because SOCKS server cannot connect to identd on the client",
    "request rejected because the client program and identd report different user-ids",
    "unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])

    Record a module-wide default proxy configuration.
    Every socksocket created afterwards starts with this proxy unless
    it is explicitly reconfigured through setproxy().
    """
    global _defaultproxy
    settings = (proxytype, addr, port, rdns, username, password)
    _defaultproxy = settings
def wrapmodule(module):
    """wrapmodule(module)

    Replace ``module``'s socket class with the SOCKS-aware socksocket.
    A default proxy must have been registered with setdefaultproxy()
    beforehand. Only modules that imported ``socket`` directly into
    their namespace are affected; most of the standard library does.
    """
    if _defaultproxy is None:
        raise GeneralProxyError((4, "no proxy specified"))
    module.socket.socket = socksocket
class socksocket(socket.socket):
    """socksocket([family[, type[, proto]]]) -> socket object

    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET, type=SOCK_STREAM and proto=0.

    The proxy configuration is kept in self.__proxy as the tuple
    (proxytype, addr, port, rdns, username, password).
    """

    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
        _orgsocket.__init__(self, family, type, proto, _sock)
        # Start from the module-wide default proxy, if one was registered.
        if _defaultproxy != None:
            self.__proxy = _defaultproxy
        else:
            self.__proxy = (None, None, None, None, None, None)
        # Filled in after a successful negotiation.
        self.__proxysockname = None
        self.__proxypeername = None

    def __recvall(self, count):
        """__recvall(count) -> data
        Receive EXACTLY the number of bytes requested from the socket.
        Blocks until the required number of bytes have been received.
        Raises GeneralProxyError if the peer closes the connection early.
        """
        data = self.recv(count)
        while len(data) < count:
            d = self.recv(count-len(data))
            if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
            data = data + d
        return data

    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.
        proxytype -    The type of the proxy to be used. Three types
                estroy are supported: PROXY_TYPE_SOCKS4 (including socks4a),
                       PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr -        The address of the server (IP or DNS).
        port -        The port of the server. Defaults to 1080 for SOCKS
                      servers and 8080 for HTTP proxy servers.
        rdns -        Should DNS queries be preformed on the remote side
                      (rather than the local side). The default is True.
                      Note: This has no effect with SOCKS4 servers.
        username -    Username to authenticate with to the server.
                      The default is no authentication.
        password -    Password to authenticate with to the server.
                      Only relevant when username is also provided.
        """
        self.__proxy = (proxytype, addr, port, rdns, username, password)

    def __negotiatesocks5(self, destaddr, destport):
        """__negotiatesocks5(self,destaddr,destport)
        Negotiates a connection through a SOCKS5 server.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
        # We'll receive the server's response to determine which
        # method was selected
        chosenauth = self.__recvall(2)
        if chosenauth[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # Check the chosen authentication method
        if chosenauth[1:2] == chr(0x00).encode():
            # No authentication is required
            pass
        elif chosenauth[1:2] == chr(0x02).encode():
            # Okay, we need to perform a basic username/password
            # authentication.
            # NOTE(review): the chr(len(...)) pieces and the credentials are
            # not encoded, so this concatenation mixes bytes and str on
            # Python 3 and only works on Python 2 -- confirm before porting.
            self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0:1] != chr(0x01).encode():
                # Bad response
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1:2] != chr(0x00).encode():
                # Authentication failed
                self.close()
                raise Socks5AuthError((3, _socks5autherrors[3]))
            # Authentication succeeded
        else:
            # Reaching here is always bad
            self.close()
            # BUGFIX: compare a one-byte slice (as everywhere else in this
            # file), not chosenauth[1]; indexing bytes yields an int on
            # Python 3, so the old comparison could never be true and the
            # specific Socks5AuthError was never raised.
            if chosenauth[1:2] == chr(0xFF).encode():
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Now we can request the actual connection
        req = struct.pack('BBB', 0x05, 0x01, 0x00)
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + chr(0x01).encode() + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3]:
                # Resolve remotely
                ipaddr = None
                req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
            else:
                # Resolve locally
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + chr(0x01).encode() + ipaddr
        req = req + struct.pack(">H", destport)
        self.sendall(req)
        # Get the response
        resp = self.__recvall(4)
        if resp[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1:2] != chr(0x00).encode():
            # Connection failed
            self.close()
            if ord(resp[1:2])<=8:
                raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        # Get the bound address/port
        elif resp[3:4] == chr(0x01).encode():
            boundaddr = self.__recvall(4)
        elif resp[3:4] == chr(0x03).encode():
            # Domain-name bound address: one length byte, then the name.
            resp = resp + self.recv(1)
            boundaddr = self.__recvall(ord(resp[4:5]))
        else:
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        boundport = struct.unpack(">H", self.__recvall(2))[0]
        self.__proxysockname = (boundaddr, boundport)
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)

    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        return self.__proxysockname

    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        return _orgsocket.getpeername(self)

    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy)
        """
        return self.__proxypeername

    def __negotiatesocks4(self,destaddr,destport):
        """__negotiatesocks4(self,destaddr,destport)
        Negotiates a connection through a SOCKS4 server.
        """
        # Check if the destination address provided is an IP address
        rmtrslv = False
        try:
            ipaddr = socket.inet_aton(destaddr)
        except socket.error:
            # It's a DNS name. Check where it should be resolved.
            if self.__proxy[3]:
                # SOCKS4A placeholder address 0.0.0.1 signals remote resolving.
                ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
                rmtrslv = True
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
        # Construct the request packet
        req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
        # The username parameter is considered userid for SOCKS4
        if self.__proxy[4] != None:
            req = req + self.__proxy[4]
        req = req + chr(0x00).encode()
        # DNS name if remote resolving is required
        # NOTE: This is actually an extension to the SOCKS4 protocol
        # called SOCKS4A and may not be supported in all cases.
        if rmtrslv:
            req = req + destaddr + chr(0x00).encode()
        self.sendall(req)
        # Get the response from the server
        resp = self.__recvall(8)
        if resp[0:1] != chr(0x00).encode():
            # Bad data
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        if resp[1:2] != chr(0x5A).encode():
            # Server returned an error (codes 91-93 map to specific
            # messages; anything else is reported as unknown).
            self.close()
            if ord(resp[1:2]) in (91, 92, 93):
                raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
            else:
                raise Socks4Error((94, _socks4errors[4]))
        # Get the bound address/port
        self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
        # BUGFIX: report the real peer address only when it was resolved
        # locally. The original tested `rmtrslv != None`, which is always
        # true for a bool, so with remote resolving getpeername() returned
        # the 0.0.0.1 placeholder instead of the destination name.
        if not rmtrslv:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)

    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server (CONNECT method).
        """
        # If we need to resolve locally, we do this now
        if not self.__proxy[3]:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        self.sendall(("CONNECT " + addr + ":" + str(destport) + " HTTP/1.1\r\n" + "Host: " + destaddr + "\r\n\r\n").encode())
        # We read the response until we get the string "\r\n\r\n"
        # (byte-at-a-time so we never read past the end of the headers).
        resp = self.recv(1)
        while resp.find("\r\n\r\n".encode()) == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ".encode(), 2)
        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        # The HTTP proxy does not report a bound address.
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)

    def connect(self, destpair):
        """connect(self, despair)
        Connects to the specified destination through a proxy.
        destpar - A tuple of the IP/DNS address and the port number.
        (identical to socket's connect).
        To select the proxy server use setproxy().
        Raises GeneralProxyError for malformed input or an unknown
        proxy type; protocol-specific errors come from the negotiators.
        """
        # Do a minimal input check first
        # NOTE(review): type(destpair[0]) != type('') rejects unicode
        # hostnames on Python 2 -- confirm whether callers rely on that.
        if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
            raise GeneralProxyError((5, _generalerrors[5]))
        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks5(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self,(self.__proxy[1], portnum))
            self.__negotiatesocks4(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            _orgsocket.connect(self,(self.__proxy[1], portnum))
            self.__negotiatehttp(destpair[0], destpair[1])
        elif self.__proxy[0] == None:
            # No proxy configured: plain direct connection.
            _orgsocket.connect(self, (destpair[0], destpair[1]))
        else:
            raise GeneralProxyError((4, _generalerrors[4]))
| mit |
Livefyre/django-cms | cms/south_migrations/0047_auto__del_field_title_application_urls.py | 48 | 16352 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Resolve the project's user model in a way that works both on modern
# Django (custom user models via get_user_model) and on Django < 1.5
# (only auth.User exists).
try:
    from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
    from django.contrib.auth.models import User
else:
    User = get_user_model()

# Labels used below to reference the user model in the frozen ORM.
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
# NOTE(review): _meta.model_name presumably requires Django >= 1.6
# (older versions used module_name) -- confirm against supported versions.
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.model_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
    """South schema migration: removes the 'application_urls' column
    from the cms_title table."""

    def forwards(self, orm):
        """Apply the migration: drop the obsolete column."""
        # Deleting field 'Title.application_urls'
        db.delete_column(u'cms_title', 'application_urls')

    def backwards(self, orm):
        """Revert the migration: re-create the column with its previous
        definition (nullable, indexed CharField)."""
        # Adding field 'Title.application_urls'
        db.add_column(u'cms_title', 'application_urls',
                      self.gf('django.db.models.fields.CharField')(blank=True, max_length=200, null=True, db_index=True),
                      keep_default=False)

    # Frozen ORM state generated by South. Do not edit by hand: it is a
    # snapshot of the models at the time this migration was created.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        user_model_label: {
            'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.globalpagepermission': {
            'Meta': {'object_name': 'GlobalPagePermission'},
            'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
        },
        'cms.page': {
            'Meta': {'ordering': "('tree_id', 'lft')", 'object_name': 'Page'},
            'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
            'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
            'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
            'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
            'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.pagemoderatorstate': {
            'Meta': {'ordering': "('page', 'action', '-created')", 'object_name': 'PageModeratorState'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True'})
        },
        'cms.pagepermission': {
            'Meta': {'object_name': 'PagePermission'},
            'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
        },
        'cms.pageuser': {
            'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['%s']" % user_orm_label}),
            u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['%s']" % user_orm_label, 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.pageusergroup': {
            'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['%s']" % user_orm_label}),
            u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'cms.title': {
            'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
            'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
            'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'cms.usersettings': {
            'Meta': {'object_name': 'UserSettings'},
            'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['cms']
matmiiiin/swallow | swallow/inout/JsonFileio.py | 2 | 1406 | import json
from swallow.logger_mp import get_logger_mp
class JsonFileio:
    """Reads all docs of a JSON file and pushes them into a queue."""

    def scan_and_queue(self, p_queue, p_file):
        """ Reads json file and pushes docs to the queue
            If the file contains a list, each doc is pushed in the queue
            If the file contains a doc, the whole doc is pushed in the queue

            p_queue:    Queue where items are pushed to
            p_file:     Json File to scan

            On a read/parse error the failure is logged and the method
            returns without queuing anything.
        """
        logger = get_logger_mp(__name__, self.log_queue, self.log_level, self.formatter)
        logger.info('Scanning json in %s', p_file)

        # BUGFIX: the original leaked the open file handle and, on a parse
        # error, fell through to `isinstance(documents, ...)` with
        # `documents` unbound, crashing with UnboundLocalError. Use a
        # context manager and return early on failure instead.
        try:
            with open(p_file) as json_file:
                documents = json.load(json_file)
        except Exception as e:
            logger.error("Can't read the file %s", p_file)
            logger.error(e)
            return

        if isinstance(documents, list):
            # A list: queue each document and track progress counters.
            for doc in documents:
                p_queue.put(doc)
                with self.counters['nb_items_scanned'].get_lock():
                    self.counters['nb_items_scanned'].value += 1
                    if self.counters['nb_items_scanned'].value % self.counters['log_every'] == 0:
                        logger.info("Scan in progress : {0} items read from source".format(self.counters['nb_items_scanned'].value))
        else:
            # A single document: queue it as-is.
            p_queue.put(documents)
| gpl-2.0 |
JingJunYin/tensorflow | tensorflow/python/framework/importer_test.py | 5 | 47663 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.importer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import op_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
@test_util.with_c_api
class ImportGraphDefTest(test.TestCase):
def _MakeGraphDef(self,
text,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
text = "versions: { producer: %d min_consumer: %d };\n%s" % (producer,
min_consumer,
text)
ret = graph_pb2.GraphDef()
text_format.Merge(text, ret)
return ret
  def testBasic(self):
    """Imports a 4-node graph; checks edges, dtypes, names, and op_defs."""
    with ops.Graph().as_default():
      a, b, c, d = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'IntOutputFloatOutput' }
          node { name: 'B' op: 'ListOutput'
                 attr { key: 'T'
                        value { list { type: DT_INT32 type: DT_FLOAT } } } }
          node { name: 'C' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:0' input: 'B:0' }
          node { name: 'D' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_FLOAT } }
                 input: 'A:1' input: 'B:1' }
          """),
          return_elements=["A", "B", "C", "D"],
          name="import")
      # Assert that the import process creates distinct tensors.
      self.assertNotEqual(a.outputs[0].name, a.outputs[1].name)
      self.assertNotEqual(b.outputs[0].name, b.outputs[1].name)
      self.assertNotEqual(a.outputs[0].name, b.outputs[0].name)
      self.assertNotEqual(a.outputs[0].name, b.outputs[1].name)
      self.assertNotEqual(a.outputs[1].name, b.outputs[0].name)
      self.assertNotEqual(a.outputs[1].name, b.outputs[1].name)
      # Assert that the ops are connected according to the GraphDef topology.
      self.assertEqual(c.inputs[0], a.outputs[0])
      self.assertEqual(c.inputs[1], b.outputs[0])
      self.assertEqual(d.inputs[0], a.outputs[1])
      self.assertEqual(d.inputs[1], b.outputs[1])
      # Check the types of the returned ops and tensors.
      self.assertEqual(a.type, "IntOutputFloatOutput")
      self.assertEqual(b.type, "ListOutput")
      self.assertEqual(c.type, "ListInput")
      self.assertEqual(d.type, "ListInput")
      self.assertEqual(a.outputs[0].dtype, dtypes.int32)
      self.assertEqual(a.outputs[1].dtype, dtypes.float32)
      self.assertEqual(b.outputs[0].dtype, dtypes.int32)
      self.assertEqual(b.outputs[1].dtype, dtypes.float32)
      # Check the names of the returned ops.
      self.assertEqual(a.name, "import/A")
      self.assertEqual(b.name, "import/B")
      self.assertEqual(c.name, "import/C")
      self.assertEqual(d.name, "import/D")
      # Check that the op_def is still available.
      self.assertNotEqual(None, a.op_def)
  def testMultipleImport(self):
    """Repeated imports with name='' get de-duplicated op names (A, A_1, ...)."""
    graph_def = self._MakeGraphDef("""
    node { name: 'A' op: 'IntOutput' }
    node { name: 'B' op: 'IntInput' input: 'A:0' }
    """)
    with ops.Graph().as_default():
      # Initial import
      a, b = importer.import_graph_def(
          graph_def,
          return_elements=["A", "B"],
          name="")
      self.assertEqual(a.name, "A")
      self.assertEqual(b.name, "B")
      self.assertEqual(list(b.inputs), [a.outputs[0]])
      # Repeat the same import
      a1, b1 = importer.import_graph_def(
          graph_def,
          return_elements=["A", "B"],
          name="")
      self.assertEqual(a1.name, "A_1")
      self.assertEqual(b1.name, "B_1")
      self.assertEqual(list(b1.inputs), [a1.outputs[0]])
      # Repeat the same import again
      a2, b2 = importer.import_graph_def(
          graph_def,
          return_elements=["A", "B"],
          name="")
      self.assertEqual(a2.name, "A_2")
      self.assertEqual(b2.name, "B_2")
      self.assertEqual(list(b2.inputs), [a2.outputs[0]])
      # Import with an already-used name
      a3, b3 = importer.import_graph_def(
          graph_def,
          return_elements=["A", "B"],
          name="A")
      self.assertEqual(a3.name, "A_3/A")
      self.assertEqual(b3.name, "A_3/B")
      self.assertEqual(list(b3.inputs), [a3.outputs[0]])
      # Import with existing de-duped node names
      a1_1, b1_1 = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A_1' op: 'IntOutput' }
          node { name: 'B_1' op: 'IntInput' input: 'A_1:0' }
          """),
          return_elements=["A_1", "B_1"],
          name="")
      self.assertEqual(a1_1.name, "A_1_1")
      self.assertEqual(b1_1.name, "B_1_1")
      self.assertEqual(list(b1_1.inputs), [a1_1.outputs[0]])
      # Create a name scope and then import node with same name
      with ops.name_scope("foo"):
        constant_op.constant(1)
      foo, = importer.import_graph_def(
          self._MakeGraphDef("node { name: 'foo' op: 'IntOutput' }"),
          return_elements=["foo"],
          name="")
      self.assertEqual(foo.name, "foo_1")
      # Imported node name can't conflict with intermediate name scope (but can
      # conflict with outer scope and full name scope)
      with ops.name_scope("outer"):
        with ops.name_scope("inner"):
          c = constant_op.constant(1, name="c")
          self.assertEqual(c.op.name, "outer/inner/c")
      outer, inner, new_c, outer_inner, outer_inner_c = (
          importer.import_graph_def(
              self._MakeGraphDef(
                  "node { name: 'outer' op: 'IntOutput' }"
                  "node { name: 'inner' op: 'IntOutput' }"
                  "node { name: 'c' op: 'IntOutput' }"
                  "node { name: 'outer/inner' op: 'IntOutput' }"
                  "node { name: 'outer/inner/c' op: 'IntOutput' }"),
              return_elements=["outer", "inner", "c", "outer/inner",
                              "outer/inner/c"],
              name=""))
      self.assertEqual(outer.name, "outer_1")
      self.assertEqual(inner.name, "inner")
      self.assertEqual(new_c.name, "c")
      self.assertEqual(outer_inner.name, "outer/inner_1")
      self.assertEqual(outer_inner_c.name, "outer/inner/c_1")
  def testInputMap(self):
    """input_map entries replace the mapped tensors; unmapped edges stay intact."""
    with ops.Graph().as_default():
      feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
      feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
      a, b, c, d = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'TwoIntOutputs' }
          node { name: 'B' op: 'TwoIntOutputs' }
          node { name: 'C' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:0' input: 'B:0' }
          node { name: 'D' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:1' input: 'B:1' }
          """),
          input_map={"A:0": feed_a_0,
                     "B:1": feed_b_1},
          return_elements=["A", "B", "C", "D"])
      # Mapped edges point at the feeds; unmapped edges still point at A/B.
      self.assertEqual(c.inputs[0], feed_a_0)
      self.assertEqual(c.inputs[1], b.outputs[0])
      self.assertEqual(d.inputs[0], a.outputs[1])
      self.assertEqual(d.inputs[1], feed_b_1)
  def testInputMapBytes(self):
    """Same as testInputMap, but with bytes input_map keys and return_elements."""
    with ops.Graph().as_default():
      feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
      feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
      a, b, c, d = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'TwoIntOutputs' }
          node { name: 'B' op: 'TwoIntOutputs' }
          node { name: 'C' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:0' input: 'B:0' }
          node { name: 'D' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:1' input: 'B:1' }
          """),
          input_map={b"A:0": feed_a_0,
                     b"B:1": feed_b_1},
          return_elements=[b"A", b"B", b"C", b"D"])
      self.assertEqual(c.inputs[0], feed_a_0)
      self.assertEqual(c.inputs[1], b.outputs[0])
      self.assertEqual(d.inputs[0], a.outputs[1])
      self.assertEqual(d.inputs[1], feed_b_1)
  def testInputMapUnicode(self):
    """Same as testInputMap, but with unicode input_map keys and return_elements."""
    with ops.Graph().as_default():
      feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
      feed_b_1 = constant_op.constant(1, dtype=dtypes.int32)
      a, b, c, d = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'TwoIntOutputs' }
          node { name: 'B' op: 'TwoIntOutputs' }
          node { name: 'C' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:0' input: 'B:0' }
          node { name: 'D' op: 'ListInput'
                 attr { key: 'N' value { i: 2 } }
                 attr { key: 'T' value { type: DT_INT32 } }
                 input: 'A:1' input: 'B:1' }
          """),
          input_map={u"A:0": feed_a_0,
                     u"B:1": feed_b_1},
          return_elements=[u"A", u"B", u"C", u"D"])
      self.assertEqual(c.inputs[0], feed_a_0)
      self.assertEqual(c.inputs[1], b.outputs[0])
      self.assertEqual(d.inputs[0], a.outputs[1])
      self.assertEqual(d.inputs[1], feed_b_1)
def testImplicitZerothOutput(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'IntInput' input: 'A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.inputs[0], a.outputs[0])
def testInputMapImplicitZerothOutput(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(0, dtype=dtypes.int32)
b, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'TwoIntOutputs' }
node { name: 'B' op: 'IntInput' input: 'A:0' }
"""),
input_map={"A": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
def testWithControlDependency(self):
with ops.Graph().as_default():
a, b = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
node { name: 'B' op: 'None' input: '^A' }
"""),
return_elements=["A", "B"])
self.assertEqual(b.control_inputs, [a])
  def testWithRefs(self):
    """A ref-typed output ('RefOutput') can feed both ref and non-ref inputs."""
    with ops.Graph().as_default():
      a, b, c, d = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'RefOutput' }
          node { name: 'B' op: 'IntOutput' }
          node { name: 'C' op: 'TwoIntInputs' input: 'A:0' input: 'B:0' }
          node { name: 'D' op: 'RefInputIntInput' input: 'A:0' input: 'B:0' }
          """),
          return_elements=["A", "B", "C", "D"])
      self.assertEqual(c.inputs[0], a.outputs[0])
      self.assertEqual(c.inputs[1], b.outputs[0])
      self.assertEqual(d.inputs[0], a.outputs[0])
      self.assertEqual(d.inputs[1], b.outputs[0])
      # A's output keeps its ref dtype; C (non-ref inputs) sees plain int32,
      # while D's first input stays int32_ref.
      self.assertEqual(a.outputs[0].dtype, dtypes.int32_ref)
      self.assertEqual(c._input_dtypes, [dtypes.int32, dtypes.int32])
      self.assertEqual(c.outputs, [])
      self.assertEqual(d._input_dtypes, [dtypes.int32_ref, dtypes.int32])
      self.assertEqual(d.outputs, [])
  def testWhileLoop(self):
    """A GraphDef containing a while loop imports and runs to completion."""
    # Produce GraphDef containing while loop.
    graph = ops.Graph()
    with graph.as_default():
      r = control_flow_ops.while_loop(lambda i: i < 10, lambda i: i + 1, [0])
      graph_def = graph.as_graph_def()
    # Import the GraphDef and make sure it runs.
    with ops.Graph().as_default():
      imported_r, = importer.import_graph_def(graph_def,
                                              return_elements=[r.name])
      self.assertEqual(imported_r.name, "import/" + r.name)
      with self.test_session() as sess:
        self.assertEqual(sess.run(imported_r), 10)
  def testTypeMismatchInGraphDef(self):
    """A dtype mismatch on an edge raises ValueError (message differs by API)."""
    if ops._USE_C_API:
      # TODO(skyewm): improve error message
      error_msg = ("Input 0 of node import/B was passed int32 from import/A:0 "
                   "incompatible with expected float.")
    else:
      error_msg = ("Cannot convert a tensor of type int32 to an input of type "
                   "float")
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            node { name: 'B' op: 'FloatInput' input: 'A:0' }
            """))
  def testShapeWhitelist(self):
    """A whitelisted op may import with a stored shape that disagrees."""
    # Barrier's shape is an output vector of 2, but the
    # graph says it's a scalar. This is currently whitelisted.
    with ops.Graph().as_default():
      _ = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'Barrier'
                 attr { key: '_output_shapes'
                        value { list { shape { } } } }
                 attr { key: 'component_types'
                        value { list { type: DT_FLOAT } } } }
          """),
          return_elements=["A"],
          name="import")
  def testShapeWhitelistViolation(self):
    """A non-whitelisted shape mismatch in _output_shapes raises ValueError."""
    # L2 loss produces a scalar shape, but the graph
    # has the wrong shape, so raise an error.
    with ops.Graph().as_default():
      with self.assertRaises(ValueError) as e:
        _ = importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'FloatOutput' }
            node { name: 'B' op: 'L2Loss'
                   input: 'A:0'
                   attr { key: 'T' value { type: DT_FLOAT } }
                   attr { key: '_output_shapes'
                          value { list { shape { dim { size: 43 } } } } } }
            """),
            return_elements=["B"],
            name="import")
      self.assertTrue(
          "Shapes () and (43,) are not compatible" in str(e.exception))
  def testInvalidSignatureTooManyInputsInGraphDef(self):
    """A node with more inputs than its op signature allows fails to import."""
    if ops._USE_C_API:
      # TODO(skyewm): improve error message
      error_msg = "NodeDef expected inputs '' do not match 1 inputs specified"
    else:
      error_msg = r"More inputs specified \('A:0'\) than the op expects"
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            node { name: 'B' op: 'None' input: 'A:0' }
            """))
  def testInvalidSignatureNotEnoughInputsInGraphDef(self):
    """A node with fewer inputs than its op signature requires fails to import."""
    if ops._USE_C_API:
      # TODO(skyewm): improve error message
      error_msg = ("NodeDef expected inputs 'int32, float' do not match 1 "
                   "inputs specified")
    else:
      error_msg = (r"Input types mismatch \(expected 'int32, float32' but "
                   r"got 'int32'\)")
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            node { name: 'B' op: 'IntInputFloatInput' input: 'A:0' }
            """))
  def testMissingInputOpInGraphDef(self):
    """Referencing an undefined source node fails unless input_map supplies it."""
    if ops._USE_C_API:
      error_msg = "Node 'B': Unknown input node 'A:0'"
    else:
      error_msg = "Input tensor 'A:0' not found"
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'B' op: 'FloatInput' input: 'A:0' }
            """))
def testMissingInputOpInGraphDefButAppearsInInputMap(self):
with ops.Graph().as_default():
feed_a_0 = constant_op.constant(5.0)
b, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'B' op: 'FloatInput' input: 'A:0' }
"""),
input_map={"A:0": feed_a_0},
return_elements=["B"])
self.assertEqual(b.inputs[0], feed_a_0)
  def testMissingInputTensorInGraphDef(self):
    """Referencing an out-of-range output index of an existing node fails."""
    if ops._USE_C_API:
      error_msg = ("Node 'B': Connecting to invalid output 1 of source node A "
                   "which has 1 outputs")
    else:
      error_msg = "Input tensor 'A:1' not found"
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'FloatOutput' }
            node { name: 'B' op: 'FloatInput' input: 'A:1' }
            """))
  def testMissingControlInputInGraphDef(self):
    """A control input ('^A') referencing an undefined node fails to import."""
    if ops._USE_C_API:
      error_msg = r"Node 'B': Unknown input node '\^A'"
    else:
      error_msg = r"Control input '\^A' not found"
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'B' op: 'None' input: '^A' }
            """))
  def testInvalidTensorNameOutputIndexInGraphDef(self):
    """A non-numeric output index ('A:B') in an input spec fails to import."""
    if ops._USE_C_API:
      error_msg = "Node 'B': Unknown input node 'A:B'"
    else:
      error_msg = "Cannot convert 'A:B' to a tensor name."
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'B' op: 'None' input: 'A:B' }
            """))
  def testInvalidTensorNameInGraphDef(self):
    """A doubly-qualified input spec ('A:B:0') fails to import."""
    if ops._USE_C_API:
      error_msg = "Node 'B': Unknown input node 'A:B:0'"
    else:
      error_msg = "Cannot convert 'A:B:0' to a tensor name."
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'B' op: 'None' input: 'A:B:0' }
            """))
  def testMissingReturnOperation(self):
    """Requesting a return op that is not in the GraphDef raises ValueError."""
    if ops._USE_C_API:
      error_msg = "Requested return node 'B' not found in graph def"
    else:
      error_msg = "return_element 'B' not found in graph_def."
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'None' }
            """),
            return_elements=["B"])
  def testMissingReturnTensor(self):
    """Bad return tensors fail: out-of-range index, unknown node, bad name."""
    # Case 1: valid node, output index out of range.
    if ops._USE_C_API:
      error_msg = (r"Invalid return output 1 of node 'A', which has 1 "
                   r"output\(s\)")
    else:
      error_msg = "return_element 'A:1' not found in graph_def."
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            """),
            return_elements=["A:1"])
      # Case 2: tensor of a node that does not exist.
      if ops._USE_C_API:
        error_msg = "Requested return tensor 'B:0' not found in graph def"
      else:
        error_msg = "return_element 'B:0' not found in graph_def."
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            """),
            return_elements=["B:0"])
      # Case 3: syntactically invalid tensor name.
      if ops._USE_C_API:
        error_msg = "Cannot convert 'A:B:0' to a tensor name."
      else:
        error_msg = "return_element 'A:B:0' not found in graph_def."
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            """),
            return_elements=["A:B:0"])
  def testMissingInputMap(self):
    """An input_map key that matches nothing in the GraphDef raises ValueError."""
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          ValueError,
          r"Attempted to map inputs that were not found in graph_def: \[B:0\]"):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'None' }
            """),
            input_map={"B:0": constant_op.constant(5.0)})
  def testInputMapUnusedAsInput(self):
    """Mapping an unconsumed-but-valid output is OK; a bad index is not."""
    with ops.Graph().as_default():
      # Mapping an unused node output should succeed.
      importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'IntOutput' }
          """),
          input_map={"A:0": constant_op.constant(5.0)})
      # Mapping a non-existent output of an existing node should fail.
      with self.assertRaisesRegexp(
          ValueError,
          r"Attempted to map inputs that were not found in graph_def: \[A:2\]"):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            """),
            input_map={"A:2": constant_op.constant(5.0)})
  def testInputMapTypeMismatch(self):
    """An input_map tensor with the wrong dtype raises ValueError."""
    if ops._USE_C_API:
      error_msg = ("Input 0 of node import/B was passed float from Const:0 "
                   "incompatible with expected int32.")
    else:
      error_msg = ("Cannot convert a tensor of type float32 to an input of "
                   "type int32.")
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            node { name: 'B' op: 'IntInput' input: 'A:0' }
            """),
            input_map={"A:0": constant_op.constant(5.0)})
def testNoReturns(self):
with ops.Graph().as_default() as g:
ret = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""))
self.assertEqual(ret, None)
a = g.get_operation_by_name("import/A")
self.assertEqual(a.type, "None")
def testOverrideNamePrefix(self):
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"],
name="imported_graph")
self.assertEqual(a.name, "imported_graph/A")
def testDefaultNamePrefix(self):
with ops.Graph().as_default():
a, = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'None' }
"""),
return_elements=["A"],
name=None)
self.assertEqual(a.name, "import/A")
  def testNamePrefixColocationAttrs(self):
    """'loc:@A' colocation class attrs are rewritten with the import prefix."""
    original_graph_def = self._MakeGraphDef("""
          node { name: 'A' op: 'None' }
          node { name: 'B' op: 'None' attr {
            key: '_class'
            value { list { s: 'loc:@A' } }
          } }""")
    with ops.Graph().as_default():
      b, = importer.import_graph_def(
          original_graph_def, return_elements=["B"], name="imported_graph")
      self.assertTrue("_class" in b.node_def.attr)
      self.assertProtoEquals(
          "list { s: 'loc:@imported_graph/A' }",
          b.node_def.attr["_class"])
  def testColocationWithDeviceFn(self):
    """Colocation ('loc:@A') makes A's device-fn result win over B's own."""
    original_graph_def = self._MakeGraphDef("""
          node { name: 'A' op: 'None' attr {
            key: '_class'
            value { list { s: 'loc:@A' } }
          } }
          node { name: 'B' op: 'None' attr {
            key: '_class'
            value { list { s: 'loc:@A' } }
          } }""")
    # A device function that places "A" on one device and "B" on
    # another device. Because B is colocated with A, we test that B's
    # device function is overridden by A.
    def CustomDeviceFn(op):
      if "A" in op.name:
        return "/device:A:0"
      else:
        return "/device:B:0"
    with ops.Graph().as_default():
      with ops.device(CustomDeviceFn):
        a, b = importer.import_graph_def(original_graph_def,
                                         return_elements=["A", "B"],
                                         name="imported_graph")
      self.assertEqual(a.device, "/device:A:0")
      self.assertEqual(b.device, "/device:A:0")
      self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
      self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
    # Test a scenario where 'A' doesn't get a device; 'A' should not have a
    # device, but during runtime will get colocated with 'B' because of the
    # colocation attribute. B's device function is still overridden by A.
    def BDeviceFn(op):
      if "B" in op.name:
        return "/device:B:0"
      return ""
    with ops.Graph().as_default():
      with ops.device(BDeviceFn):
        a, b = importer.import_graph_def(original_graph_def,
                                         return_elements=["A", "B"],
                                         name="imported_graph")
      self.assertEqual(a.device, "")
      self.assertEqual(b.device, "")
      self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
      self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
    # Only A gets a device, so B inherits it implicitly.
    def ADeviceFn(op):
      if "A" in op.name:
        return "/device:A:0"
      return ""
    with ops.Graph().as_default():
      with ops.device(ADeviceFn):
        a, b = importer.import_graph_def(original_graph_def,
                                         return_elements=["A", "B"],
                                         name="imported_graph")
      self.assertEqual(a.device, "/device:A:0")
      self.assertEqual(b.device, "/device:A:0")
      self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
      self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/A"])
  def testMultipleColocationWithDeviceFn(self):
    """A node colocated with two others keeps both colocation groups."""
    original_graph_def = self._MakeGraphDef("""
          node { name: 'A' op: 'None'}
          node { name: 'B' op: 'None'}
          node { name: 'C' op: 'None' attr {
            key: '_class'
            value { list { s: 'loc:@A' s: 'loc:@B' } }
          } }""")
    # A device function that places "B" on a device, and "A" is empty.
    #
    # B and C should contain "/device:B". A will not right now. But
    # because of the colocation property, at runtime it would be
    # placed with B and C.
    def CustomDeviceFn(op):
      if "B" in op.name:
        return "/device:B:0"
      return ""
    with ops.Graph().as_default():
      with ops.device(CustomDeviceFn):
        a, b, c = importer.import_graph_def(original_graph_def,
                                            return_elements=["A", "B", "C"],
                                            name="imported_graph")
      self.assertEqual(a.device, "")
      self.assertEqual(b.device, "/device:B:0")
      self.assertEqual(c.device, "/device:B:0")
      self.assertEqual(a.colocation_groups(), [b"loc:@imported_graph/A"])
      self.assertEqual(b.colocation_groups(), [b"loc:@imported_graph/B"])
      self.assertEqual(c.colocation_groups(),
                       [b"loc:@imported_graph/A", b"loc:@imported_graph/B"])
  def testNamePrefixColocationAttrsMultipleImport(self):
    """Colocation attrs track de-duplicated names across repeated imports."""
    original_graph_def = self._MakeGraphDef("""
          node { name: 'A' op: 'None' }
          node { name: 'B' op: 'None' attr {
            key: '_class'
            value { list { s: 'loc:@A' } }
          } }""")
    with ops.Graph().as_default():
      a, b = importer.import_graph_def(
          original_graph_def, return_elements=["A", "B"], name="")
      a_1, b_1 = importer.import_graph_def(
          original_graph_def, return_elements=["A", "B"], name="")
      self.assertEqual(a.name, "A")
      self.assertEqual(b.name, "B")
      self.assertEqual(b.colocation_groups(), [b"loc:@A"])
      # Second import de-dupes to A_1/B_1 and rewrites B_1's group to match.
      self.assertEqual(a_1.name, "A_1")
      self.assertEqual(b_1.name, "B_1")
      self.assertEqual(b_1.colocation_groups(), [b"loc:@A_1"])
  def testNamePrefixColocationAttrsNotFound(self):
    """A colocation attr referencing an undefined node fails to import."""
    original_graph_def = self._MakeGraphDef("""
          node { name: 'B' op: 'None' attr {
            key: '_class'
            value { list { s: 'loc:@A' } }
          } }""")
    if ops._USE_C_API:
      error_msg = "Node 'B' expects to be colocated with unknown node 'A'"
    else:
      error_msg = "does not exist during import"
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            original_graph_def, return_elements=["B"], name="imported_graph")
def testEmptyGraph(self):
with ops.Graph().as_default() as g:
init_version = g.version
importer.import_graph_def(self._MakeGraphDef(""))
self.assertEqual(init_version, g.version)
def testInvalidInputForGraphDef(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as e:
importer.import_graph_def("")
self.assertEqual("graph_def must be a GraphDef proto.", str(e.exception))
  def testInvalidInputForInputMap(self):
    """input_map must be a dict; non-Tensor values require a non-empty name."""
    with ops.Graph().as_default():
      with self.assertRaises(TypeError) as e:
        importer.import_graph_def(
            self._MakeGraphDef(""), input_map=[constant_op.constant(5.0)])
      self.assertEqual("input_map must be a dictionary mapping strings to "
                       "Tensor objects.", str(e.exception))
    graph_def = self._MakeGraphDef("""
         node { name: 'a' op: 'Placeholder'
                attr { key: 'dtype' value { type: DT_FLOAT } }}
         node { name: 'id' op: 'Identity' input: 'a:0'
                attr { key: 'T' value { type: DT_FLOAT } }}""")
    with ops.Graph().as_default():
      with self.assertRaises(ValueError) as e:
        # A Variable value (not a Tensor) with name="" is rejected.
        importer.import_graph_def(
            graph_def,
            input_map={"a:0": variables.Variable(5.0)},
            name="")
      self.assertStartsWith(str(e.exception),
                            "tf.import_graph_def() requires a non-empty `name` "
                            "if `input_map` contains non-Tensor values.")
    with ops.Graph().as_default():
      # A plain Tensor value with name="" is accepted.
      t, = importer.import_graph_def(
          graph_def,
          input_map={"a:0": constant_op.constant(5.0)},
          name="",
          return_elements=["id:0"])
      with self.test_session():
        self.assertEqual(5.0, t.eval())
  def testInvalidInputForReturnOperations(self):
    """return_elements must be strings; malformed names raise ValueError."""
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(
          TypeError, "return_elements must be a list of strings."):
        importer.import_graph_def(self._MakeGraphDef(""), return_elements=[7])
      if ops._USE_C_API:
        error_msg = "Cannot convert 'a:b:c' to a tensor name."
      else:
        error_msg = "Requested return_element 'a:b:c' not found in graph_def."
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(self._MakeGraphDef(""),
                                  return_elements=["a:b:c"])
  def testDuplicateOperationNames(self):
    """Two nodes with the same name in one GraphDef fail to import."""
    if ops._USE_C_API:
      error_msg = "Node 'A' is not unique"
    else:
      error_msg = "Duplicate name 'A' in GraphDef."
    with ops.Graph().as_default():
      with self.assertRaisesRegexp(ValueError, error_msg):
        importer.import_graph_def(
            self._MakeGraphDef("""
            node { name: 'A' op: 'IntOutput' }
            node { name: 'B' op: 'IntOutput' }
            node { name: 'A' op: 'IntOutput' }
            """))
  def testWithExtensionAndAttr(self):
    """A round-tripped graph with an attr-carrying op (stack/pack) still runs."""
    with ops.Graph().as_default() as g:
      c = constant_op.constant(5.0, dtype=dtypes.float32, name="c")
      array_ops.stack([c, c], name="pack")
      gdef = g.as_graph_def()
    with self.test_session():
      pack, = importer.import_graph_def(gdef, return_elements=["pack"])
      self.assertAllEqual(pack.outputs[0].eval(), [5.0, 5.0])
  def testWithDevice(self):
    """Imported ops keep their devices, merged with the surrounding ops.device."""
    with ops.Graph().as_default() as g:
      # No device.
      a = constant_op.constant(3.0, name="a")
      with ops.device("/cpu:0"):
        b = constant_op.constant(4.0, name="b")
      with ops.device("/job:worker"):
        c = constant_op.constant(5.0, name="c")
      gdef = g.as_graph_def()
    # No surrounding device: originals are preserved verbatim.
    with ops.Graph().as_default():
      a2, b2, c2 = importer.import_graph_def(
          gdef, return_elements=["a", "b", "c"])
      self.assertEqual(a.device, a2.device)
      self.assertEqual(b.device, b2.device)
      self.assertEqual(c.device, c2.device)
    with ops.Graph().as_default():
      with ops.device(device.merge_device("/task:0")):
        a3, b3, c3 = importer.import_graph_def(
            gdef, return_elements=["a", "b", "c"])
        self.assertEqual("/task:0", a3.device)
        self.assertEqual("/task:0/device:CPU:0", b3.device)  # canonicalized.
        self.assertEqual(c.device + "/task:0", c3.device)
    with ops.Graph().as_default():
      with ops.device(device.merge_device("/job:ps")):
        a4, b4, c4 = importer.import_graph_def(
            gdef, return_elements=["a", "b", "c"])
        self.assertEqual("/job:ps", a4.device)
        self.assertEqual("/job:ps/device:CPU:0", b4.device)  # canonicalized.
        self.assertEqual(c.device, c4.device)  # worker overrides ps.
    with ops.Graph().as_default():
      with ops.device(device.merge_device("/device:GPU:0")):
        a5, b5, c5 = importer.import_graph_def(
            gdef, return_elements=["a", "b", "c"])
        self.assertEqual("/device:GPU:0", a5.device)
        self.assertEqual("/device:CPU:0", b5.device)  # cpu overrides gpu.
        self.assertEqual(c.device + "/device:GPU:0", c5.device)
  def testWithDeviceFunctionDependingOnInputs(self):
    """Device functions see imported ops' inputs already connected."""
    with ops.Graph().as_default() as g:
      with ops.device("/job:ps"):
        v1 = constant_op.constant(1.0)
        v2 = constant_op.constant(1.0)
      _ = v1 + v2
      _ = v1 - v2
      _ = array_ops.identity(v1)
      gdef = g.as_graph_def()
    # We'll use the following device function to observe ops with two inputs.
    ops_with_two_inputs = []
    def InputCounter(op):
      if len(op.inputs) == 2:
        ops_with_two_inputs.append(op)
      return ""
    with ops.Graph().as_default() as g:
      with ops.device(InputCounter):
        importer.import_graph_def(gdef)
    # We expect to see the add and subtract, but not identity.
    self.assertEqual(2, len(ops_with_two_inputs))
  def testGradient(self):
    """Gradients can be taken through ops imported via input_map remapping."""
    with ops.Graph().as_default() as g:
      inputs = array_ops.placeholder(
          dtypes.float32, shape=[None, 100], name="input")
      weights = array_ops.placeholder(
          dtypes.float32, shape=[100, 10], name="weights")
      biases = array_ops.placeholder(dtypes.float32, shape=[10], name="biases")
      activations = nn_ops.relu(
          math_ops.matmul(inputs, weights) + biases, name="activations")
      loss = math_ops.reduce_mean(activations, name="loss")
      gdef = g.as_graph_def()
    with ops.Graph().as_default() as g:
      input_placeholder = array_ops.placeholder(dtypes.float32, shape=[32, 100])
      weights_var = variables.Variable(
          random_ops.truncated_normal([100, 10]), name="weights")
      biases_var = variables.Variable(array_ops.zeros([10]), name="biases")
      # Remap the placeholders onto variables so gradients have targets.
      activations, loss = importer.import_graph_def(
          gdef,
          input_map={
              "input:0": input_placeholder,
              "weights:0": weights_var,
              "biases:0": biases_var
          },
          return_elements=["activations:0", "loss:0"])
      self.assertEqual([32, 10], activations.get_shape())
      self.assertEqual([], loss.get_shape())
      weights_grad, biases_grad = gradients_impl.gradients(
          loss, [weights_var, biases_var])
      self.assertEqual([100, 10], weights_grad.get_shape())
      self.assertEqual([10], biases_grad.get_shape())
  def testLargeGraph(self):
    """A constant above the proto size warning threshold still evaluates."""
    with self.test_session():
      # The default message byte limit is 64M. Ours is 2G with a warning at 512.
      # Adding a 130M entries float32 tensor should exceed the warning, but not
      # the hard limit.
      input_shape = [130, 1000, 1000]
      tensor_input = np.ones(input_shape, dtype=np.float32)
      t = constant_op.constant(tensor_input, shape=input_shape)
      g = array_ops.identity(t)
      g.eval()
  def testVersion(self):
    """Producer/min_consumer versions in the supported range import verbatim."""
    v0 = versions.GRAPH_DEF_VERSION_MIN_CONSUMER
    v2 = versions.GRAPH_DEF_VERSION
    v1 = (v0 + v2) // 2
    for producer in v0, v1, v2:
      for min_consumer in v0, v1, v2:
        with ops.Graph().as_default():
          a, = importer.import_graph_def(
              self._MakeGraphDef(
                  "node { name: 'A' op: 'TwoIntOutputs' }",
                  producer=producer,
                  min_consumer=min_consumer),
              return_elements=["A"])
          self.assertEqual(a.graph.graph_def_versions.producer, producer)
          self.assertEqual(a.graph.graph_def_versions.min_consumer,
                           min_consumer)
  def testVersionLow(self):
    """A producer version below the supported minimum is rejected."""
    with ops.Graph().as_default() as g:
      pat = (r"GraphDef producer version -1 below min producer %d supported "
             r"by TensorFlow \S+\. Please regenerate your graph.$" %
             versions.GRAPH_DEF_VERSION_MIN_PRODUCER)
      # C API throws error during import, Python-only throws error during run
      if ops._USE_C_API:
        with self.assertRaisesRegexp(Exception, pat):
          importer.import_graph_def(self._MakeGraphDef("", producer=-1))
      else:
        importer.import_graph_def(self._MakeGraphDef("", producer=-1))
        x = constant_op.constant(
            7)  # Need at least one op to get a C++ graph generated
        with self.test_session(graph=g) as sess:
          with self.assertRaisesRegexp(Exception, pat):
            sess.run(x)
  def testVersionHigh(self):
    """A min_consumer version above the current version is rejected."""
    with ops.Graph().as_default() as g:
      pat = (r"GraphDef min consumer version %d above current version %d "
             r"for TensorFlow \S+\. Please upgrade TensorFlow\.$" %
             (1 << 30, versions.GRAPH_DEF_VERSION))
      if ops._USE_C_API:
        with self.assertRaisesRegexp(ValueError, pat):
          importer.import_graph_def(self._MakeGraphDef("",
                                                       min_consumer=1 << 30))
      else:
        # Python API only throws when graph is run
        importer.import_graph_def(self._MakeGraphDef("", min_consumer=1 << 30))
        x = constant_op.constant(
            7)  # Need at least one op to get a C++ graph generated
        with self.test_session(graph=g) as sess:
          with self.assertRaisesRegexp(Exception, pat):
            sess.run(x)
  def testVersionAppliesToOpConstruction(self):
    """The GraphDef producer version is visible to op shape functions.

    These tests rely on shape fns in test_ops.cc.
    """
    # Older producer version: the version-gated op constructs fine.
    with ops.Graph().as_default():
      importer.import_graph_def(
          self._MakeGraphDef(
              "node { name: 'A' op: 'RequiresOlderGraphVersion' }",
              producer=versions.GRAPH_DEF_VERSION - 1),
          return_elements=["A"])
    # Current producer version: the same op's shape fn rejects the graph.
    with ops.Graph().as_default():
      with self.assertRaisesWithPredicateMatch(ValueError,
                                               "Wrong graph version.*"):
        importer.import_graph_def(
            self._MakeGraphDef(
                "node { name: 'A' op: 'RequiresOlderGraphVersion' }",
                producer=versions.GRAPH_DEF_VERSION),
            return_elements=["A"])
def testDefaultAttrsAdded(self):
with ops.Graph().as_default():
a = importer.import_graph_def(
self._MakeGraphDef("""
node { name: 'A' op: 'OpWithDefaultAttr' }
"""),
return_elements=["A"])
self.assertEqual(123.0, a[0].get_attr("default_float"))
def testDefaultAttrsRemoved(self):
  """Attrs that match a default in producer_op_list are stripped on import."""
  producer_op_list = op_def_pb2.OpList()
  text_format.Merge("""
  op {
    name: 'OpWithFutureDefaultAttr'
    attr { name: 'default_int' type: 'int' default_value { i: 456 } }
  }
  """, producer_op_list)
  # Attr only in producer_op_list with default value gets removed, so asking
  # for it after import raises.
  with ops.Graph().as_default():
    imported = importer.import_graph_def(
        self._MakeGraphDef("""
        node { name: 'A' op: 'OpWithFutureDefaultAttr'
               attr { key: 'default_int' value { i: 456 } } }
        """),
        return_elements=["A"],
        producer_op_list=producer_op_list)
    if ops._USE_C_API:
      expected_message = "Operation 'import/A' has no attr named 'default_int'."
    else:
      expected_message = "No attr named 'default_int'"
    with self.assertRaisesRegexp(ValueError, expected_message):
      imported[0].get_attr("default_int")

  # Unknown attrs cannot be imported using C API. This test will eventually be
  # deleted.
  if not ops._USE_C_API:
    # Attr only in producer_op_list with non-default value is preserved.
    with ops.Graph().as_default():
      imported = importer.import_graph_def(
          self._MakeGraphDef("""
          node { name: 'A' op: 'OpWithFutureDefaultAttr'
                 attr { key: 'default_int' value { i: 987 } } }
          """),
          return_elements=["A"],
          producer_op_list=producer_op_list)
      self.assertEqual(987, imported[0].get_attr("default_int"))
def testFunctions(self):
  """Functions (and their gradients) survive GraphDef export and re-import.

  Builds a graph containing a Defun with a custom gradient, a Defun that
  captures an external graph tensor, and a nested Defun; exports it, imports
  it into a fresh graph, runs it, then round-trips it a second time.
  """
  dtype = dtypes.float32

  @function.Defun(dtype, dtype, dtype, dtype)
  def Grad(x, y, dout1, dout2):  # pylint: disable=unused-argument
    # Return the inputs for simplicity of testing. The correct return value
    # would be (dout1 + dout2, dout1 - dout2)
    return x, y

  @function.Defun(dtype, dtype, grad_func=Grad)
  def FuncWithGrad(x, y):
    return x + y, x - y

  @function.Defun(dtypes.int32)
  def ExternalTensorFunc(x):
    # c must be defined in the containing graph
    return x + c

  @function.Defun(dtypes.int32, dtypes.int32)
  def OuterFunc(x, y):

    @function.Defun(dtypes.int32)
    def InnerFunc(x):
      return x + x

    return InnerFunc(x) + y

  # Create graph with function calls and export to GraphDef
  with ops.Graph().as_default() as g1:
    p1 = array_ops.placeholder(dtype, name="p1")
    p2 = array_ops.placeholder(dtype, name="p2")
    # pylint: disable=unexpected-keyword-arg
    a, b = FuncWithGrad(p1, p2, name="f")
    # NOTE: c is created *before* ExternalTensorFunc is instantiated, so the
    # Defun body above can capture it from this containing graph.
    c = constant_op.constant(10, dtype=dtypes.int32)
    ExternalTensorFunc(1, name="external")
    OuterFunc(10, 1, name="outer")
    # pylint: enable=unexpected-keyword-arg
  gdef = g1.as_graph_def()

  # Import GraphDef into new graph, add imported gradients, and test that
  # imported functions can be run
  with ops.Graph().as_default() as g2:
    p1, p2, a, b = importer.import_graph_def(
        gdef, return_elements=["p1:0", "p2:0", "f:0", "f:1"], name="")
    grad = gradients_impl.gradients([a], [p1, p2])

  with self.test_session(graph=g2) as sess:
    feed_dict = {p1: 1, p2: 2}
    a_val, b_val, grad_val = sess.run([a, b, grad], feed_dict=feed_dict)
    self.assertEqual(a_val, 3.0)
    self.assertEqual(b_val, -1.0)
    # Grad function returns inputs values for testing
    self.assertEqual(grad_val, [1.0, 2.0])
    self.assertEqual(sess.run("external:0"), 11)
    self.assertEqual(sess.run("outer:0"), 21)

  # Export the new graph and reimport to test that imported functions can be
  # successfully exported/imported again
  gdef = g2.as_graph_def()
  with ops.Graph().as_default() as g3:
    p1, p2, a, b = importer.import_graph_def(
        gdef, return_elements=["p1:0", "p2:0", "f:0", "f:1"], name="")
    # Create new gradient functions (in additional to the imported gradient
    # functions created in g2).
    grad = gradients_impl.gradients([a], [p1, p2])

  with self.test_session(graph=g3) as sess:
    feed_dict = {p1: 1, p2: 2}
    a_val, b_val, grad_val = sess.run([a, b, grad], feed_dict=feed_dict)
    self.assertEqual(a_val, 3.0)
    self.assertEqual(b_val, -1.0)
    self.assertEqual(grad_val, [1.0, 2.0])
    self.assertEqual(sess.run("external:0"), 11)
    self.assertEqual(sess.run("outer:0"), 21)
def testImportInsideDefun(self):
  """import_graph_def also works when invoked from inside a Defun body."""
  source_graph = ops.Graph()
  with source_graph.as_default():

    @function.Defun()
    def Add2(x, y):
      return math_ops.add(x, y)

    x = constant_op.constant(3.0, dtype=dtypes.float32)
    y = constant_op.constant(-5.0, dtype=dtypes.float32)
    z = Add2(x, y, name="z")  # pylint: disable=unexpected-keyword-arg
  graph_def = source_graph.as_graph_def()

  @function.Defun()
  def TestFunc():
    # Importing here places the imported ops inside the function's graph.
    return importer.import_graph_def(graph_def, return_elements=["z:0"])[0]

  z = TestFunc()
  with self.test_session():
    # 3.0 + (-5.0) == -2.0
    self.assertEqual(z.eval(), -2.0)
def testImportGraphWithFunctionTwice(self):
  """A function-bearing GraphDef can be imported more than once per graph."""
  graph = ops.Graph()
  with graph.as_default():

    @function.Defun()
    def Add2(x, y):
      return math_ops.add(x, y)

    x = array_ops.placeholder(dtype=dtypes.float32, name="x")
    y = array_ops.placeholder(dtype=dtypes.float32, name="y")
    _ = Add2(x, y, name="z")  # pylint: disable=unexpected-keyword-arg
  graph_def = graph.as_graph_def()

  # Feed both imported copies the same random inputs so their outputs must
  # agree if the duplicate import worked.
  x = random_ops.random_uniform(dtype=dtypes.float32, shape=())
  y = random_ops.random_uniform(dtype=dtypes.float32, shape=())
  input_map = {"x:0": x, "y:0": y}

  with ops.name_scope("first"):
    z1 = importer.import_graph_def(
        graph_def, return_elements=["z:0"], input_map=input_map)[0]
  with ops.name_scope("second"):
    z2 = importer.import_graph_def(
        graph_def, return_elements=["z:0"], input_map=input_map)[0]

  with self.test_session() as session:
    first_val, second_val = session.run((z1, z2))
    self.assertAllEqual(first_val, second_val)
if __name__ == "__main__":
  # Run all test cases in this module under the TensorFlow test runner.
  test.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.