prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# Simple implementation of a json test runner to run the test against json-py.
import sys
import os.path
import json
import types
if len(sys.argv) != 2:
print "Usage: %s input-json-file", sys.argv[0]
sys.exit(3)
input_path = sys.argv[1]
base_path = os.path.splitext(input_path)[0]
actual_path = base_path + '.actual'
rewrite_path = base_path + '.rewrite'
rewrite_actual_path = base_path + '.actual-rewrite'
def valueTreeToString( fout, value, path = '.' ):
ty = type(value)
if ty is types.DictType:
fout.write( '%s={}\n' % path )
suffix = path[-1] != '.' and '.' or ''
names = value.keys()
names.sort()
for name in names:
valueTreeToString( fout, value[name], path + suffix + name )
elif ty is types.ListType:
fout.write( '%s=[]\n' % path )
for index, childValue in zip( xrange(0,len(value)), value ):
valueTreeToString( fout, childValue, path + '[%d]' % index )
elif ty is types.StringType:
fout.write( '%s="%s"\n' % (path,value) )
elif ty is types.IntType:
fout.write( '%s=%d\n' % (path,value) )
elif ty is types.FloatType:
fout.write( '%s=%.16g\n' % (path,value) )
elif value is True:
fout.write( '%s=true\n' % path )
elif value is False:
fout.write( '%s=false\n' % path )
elif value is None:
fout.write( '%s=null\n' % path )
else:
assert False and "Unexpected value type"
def parseAndSaveValueTree( input, actual_path ):
root = json.read( input )
fout = file( actual_path, 'wt' )
va | lueTreeToString( fout, root )
fout.close()
return root
def rewriteValueTree( value, rewrite_path ):
rewrite = json.write( value )
rewrite = rewrite[1:-1] # Somehow the string is quoted ! jsonpy bug ?
file( rewrite_path, 'wt').write( rewrite + '\n' )
return rewrite
input = file( input_path, 'rt' ).read()
root = parseAndSaveValueTree( input, actual_path )
r | ewrite = rewriteValueTree( json.write( root ), rewrite_path )
rewrite_root = parseAndSaveValueTree( rewrite, rewrite_actual_path )
sys.exit( 0 )
|
rt six
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Utilities.List import fromChar
from DIRAC.Core.Utilities.ClassAd.ClassAdLight import ClassAd
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getDIRACPlatform
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
__RCSID__ = '$Id$'
def getQueuesResolved(siteDict):
"""
Get the list of queue descriptions merging site/ce/queue parameters and adding some
derived parameters.
:param dict siteDict: dictionary with configuration data as returned by Resources.getQueues() method
:return: S_OK/S_ERROR, Value dictionary per queue with configuration data updated, e.g. for SiteDirector
"""
queueFinalDict = {}
for site in siteDict:
for ce, ceDict in siteDict[site].items():
qDict = ceDict.pop('Queues')
for queue in qDict:
queueName = '%s_%s' % (ce, queue)
queueDict = qDict[queue]
queueDict['Queue'] = queue
queueDict['Site'] = site
# Evaluate the CPU limit of the queue according to the Glue convention
# To Do: should be a utility
if "maxCPUTime" in queueDict and "SI00" in queueDict:
maxCPUTime = float(queueDict['maxCPUTime'])
# For some sites there are crazy values in the CS
maxCPUTime = max(maxCPUTime, 0)
maxCPUTime = min(maxCPUTime, 86400 * 12.5)
si00 = float(queueDict['SI00'])
queueCPUTime = 60 / 250 * maxCPUTime * si00
queueDict['CPUTime'] = int(queueCPUTime)
# Tags & RequiredTags defined on the Queue level and on the CE level are concatenated
# This also converts them from a string to a list if required.
for tagFieldName in ('Tag', 'RequiredTag'):
ceTags = ceDict.get(tagFieldName, [])
if isinstance(ceTags, six.string_types):
ceTags = fromChar(ceTags)
queueTags = queueDict.get(tagFieldName, [])
if isinstance(queueTags, six.string_types):
queueTags = fromChar(queueTags)
queueDict[tagFieldName] = list(set(ceTags + queueTags))
# Some parameters can be defined on the CE level and are inherited by all Queues
for parameter in ['MaxRAM', 'NumberOfProcessors', 'WholeNode']:
queueParameter = queueDict.get(parameter, ceDict.get(parameter))
if queueParameter:
queueDict[parameter] = queueParameter
# If we have a multi-core queue add MultiProcessor tag
if queueDict.get('NumberOfProcessors', 1) > 1:
queueDict.setdefault('Tag', []).append('MultiProcessor')
queueDict['CEName'] = ce
queueDict['GridCE'] = ce
queueDict['CEType'] = ceDict['CEType']
queueDict['GridMiddleware'] = ceDict['CEType']
queueDict['QueueName'] = queue
platform = queueDict.get('Platform', ceDict.get('Platform', ''))
if not platform and "OS" in ceDict:
architecture = ceDict.get('architecture', 'x86_64')
platform = '_'.join([architecture, ceDict['OS']])
queueDict['Platform'] = platform
if platform:
result = getDIRACPlatform(platform)
if result['OK']:
queueDict['Platform'] = result['Value'][0]
queueFinalDict[queueName] = queueDict
return S_OK(queueFinalDict)
def matchQueue(jobJDL, queueDict, fullMatch=False):
"""
Match the job description to the queue definition
:param str job: JDL job description
:param bool fullMatch: test matching on all the criteria
:param dict queueDict: queue parameters dictionary
:return: S_OK/S_ERROR, Value - result of matching, S_OK if matched or
S_ERROR with the reason for no match
"""
# Check the job description validity
job = ClassAd(jobJDL)
if not job.isOK():
return S_ERROR('Invalid job description')
noMatchReasons = []
# Check job requirements to resource
# 1. CPUTime
cpuTime = job.getAttributeInt('CPUTime')
if not cpuTime:
cpuTime = 84600
if cpuTime > queueDict.get('CPUTime', 0.):
noMatchReasons.append('Job CPUTime requirement not satisfied')
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Multi-value match requirements
for parameter in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
if parameter in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[parameter]))
if valueSet and queueSet and not valueSet.intersection(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. Banned multi-value match requirements
for par in ['Site', 'GridCE', 'Platform', 'GridMiddleware',
'PilotType', 'SubmitPool', 'JobType']:
parameter = "Banned%s" % par
if par in queueDict:
valueSet = set(job.getListFromExpression(parameter))
if not valueSet:
valueSet = set(job.getListFromExpression('%ss' % parameter))
queueSet = set(fromChar(queueDict[par]))
if valueSet and queueSet and valueSet.issubset(queueSet):
valueToPrint = ','.join(valueSet)
if len(valueToPrint) > 20:
valueToPrint = "%s..." % valueToPrint[:20]
noMatchReasons.append('Job %s %s requirement not satisfied' % (parameter, valueToPrint))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 4. Tags
tags = set(job.getListFromExpression('Tag'))
nProc = job.getAttributeInt('NumberOfProcessors')
if nProc and nProc > 1:
tags.add('MultiProcessor')
wholeNode = job.getAttributeString('WholeNode')
if wholeNode:
tags.add('WholeNode')
queueTags = set(queueDict.get('Tags', []))
if not tags.issubset(queueTags):
noMatchReasons.append('Job Tag %s not satisfied' % ','.join(tags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 4. MultiProcessor requirements
if nProc and nProc > int(queueDict.get('NumberOfProcessors', 1)):
noMatchReasons.append('Job NumberOfProcessors %d requirement not satisfied' % nProc)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 5. RAM
ram = job.getAttributeInt('RAM')
# If MaxRAM is not specified in the queue description, assume 2GB
if ram and ram > int(queueDict.get('MaxRAM', 2048)) / 1024:
noMatchReasons.append('Job RAM %d requirement not satisfied' % ram)
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# Check resource requirements to job
# 1. OwnerGroup - rare case but still
if "OwnerGroup" in queueDict:
result = getProxyInfo(disableVOMS=True)
if not result['OK']:
return S_ERROR('No valid proxy available')
ownerGroup = result['Value']['group']
if ownerGroup != queueDict['OwnerGroup']:
noMatchReasons.append('Resource OwnerGroup %s requirement not satisfied' % queueDict['OwnerGroup'])
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 2. Required tags
requiredTags = set(queueDict.get('RequiredTags', []))
if not requiredTags.issubset(tags):
noMatchReasons.append('Resource RequiredTags %s not satisfied' % ','.join(requiredTags))
if not fullMatch:
return S_OK({'Match': False, 'Reason': noMatchReasons[0]})
# 3. RunningLimit
site = queueDict['Site']
opsHelper = Oper | ations()
result = opsHelper.getSections('JobScheduling/RunningLimit')
if result['OK'] and site in result['Value']:
result = opsHelper.getSections('JobScheduling/RunningLimit/%s' % site)
if result['OK']:
for parameter in result['Value']:
value = job.getAttributeStrin | g(parameter)
if value and opsHelper.getV |
# -*- coding: utf-8 -*-
"""
Pygments
~~~~~~~~
Pygments is a syntax highlighting package written in Python.
It is a generic syntax highlighter for general use in all kinds of software
such as forum systems, wikis or other applications that need to prettify
source code. Highlights are:
* a wide range of common languages and markup formats is supported
* special attention is paid to details, increasing quality by a fair amount
* support for new languages and formats are added easily
* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
formats that PIL supports, and ANSI sequences
* it is usable as a command-line tool and as a library
* ... and it highlights even Brainfuck!
The `Pygments tip`_ is installable with ``easy_install Pygments==dev``.
.. _Pygments tip:
http://bitbucket.org/birkenfeld/pygments-main/get/tip.zip#egg=Pygments-dev
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
__version__ = '1.6'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
import sys
from pygments.util import StringIO, BytesIO
def lex(code, lexer):
"""
Lex ``code`` with ``lexer`` and return an iterable of tokens.
"""
try:
return lexer.get_tokens(code)
except TypeError, err:
if isinstance(err.args[0], str) and \
'unbound method get_tokens' in err.args[0]:
raise TypeError('lex() argument must be a lexer instance, '
'not a class')
raise
def format(tokens, formatter, outfile=None):
"""
Format a tokenlist ``tokens`` with the formatter ``formatter``.
If ``outfile`` is given and a vali | d file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
try:
if not outfile:
#print formatter, 'using', formatter.encoding
realoutfile = formatter.encoding and BytesIO() or StringIO()
formatter.format(tokens, realoutfile | )
return realoutfile.getvalue()
else:
formatter.format(tokens, outfile)
except TypeError, err:
if isinstance(err.args[0], str) and \
'unbound method format' in err.args[0]:
raise TypeError('format() argument must be a formatter instance, '
'not a class')
raise
def highlight(code, lexer, formatter, outfile=None):
"""
Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
If ``outfile`` is given and a valid file object (an object
with a ``write`` method), the result will be written to it, otherwise
it is returned as a string.
"""
return format(lex(code, lexer), formatter, outfile)
if __name__ == '__main__':
from pygments.cmdline import main
sys.exit(main(sys.argv))
|
import shelve
shelve_name = "data"
def savePref(user, key, value):
d = shel | ve.open(shelve_name)
d[str(user) + '.' + str(key)] = value
d.close()
def openPref(user, key, default):
d = shelve.open(shelve_name)
if (str(user) | + '.' + str(key)) in d:
return d[str(user) + '.' + str(key)]
else:
return default
|
#!/usr/bin/python
''' this file contains functions for experimenting with the different players and for running many trials and averaging results '''
from Player import RandomPlayer
from MCPlayer import MCPlayer, PNGSPlayer, GreedyPlayer
import Board
import sys, traceback
import time
def runRandomPlayerFirstSol(fileName, size, numSamples, depth):
# numSamples and depth are useless here, just makes it more convenient to call an arbitrary function
try:
rr = Board.StandardBoard(size, size, fileName)
rPlayer = RandomPlayer(rr)
rPlayer.setTarget()
moveSequence, numMoves = rPlayer.findFirstSolutionNoTimeLimit()
if rr.validateMoveSequence(moveSequence):
# if the move sequence
#print("valid sequence with {0} moves!".format(numMoves))
return numMoves
else:
print("Invalid sequence with {0} moves!".format(numMoves))
return None
except:
print("exception in runRandomPlayerFirstSol")
traceback.print_exc(file=sys.stdout)
return None
def runMCPlayerFirstSol(fileName, size, numSamples, depth):
try:
rr = Board.StandardBoard(size, size, fileName)
reachableWeight = 4
LBWeight = 1
totalReachableWeight = 3
mcPlayer = MCPlayer(rr, reachableWeight, LBWeight, totalReachableWeight)
mcPlayer.setTarget()
moveSequence, numMoves = mcPlayer.findFirstSolutionNoTimeLimit(numSamples, depth)
if rr.validateMoveSequence(moveSequence):
# if the move sequence
#print("valid sequence with {} moves!".format(numMoves))
return numMoves
else:
print("Invalid sequence with {} moves!".format(numMoves))
return None
except:
print("exception in runMCPlayerFirstSolution")
traceback.print_exc(file=sys.stdout)
return None
def runPNGSPlayerFirstSol(fileName, size, numSamples, depth):
try:
rr = Board.StandardBoard(size, size, fileName)
reachableWeight = 4
LBWeight = 1
totalReachableWeight = 3
pngsPlayer = PNGSPlayer(rr, reachableWeight, LBWeight, totalReachableWeight)
pngsPlayer.setTarget()
moveSequence, numMoves, numMovesBeforePNGS, findTime, pngsTime = pngsPlayer.findFirstSolutionNoTimeLimit(numSamples, depth)
if rr.validateMoveSequence(moveSequence):
# if the move sequence
#print("valid sequence with {} moves!".format(numMoves))
return numMoves, numMovesBeforePNGS, findTime, pngsTime
else:
print("Invalid sequence with {} moves!".format(numMoves))
return None
except:
print("exception in runPNGSPlayerFirstSolution")
traceback.print_exc(file=sys.stdout)
return None
def runGreedyPlayerFirstSol(fileName, size, numSamples, depth):
try:
rr = Board.StandardBoard(size, size, fileName)
reachableWeight = 4
LBWeight = 1
totalReachableWeight = 3
greedyPlayer = GreedyPlayer(rr, reachableWeight, LBWeight, totalReachableWeight)
greedyPlayer.setTarget()
moveSequence, numMoves, numMovesBeforePNGS, findTime, pngsTime = greedyPlayer.findFirstSolutionNoTimeLimit()
if rr.validateMoveSequence(moveSequence):
# if the move sequence
#print("valid sequence with {} moves!".format(numMoves))
return numMoves, numMovesBeforePNGS, findTime, pngsTime
else:
print("Invalid sequence with {} moves!".format(numMoves))
return None
except:
print("exception in runGreedyPlayerFirstSolution")
traceback.print_exc(file=sys.stdout)
return None
def playMultiplePNGSGames(function, numGames, fileName, size, numSamples, depth):
totalPNGSMoves = 0
totalFindMoves = 0
results = []
totalFindTime = 0
totalPNGSTime = 0
for i in xrange(numGames):
print("startGame {0}".format(i))
numMoves, numMovesBeforePNGS, findTime, pngsTime = function(fileName, size, numSamples, depth)
totalFindTime += findTime
totalPNGSTime += pngsTime
if numMoves == None:
print("Problem in function {0}".format(function))
sys.exit(-1)
else:
results.append((numMoves, numMovesBeforePNGS, findTime, pngsTime))
totalPNGSMoves += numMoves
totalFindMoves += numMovesBeforePNGS
return totalPNGSMoves/float(numGames), totalFindMoves/float(numGames), totalFindTime/float(numGames), totalPNGSTime/float(numGames), results
def playMultipleGames(function, numGames, fileName, size, numSamples, depth):
totalMoves = 0
results = []
for i in xrange(numGames):
numMoves = function(fileName, size, numSamples, depth)
if numMoves == None:
print("Problem in function {0}".format(function))
sys.exit(-1)
else:
results.append(numMoves)
totalMoves += currentMoves
return totalMoves/float(numGames), results
if __name__ == "__main__":
numGames = 10
numSamples = 10
depth = 4
fileName = "builtin4.txt"
print("Using file = {0}".format(fileName))
for depth in [3,4,5]: #,6,7,8]:
for numSamples in [14, 16, 18]: #8,10,12,14,16]:
print("Running PNGS with numGames = {2}, depth = {0} and numSamples = {1}".format(depth, numSamples, numGames))
PNGSAverage, MCAverage, findTime, pngsTime, PNGSResults = playMultiplePNGSGames(runPNGSPlayerFirstSol, numGames, fileName, 16, numSamples, depth)
#print(PNGSDict)
print("Average Number of Moves Per Game = {0}".format(PNGSAverage))
print("Average Number of Moves Per Game Before Improvement = {0}".format(MCAverage))
print("Average findTime per game = {0}".format(findTime))
print("Average pngsTime per game = {0}".format(pngsTime))
print(PNGSResults)
print("")
'''
print("Running Greedy with numGames = {0}".format(numGames))
PNGSAverage, MCAverage, findTime, pngsTime, PNGSResults = playMultiplePNGSGames(runGreedyPlayerFirstSol, numGames, fileName, 16, numSamples, depth)
#print(PNGSDict)
print("Average Number of Moves Per Game = {0}".format(PNGSAverage))
print("Average Number of Moves Per Game Before Improvement = {0}".format(MCAverage))
print("Average findTime per game = {0}".format(findTime))
print("Average pngsTime per game = {0}".format(pngsTime))
print(PNGSResults)
print("")
'''
'''tstart = time.clock()
print("Running Rand with numGames = {0}".format(numGames))
RandAverage, RandDict = playMultipleGames(runRandomPlayerFirstSol, numGames, fileName, 16, numSamples, depth)
#print(RandDict)
prin | t("Average Number of Moves Per Game = {0}".format(RandAverage))
print("Average time per game = {0}\n" | .format((time.clock() - tstart)/ numGames))
'''
|
eloped for
# a coding challenge from the 2014 Insight Data Engineering
# Fellows Program application.
#
# Licensed under the GNU General Public License, version 2.0
# (the "License"), this program is free software; you can
# redistribute it and/or modify it under the terms of the
# License.
#
# You should have received a copy of the License along with this
# program in the file "LICENSE". If not, you may obtain a copy of
# the License at
# http://www.gnu.org/licenses/gpl-2.0.html
#
import random
import time
MAX_DECKS = 8
def shuffleDeck(numDecks):
"""
Builds, shuffles, and returns a deck of 52 * numDecks cards
Deck is represented as a list of cards
Cards are represented as strings labeled as their rank and suit, e.g.
'7H' - 7 Hearts
'TS' - 10 Spades
"""
deck = [r+s for r in '23456789TJQKA'*numDecks for s in 'SHDC']
random.shuffle(deck)
return deck
def changeNumDecks():
"""
Prompts user to change the number of decks to use
Returns new number of decks to use
"""
numDecks = 0
while numDecks <= 0 or numDecks > MAX_DECKS:
try:
print "Enter number of decks to use (1-" + str(MAX_DECKS) + "):"
numDecks = int(raw_input("% "))
assert 0 < numDecks <= MAX_DECKS
except (ValueError, AssertionError):
print "Invalid input! Must be integer value greater than 0"
print "and less than 8"
return numDecks
def placeBet(chips):
"""
Prompts user for bet value
User input must be greater than 0 and less than chips
Fixed bet precision to one decimal place
Returns bet, rounded to nearest tenth
"""
bet = 0
while bet < 1 or bet > chips:
try:
print "How much do you wanna bet (1-" + str(chips) + ")?"
# Round bet to the nearest tenth
bet = round(float(raw_input("% ")),1)
assert 1 <= bet <= chips
except (ValueError, AssertionError):
print "Invalid input! Must be integer or float value at least 1"
print "and less than the number of available chips"
return bet
menuChoices = ['', "PLAY", "DECK", "EXIT"]
def menu():
"""
Menu
Prompts the user to choose menu option:
1 - Play
2 - Change # of decks
3 - Exit
Returns user selection
"""
choice = 0
maxChoice = len(menuChoices)-1
while choice <= 0 or choice > maxChoice:
try:
print "Menu"
print "-" * 10
print "[1] Play"
print "[2] Change # Decks"
print "[3] Exit"
choice = int(raw_input("% "))
assert 1 <=choice <= maxChoice
except (ValueError, AssertionError):
print "Invalid choice! Must be [1-" + str(maxChoice) + "]"
return menuChoices[choice]
blackjackChoices = ['', "HIT", "STAND", "DOUBLE"]
def blackjackMenu(playerCards, chips, bet):
"""
Prompts user to choose Blackjack option:
1 - Hit
2 - Stand
3 - Double Down (uses playerCards, chips, and
bet to determine if player can Double Down)
Can be extended for advanced options, i.e. split
Returns user selection
"""
choice = 0
maxChoice = len(blackjackChoices)-2
while choice <= 0 or choice > maxChoice:
try:
print "Actions:"
print "-" * 10
print "[1] Hit"
print "[2] Stand"
if len(playerCards) == 2 and chips >= bet:
"Double Down allowed"
print "[3] Double Down"
maxChoice += 1
choice = int(raw_input("% "))
assert 1 <= choice <= maxChoice
except (ValueError, AssertionError):
print "Invalid choice! Must be [1-" + str(maxChoice) + "]"
return blackjackChoices[choice]
def deal(deck):
"""
Pops and returns the first card in deck
"""
card = deck[0]
del deck[0]
return card
def rank(hand):
"""
Return the sum of the ranks in a hand
Face cards are of rank 10
Aces are of rank 11 or 1
Example: rank(['7H','AS','JD']) => 18
"""
# Extract all ranks from hand
ranks = [10 if r == 'T' or r == 'J' or r =='Q' or r == 'K' else
11 if r == 'A' else
int(r) for r,s in hand]
# While there are 11-ranked Aces in hand and hand rank is greater than 21,
while 11 in ranks and sum(ranks) > 21:
"""
Change rank of Aces to 1
one Ace at a time
until hand rank is less than 21
or until there are no more 11-ranked Aces
"""
index = ranks.index(11)
ranks[index] = 1
return sum(ranks)
def showCards(dealer, player, turn="player"):
"""
Print cards on screen
If player's turn, hide dealer's second card and rank
"""
print "=" * 20
print "Dealer Cards:", rank([dealer[0]]) if turn is "player" else rank(dealer)
for card in dealer:
if card is dealer[1] and turn is "player":
card = "--"
print card,
print
print "Player Cards:", rank(player)
for card in player:
print card,
print
print "=" * 20
def getPayout(dealer, player, chips, bet):
"""
Evaluates and compares dealer and player hands
Calculates winnings and adds to chips
Fixed chips precision to one decimal place
Returns chips rounded to nearest tenth
"""
if rank(player) > 21:
"Player bust"
print "Bust!"
elif rank(dealer) == rank(player):
"Push"
chips += bet
print "Push"
elif rank(player) == 21 and len(player) == 2:
"Player gets Blackjack"
chips += 2.5*bet
print "You got Blackjack!"
elif rank(dealer) > 21 or rank(player) > rank(dealer):
"Dealer bust or player beats dealer"
chips += 2*bet
print "You win!"
else:
"Dealer beats player"
print "You lose!"
return round(chips,1)
def blackjack(deck,chips):
"""
Play a round of (single player) Blackjack
using deck and chips. Player will be ask to
enter a valid bet value. Payout will be added
to available chips.
Return chips after payout.
"""
print "*" * 50
print "Chips:", chips
bet = placeBet(chips)
print "*" * 50
chips = chips - bet
print "Chips:", chips
print "Bet:", bet
dealerCards, playerCards = [], []
dealerRank, playerRank = 0, 0
# Deal starting cards by appending the
# first card from deck to list
playerCards.append(deal(deck))
dealerCards.append(deal(deck))
playerCards.append(deal(deck))
dealerCards.append(deal(deck))
# Player goes first
blackjack.turn = "player"
if rank(dealerCards) == 21:
"Check for dealer Blackjack"
showCards(dealerCards, playerCards, "dealer")
print "\nDealer got blackjack!"
blackjack.turn = None
elif rank(playerCards) == 21:
"Check player for Blackjack"
showCards(dealerCards, playerCards)
blackjack.turn = None
else:
showCards(dealerCards, playerCards)
while blackjack.turn is "player":
"Player's turn"
choice = blackjackMenu(playerCards, chips, | bet)
if choice == "HIT":
playerCards.append(deal(deck))
elif choice == "STAND":
blackjack.turn = "dealer"
break
elif choice == "DOUBLE":
print "Double Down! Good luck!"
chips = chips - bet
print "Chips:", chi | ps
bet = 2*bet
print "Bet:", bet
playerCards.append(deal(deck))
showCards(dealerCards, playerCards)
time.sleep(2)
blackjack.turn = "dealer"
if choice != "DOUBLE":
showCards(dealerCards, playerCards)
playerRank = rank(playerCards)
if playerRank > 21:
"Bust"
blackjack.turn = None
elif playerRank == 21:
"Twenty-One"
print "\nYou got 21!"
# Pause so player notices 21
time.sleep(2)
blackjack.turn = "dealer"
print
while blackjack.turn is "dealer":
"Dealer's turn"
showCards(dealerCards, playerCards, blackjack.turn)
dealerRank = rank(dealerCards)
if dealerRank > 21:
print "\nDealer busts!"
blackjack.turn = None
elif dealerRank < 17:
print "\nDealer hits"
dealerCards.append(deal(deck))
else:
blackjack.turn = None
# Pause between dealer moves so player can see dealer's actions
time.sleep(2)
# Compare hands and update available chips
chips = getPayout(dealerCards, playerCards, chips, bet)
time.sleep(1.5)
print
return chips
def main():
chips = 100
numDecks = changeNumDecks()
choice = ''
deck = shuffleDeck(numDecks)
while chips > 0:
"""
While there are still chips available to bet,
give the player the option to keep playing
"""
print "*" * 50
print "Chips:", chips
while choice != "PLAY":
"Display menu"
choice = menu()
if choice == "DECK":
numDecks = changeNumDecks()
print "Changed # of decks to:", numDecks
elif choice == "EXIT":
print "\nCashing out with", chips, "chips..."
print "Thanks for playing!\n"
return
if len(deck) <= 0.25*52*numDecks:
"Shuffle deck when %25 of cards are left"
de |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2015 Serpent Consulting Services Pvt. Ltd. (<http://www.serpentcs.com>)
# Copyright (C) 2016 FairCoop (<http:// | fair.coop>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public Licen | se as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
'name': 'Product Brand and Country filtering in Website',
'category': 'Website',
'author': 'FairCoop',
'website':'http://fair.coop',
'summary': '',
'version': '1.0',
'description': """
Allows to use product brands and countries as filtering for products in website.\n
This Module depends on product_brand module -https://github.com/OCA/product-attribute/tree/8.0/product_brand
""",
'depends': ['product_brand_custom','website_sale','web','product_custom'],
'data': [
"data/demands.xml",
"security/ir.model.access.csv",
"views/product_brand.xml",
"views/brand_page.xml",
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
(S'9d1d8dee18e2f5e4bae7551057c6c474'
p1
(ihappydoclib.pars | einfo.moduleinfo
ModuleInfo
p2
(dp3
S'_namespaces'
p4
((dp5
(dp6
tp7
sS'_import_info'
p8
(ihappydoclib.parseinfo.imports
ImportInfo
p9
(dp10
S'_named_imports'
p11
(dp12
sS'_straight_imports'
p13
(lp14
sbsS'_filename'
p15
S'Gnuplot/setup.py'
p16
sS'_docstring'
p17
S''
sS'_name'
p18 |
S'setup'
p19
sS'_parent'
p20
NsS'_comment_info'
p21
(dp22
sS'_configuration_values'
p23
(dp24
S'include_comments'
p25
I1
sS'cacheFilePrefix'
p26
S'.happydoc.'
p27
sS'useCache'
p28
I1
sS'docStringFormat'
p29
S'StructuredText'
p30
ssS'_class_info'
p31
g5
sS'_function_info'
p32
g6
sS'_comments'
p33
S''
sbt. |
#!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: magic
"""
from django.contrib import admin
from blog.models import User
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext, ugettext_lazy as _
class BlogUserAdmin(UserAdmin):
filesets = (
(None, {'fields': ('username', 'email', 'password')}),
(_('Personal info'), {'fields': ('email', 'qq', 'phone')}),
(_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
'groups', 'user_permissions')}),
(_('Important dates'), {'fields': {'last_login', 'd | ate_joined'}}),
)
add_fieldsets = (
(None, {
'classes': ('wide', ),
'fields': ('username', 'emai | l', 'password1', 'password2'),
}),
)
admin.site.register(User, BlogUserAdmin) |
import csv
from datetime import datetime
from matplotlib import pyplot as plt
# Get dates, high, and low temperatures from file.
filename = 'sitka_weather_2017.csv'
with open(filename) as f:
reader = csv.reader(f)
header_row = next(reader)
dates, highs, lows = [], | [], []
for row in reader:
current_date = datetime.strptime(row[0], "%Y-%m-%d")
dates.append(current_date)
high = int(row[1])
highs.append(high)
low = int(row[3])
lows.append(low)
# Plot data.
fig = plt.figure(dpi=128, figsize=(10, 6))
plt.plo | t(dates, highs, c='red', alpha=0.5)
plt.plot(dates, lows, c='blue', alpha=0.5)
plt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.1)
# Format plot.
plt.title("Daily high and low temperatures - 2017", fontsize=24)
plt.xlabel('', fontsize=16)
fig.autofmt_xdate()
plt.ylabel("Temperature (F)", fontsize=16)
plt.tick_params(axis='both', which='major', labelsize=16)
plt.show()
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: full_skip
type: stdout
short_description: suppreses tasks if all hosts skipped
description:
- Use this plugin when you dont care about any output for tasks that were completly skipped
version_added: "2.4"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuation
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'full_skip'
def v2_runner_on_skipped(self, result):
self.outlines = []
def v2_playbook_item_on_skipped(self, result):
self.outlines = []
def v2_runner_item_on_skipped(self, result):
self.outlines = []
def v2_runner_on_failed(self, result, ignore_error | s=False):
self.display()
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
def v2_playbook_on_task_ | start(self, task, is_conditional):
self.outlines = []
self.outlines.append("TASK [%s]" % task.get_name().strip())
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self.outlines.append("task path: %s" % path)
def v2_playbook_item_on_ok(self, result):
self.display()
super(CallbackModule, self).v2_playbook_item_on_ok(result)
def v2_runner_on_ok(self, result):
self.display()
super(CallbackModule, self).v2_runner_on_ok(result)
def display(self):
if len(self.outlines) == 0:
return
(first, rest) = self.outlines[0], self.outlines[1:]
self._display.banner(first)
for line in rest:
self._display.display(line)
self.outlines = []
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Created as part of the StratusLab project (http://stratuslab.eu),
# co-funded by the European Commission under the Grant Agreement
# INFSO-RI-261552."
#
# Copyright (c) 2011, Centre National de la Recherche Scientifique (CNRS)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" B | ASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either | express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from stratuslab.commandbase.AuthnCommand import AuthnCommand
sys.path.append('/var/lib/stratuslab/python')
from stratuslab.CloudConnectorFactory import CloudConnectorFactory
from stratuslab.Util import printError
from stratuslab.commandbase.StorageCommand import StorageCommand
from stratuslab.volume_manager.volume_manager_factory import VolumeManagerFactory
from stratuslab.ConfigHolder import ConfigHolder
from stratuslab.Authn import AuthnFactory
from stratuslab.Exceptions import OneException
# initialize console logging
import stratuslab.api.LogUtil as LogUtil
LogUtil.get_console_logger()
class MainProgram(AuthnCommand, StorageCommand):
    """A command-line program to detach a persistent disk."""
    def __init__(self):
        super(MainProgram, self).__init__()
    def parse(self):
        # Configure the option parser: usage text, description and --instance.
        self.parser.usage = '%prog [options] volume-uuid ...'
        self.parser.description = '''
Detach one or more persistent volumes (disks) that were dynamically
attached to a running virtual machine. The volume-uuid arguments are
the unique identifiers of volumes to detach.
'''
        self.parser.add_option('-i', '--instance', dest='instance',
                               help='The ID of the instance to which the volume attaches', metavar='VM_ID',
                               default=0, type='int')
        # Add the shared endpoint options, then let the base class parse.
        StorageCommand.addPDiskEndpointOptions(self.parser)
        AuthnCommand.addCloudEndpointOptions(self.parser)
        super(MainProgram, self).parse()
        self.options, self.uuids = self.parser.parse_args()
    def checkOptions(self):
        # Validate CLI input; printError() reports the problem to the user.
        super(MainProgram, self).checkOptions()
        if not self.uuids:
            printError('Please provide at least one persistent disk UUID to detach')
        # NOTE(review): the default for --instance is 0, which passes this
        # check -- confirm that 0 is a valid VM ID for this deployment.
        if self.options.instance < 0:
            printError('Please provide a VM ID on which to detach disk')
        try:
            self._retrieveVmNode()
        except OneException, e:
            printError(e)
    def _retrieveVmNode(self):
        # Resolve credentials, connect to the cloud endpoint and fetch the VM node.
        credentials = AuthnFactory.getCredentials(self.options)
        self.options.cloud = CloudConnectorFactory.getCloud(credentials)
        self.options.cloud.setEndpoint(self.options.endpoint)
        self.node = self.options.cloud.getVmNode(self.options.instance)
    def doWork(self):
        # Detach each requested volume; report per-volume failures without aborting.
        configHolder = ConfigHolder(self.options.__dict__, self.config or {})
        configHolder.pdiskProtocol = "https"
        pdisk = VolumeManagerFactory.create(configHolder)
        for uuid in self.uuids:
            try:
                target = pdisk.hotDetach(self.options.instance, uuid)
                print 'DETACHED %s from VM %s on /dev/%s' % (uuid, self.options.instance, target)
            except Exception, e:
                # exit=False: keep going so the remaining UUIDs are attempted.
                printError('DISK %s: %s' % (uuid, e), exit=False)
def main():
    """Script entry point: run MainProgram, turning Ctrl-C into a clean exit."""
    try:
        MainProgram()
    except KeyboardInterrupt:
        print '\n\nExecution interrupted by the user... goodbye!'
    return 0
|
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contrib layers just related to metric.
"""
from __future__ import print_function
import warnings
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.initializer import Normal, Constant
from paddle.fluid.framework import Variable
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layers import nn
__all__ = ['ctr_metric_bundle']
def ctr_metric_bundle(input, label):
    """
    ctr related metric layer

    This function help compute the ctr related metrics: RMSE, MAE, predicted_ctr, q_value.
    To compute the final values of these metrics, we should do following computations using
    total instance number:
        MAE = local_abserr / instance number
        RMSE = sqrt(local_sqrerr / instance number)
        predicted_ctr = local_prob / instance number
        q = local_q / instance number
    Note that if you are doing distribute job, you should all reduce these metrics and instance
    number first

    Args:
        input(Variable): A floating-point 2D Variable, values are in the range
                         [0, 1]. Each row is sorted in descending order. This
                         input should be the output of topk. Typically, this
                         Variable indicates the probability of each label.
        label(Variable): A 2D int Variable indicating the label of the training
                         data. The height is batch size and width is always 1.

    Returns:
        local_sqrerr(Variable): Local sum of squared error
        local_abserr(Variable): Local sum of abs error
        local_prob(Variable): Local sum of predicted ctr
        local_q(Variable): Local sum of q value

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
            label = fluid.layers.data(name="label", shape=[1], dtype="int32")
            predict = fluid.layers.sigmoid(fluid.layers.fc(input=data, size=1))
            auc_out = fluid.contrib.layers.ctr_metric_bundle(input=predict, label=label)
    """
    assert input.shape == label.shape
    helper = LayerHelper("ctr_metric_bundle", **locals())

    def _metric_var(shape, persistable):
        # All metric variables are float32 scalars/vectors; the persistable
        # ("local_*") ones accumulate across mini-batches.
        return helper.create_global_variable(
            persistable=persistable, dtype='float32', shape=shape)

    # Cross-batch accumulators.
    local_abserr = _metric_var([1], True)
    local_sqrerr = _metric_var([1], True)
    local_prob = _metric_var([1], True)
    local_q = _metric_var([1], True)
    local_pos_num = _metric_var([1], True)
    local_ins_num = _metric_var([1], True)

    # Per-batch scratch tensors (variable length) and per-batch scalars.
    tmp_res_elesub = _metric_var([-1], False)
    tmp_res_sigmoid = _metric_var([-1], False)
    tmp_ones = _metric_var([-1], False)
    batch_prob = _metric_var([1], False)
    batch_abserr = _metric_var([1], False)
    batch_sqrerr = _metric_var([1], False)
    batch_q = _metric_var([1], False)
    batch_pos_num = _metric_var([1], False)
    batch_ins_num = _metric_var([1], False)

    # Zero-initialize every scalar metric variable (on CPU).
    for var in [
            local_abserr, batch_abserr, local_sqrerr, batch_sqrerr, local_prob,
            batch_prob, local_q, batch_q, batch_pos_num, batch_ins_num,
            local_pos_num, local_ins_num
    ]:
        helper.set_variable_initializer(
            var, Constant(
                value=0.0, force_cpu=True))

    def _accumulate(batch_var, local_var):
        # local_var += batch_var: fold this batch's statistic into the
        # cross-batch accumulator (in place).
        helper.append_op(
            type="elementwise_add",
            inputs={"X": [batch_var],
                    "Y": [local_var]},
            outputs={"Out": [local_var]})

    # Squared / absolute error of (input - label).
    helper.append_op(
        type="elementwise_sub",
        inputs={"X": [input],
                "Y": [label]},
        outputs={"Out": [tmp_res_elesub]})
    helper.append_op(
        type="squared_l2_norm",
        inputs={"X": [tmp_res_elesub]},
        outputs={"Out": [batch_sqrerr]})
    _accumulate(batch_sqrerr, local_sqrerr)
    helper.append_op(
        type="l1_norm",
        inputs={"X": [tmp_res_elesub]},
        outputs={"Out": [batch_abserr]})
    _accumulate(batch_abserr, local_abserr)

    # Sum of predicted CTR over the batch.
    helper.append_op(
        type="reduce_sum", inputs={"X": [input]},
        outputs={"Out": [batch_prob]})
    _accumulate(batch_prob, local_prob)

    # q value: sum of sigmoid(input) over the batch.
    helper.append_op(
        type="sigmoid",
        inputs={"X": [input]},
        outputs={"Out": [tmp_res_sigmoid]})
    helper.append_op(
        type="reduce_sum",
        inputs={"X": [tmp_res_sigmoid]},
        outputs={"Out": [batch_q]})
    _accumulate(batch_q, local_q)

    # Positive-instance count: sum of labels over the batch.
    helper.append_op(
        type="reduce_sum",
        inputs={"X": [label]},
        outputs={"Out": [batch_pos_num]})
    _accumulate(batch_pos_num, local_pos_num)

    # Instance count: sum of a ones tensor shaped like the label batch.
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={"Input": label},
        outputs={'Out': [tmp_ones]},
        attrs={
            'shape': [-1, 1],
            'dtype': tmp_ones.dtype,
            'value': float(1.0),
        })
    helper.append_op(
        type="reduce_sum",
        inputs={"X": [tmp_ones]},
        outputs={"Out": [batch_ins_num]})
    _accumulate(batch_ins_num, local_ins_num)

    return local_sqrerr, local_abserr, local_prob, local_q, local_pos_num, local_ins_num
|
Permite filtrar y paginar los resultados.
El paginamiento es de 10 elementos por pagina y es opcional.
**Nota**: Para usar un mismo filtro con diferentes valores, se debe usar el parametro tantas veces como
sea necesario. e.g.: `?proveedor=6&proveedor=8`. El filtro se aplicara usando la disyuncion de los
valores. i.e: ... `proveedor = 6 OR proveedor = 8`. El filtro ``q`` no puede ser usado de esta forma.
**Nota**: El campo `monto_adjudicado` de la respuesta solo tiene un valor si se ha usado el filtro
``monto_adjudicado`` en el request, si no, es ``null``.
Los parametros aceptados son:
Filtros
============================== ================== ============================================================
Parámetro Ejemplo Descripción
============================== ================== ============================================================
``q`` clavos y martillos Busqueda de texto
``proveedor`` 1 Por ID de proveedor
``fecha_adjudicacion`` 20140101|20141231 Por fecha de adjudicacion de licitaciones
``organismo_adjudicador`` 1 Por ID de organismos que han les concedido licitaciones
``n_licitaciones_adjudicadas`` 10|20 Por cantidad de licitaciones adjudicadas
``monto_adjudicado`` 10000|1000000 Por monto adjudicado en licitaciones
============================== ================== ============================================================
Modificadores
============================== ================ ============================================================
Parámetro Ejemplo Descripción
============================== ================ ============================================================
``orden`` monto_adjudicado Ordenar los resultados
``pagina`` 1 Paginar y entregar la pagina solicitada
    ============================== ================ ============================================================
"""
    # Preparar los filtros y operaciones variables
selects = [
models_api.ProveedorOrganismoCruce.empresa,
models_api.ProveedorOrganismoCruce.nombre_empresa,
models_api.ProveedorOrganismoCruce.rut_sucursal
]
wheres = []
joins = []
order_bys = []
# Busqueda de texto
q_q = req.params.get('q', None)
if q_q:
# TODO Hacer esta consulta sobre un solo indice combinado en lugar de usar dos filtros separados por OR
wheres.append(ts_match(models_api.ProveedorOrganismoCruce.nombre_empresa, q_q) | ts_match(models_api.ProveedorOrganismoCruce.rut_sucursal, q_q))
# Filtrar por proveedor
q_proveedor = req.params.get('proveedor', None)
if q_proveedor:
if isinstance(q_proveedor, basestring):
q_proveedor = [q_proveedor]
try:
q_proveedor = map(lambda x: int(x), q_proveedor)
except ValueError:
raise falcon.HTTPBadRequest("Parametro incorrecto", "proveedor debe ser un entero")
wheres.append(models_api.ProveedorOrganismoCruce.empresa << q_proveedor)
# Filtrar por fecha de adjudicacion
q_fecha_adjudicacion = req.params.get('fecha_adjudicacion', None)
if q_fecha_adjudicacion:
if isinstance(q_fecha_adjudicacion, basestring):
q_fecha_adjudicacion = [q_fecha_adjudicacion]
filter_fecha_adjudicacion = []
for fechas in q_fecha_adjudicacion:
fechas = fechas.split('|')
try:
fecha_adjudicacion_min = dateutil.parser.parse(fechas[0], dayfirst=True, yearfirst=True).date() if fechas[0] else None
fecha_adjudicacion_max = dateutil.parser.parse(fechas[1], dayfirst=True, yearfirst=True).date() if fechas[1] else None
except IndexError:
raise falcon.HTTPBadRequest("Parametro incorrecto", "Los valores en fecha_adjudicacion deben estar separados por un pipe [|]")
except ValueError:
raise falcon.HTTPBadRequest("Parametro incorrecto", "El formato de la fecha en fecha_adjudicacion no es correcto")
if fecha_adjudicacion_min and fecha_adjudicacion_max:
filter_fecha_adjudicacion.append((models_api.ProveedorOrganismoCruce.fecha_adjudicacion >= fecha_adjudicacion_min) & (models_api.ProveedorOrganismoCruce.fecha_adjudicacion <= fecha_adjudicacion_max))
elif fecha_adjudicacion_min:
filter_fecha_adjudicacion.append(models_api.ProveedorOrganismoCruce.fecha_adjudicacion >= fecha_adjudicacion_min)
elif fecha_adjudicacion_max:
filter_fecha_adjudicacion.append(models_api.ProveedorOrganismoCruce.fecha_adjudicacion <= fecha_adjudicacion_max)
if filter_fecha_adjudicacion:
wheres.append(reduce(operator.or_, filter_fecha_adjudicacion))
# Filtrar por organismo_adjudicador
q_organismo_adjudicador = req.params.get('organismo_adjudicador', None)
if q_organismo_adjudicador:
if isinstance(q_organismo_adjudicador, basestring):
q_organismo_adjudicador = [q_organismo_adjudicador]
try:
q_organismo_adjudicador = map(lambda x: int(x), q_organismo_adjudicador)
except ValueError:
raise falcon.HTTPBadRequest("Parametro incorrecto", "organismo_adjudicador debe ser un entero")
wheres.append(models_api.ProveedorOrganismoCruce.organismo << q_organismo_adjudicador)
# Filtrar por n_licitaciones_adjudicadas
q_n_licitaciones_adjudicadas = req.params.get('n_licitaciones_adjudicadas')
if q_n_licitaciones_adjudicadas:
if isinstance(q_n_licitaciones_adjudicadas, basestring):
q_n_licitaciones_adjudicadas = [q_n_licitaciones_adjudicadas]
filter_n_licitaciones_adjudicadas = []
for n_licitaciones in q_n_licitaciones_adjudicadas:
n_licitaciones = n_licitaciones.split('|')
try:
n_licitaciones_adjudicadas_min = int(n_licitaciones[0]) if n_licitaciones[0] else None
n_licitaciones_adjudicadas_max = int(n_licitaciones[1]) if n_licitaciones[1] else None
except IndexError:
raise falcon.HTTPBadRequest("Parametro incorrecto", "Los valores en n_licitaciones_adjudicadas deben estar separados por un pipe [|]")
except ValueError:
raise falcon.HTTPBadRequest("Parametro incorrecto", "n_licitaciones_adjudicadas debe ser un entero")
if n_licitaciones_adjudicadas_min is not None and n_licitaciones_adjudicadas_max is not None:
filter_n_licitaciones_adjudicadas.append(
(peewee.fn.count(peewee.SQL('DISTINCT licitacion_id')) >= n_licitaciones_adjudicadas_min) &
(peewee.fn.count(peewee.SQL('DISTINCT licitacion_id')) <= n_licitaciones_adjudicadas_max)
)
elif n_licitaciones_adjudicadas_min is not None:
filter_n_licitaciones_adjudicadas.append(
peewee.fn.count(peewee.SQL('DISTINCT licitacion_id')) >= n_licitaciones_adjudicadas_min
)
elif n_licitaciones_adjudicadas_max is not None:
filter_n_licitaciones_adjudicadas.append(
peewee.fn.count(peewee.SQL('DISTINCT licitacion_id')) <= n_licitaciones_adjudicadas_max
)
if filter_n_licitaciones_adjudicadas:
filter_n_licitaciones_adjudicadas = reduce(operator.or_, f |
<div style="clear:both"></div>
<form action="%(siteurl)s/search" method="get"><div align="center">
""" % { 'siteurl' : CFG_SITE_URL }
# middle table cell: print beg/next/prev/end arrows:
if not middle_only:
out += """<td class="searchresultsboxheader" align="center">
%(recs_found)s """ % {
'recs_found' : _("%s records found") % ('<strong>' + self.tmpl_nice_number(nb_found, ln) + '</strong>')
}
else:
out += "<small>"
if nb_found > rg:
out += "" + cgi.escape(collection_name) + " : " + _("%s records found") % ('<strong>' + self.tmpl_nice_number(nb_found, ln) + '</strong>') + " "
if nb_found > rg: # navig.arrows are needed, since we have many hits
query = {'p': p, 'f': f,
'cc': collection,
'sf': sf, 'so': so,
'sp': sp, 'rm': rm,
'of': of, 'ot': ot,
'aas': aas, 'ln': ln,
'p1': p1, 'p2': p2, 'p3': p3,
'f1': f1, 'f2': f2, 'f3': f3,
'm1': m1, 'm2': m2, 'm3': m3,
'op1': op1, 'op2': op2,
'sc': 0,
'd1y': d1y, 'd1m': d1m, 'd1d': d1d,
'd2y': d2y, 'd2m': d2m, 'd2d': d2d,
'dt': dt,
}
# @todo here
def img(gif, txt):
return '<img src="%(siteurl)s/img/%(gif)s.gif" alt="%(txt)s" border="0" />' % {
'txt': txt, 'gif': gif, 'siteurl': CFG_SITE_URL}
if jrec - rg > 1:
out += create_html_link(self.build_search_url(query, jrec=1, rg=rg),
{}, img('sb', _("begin")),
{'class': 'img'})
if jrec > 1:
out += create_html_link(self.build_search_url(query, jrec=max(jrec - rg, 1), rg=rg),
{}, img('sp', _("previous")),
{'class': 'img'})
if jrec + rg - 1 < nb_found:
out += "%d - %d" % (jrec, jrec + rg - 1)
else:
out += "%d - %d" % (jrec, nb_found)
if nb_found >= jrec + rg:
out += create_html_link(self.build_search_url(query,
jrec=jrec + rg,
rg=rg),
{}, img('sn', _("next")),
{'class':'img'})
if nb_found >= jrec + rg + rg:
out += create_html_link(self.build_search_url(query,
jrec=nb_found - rg + 1,
rg=rg),
{}, img('se', _("end")),
{'class': 'img'})
# still in the navigation part
cc = collection
sc = 0
for var in ['p', 'cc', 'f', 'sf', 'so', 'of', 'rg', 'aas', 'ln', 'p1', 'p2', 'p3', 'f1', 'f2', 'f3', 'm1', 'm2', 'm3', 'op1', 'op2', 'sc', 'd1y', 'd1m', 'd1d', 'd2y', 'd2m', 'd2d', 'dt']:
out += self.tmpl_input_hidden(name=var, value=vars()[var])
for var in ['ot', 'sp', 'rm']:
if vars()[var]:
out += self.tmpl_input_hidden(name=var, value=vars()[var])
if pl_in_url:
fieldargs = cgi.parse_qs(pl_in_url)
for fieldcode in all_fieldcodes:
# get_fieldcodes():
if fieldargs.has_key(fieldcode):
for val in fieldargs[fieldcode]:
out += self.tmpl_input_hidden(name=fieldcode, value=val)
out += """ %(jump)s <input type="text" name="jrec" size="4" value="%(jrec)d" />""" % {
'jump' : _("jump to record:"),
'jrec' : jrec,
}
if not middle_only:
out += "</td>"
else:
out += "</small>"
# right table cell: cpu time info
if not middle_only:
if cpu_time > -1:
out += """<td class="searchresultsboxheader" align="right"><small>%(time)s</small> </td>""" % {
'time' : _("Search took %s seconds.") % ('%.2f' % cpu_time),
}
out += "</tr></table>"
else:
out += "</div>"
out += "</form>"
return out
def tmpl_print_hosted_search_info(self, ln, middle_only,
collection, collection_name, collection_id,
aas, sf, so, rm, rg, nb_found, of, ot, p, f, f1,
f2, f3, m1, m2, m3, op1, op2, p1, p2,
p3, d1y, d1m, d1d, d2y, d2m, d2d, dt,
all_fieldcodes, cpu_time, pl_in_url,
jrec, sc, sp):
"""Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
Also, prints navigation links (beg/next/prev/end) inside the results set.
If middle_only is set to 1, it will only print the middle box information (beg/netx/prev/end/etc) links.
This is suitable for displaying navigation links at the bottom of the search results page.
Parameters:
- 'ln' *string* - The language to display
- 'middle_only' *bool* - Only display parts of the interface
- 'collection' *string* - the collection name
- 'collection_name' *string* - the i18nized current collection name
- 'aas' *bool* - if we display the advanced search interface
- 'sf' *string* - the currently selected sort format
- 'so' *string* - the currently selected sort order ("a" or "d")
- 'rm' *string* - selected ranking method
- 'rg' *int* - selected results/page
- 'nb_found' *int* - number of results found
- 'of' *string* - the selected output format
- 'ot' *string* - hidden values
- 'p' *string* - Current search words
- 'f' *string* - the fields in which the search was done
- 'f1, f2, f3, m1, m2, m3, p1, p2, p3, op1, op2' *strings* - the search parameters
- 'jrec' *int* - number of first record on this page
- 'd1y, d2y, d1m, d2m, d1d, d2d' *int* - the search between dates
- 'dt' *string* the dates' type (creation date, modification date)
          - 'all_fieldcodes' *array* - all the available fields
- 'cpu_time' *float* - the time of the query in seconds
"""
# load the right message language
_ = gettext_set_language(ln)
out = | ""
# left table cells: print collection name
if not middle_only:
out += '''
<a name="%(collection_id)s"></a>
<form action="%(siteurl)s/search" method="get">
<table class="searchresultsbox"><tr><td class="searchresultsboxheader" align="left">
<strong><big>%(collection_link)s</big></strong></td>
''' % {
'collection_id': collection_id,
'siteurl' : CFG_SITE_URL,
'collection_link': create_html_link(self.build_search_interface_url(c=collection, aas=aas, ln=ln),
{}, cgi.escape(collection_name))
}
else:
out += """
<form action="%(siteurl)s/search" method="get"><div align="center">
""" % { 'siteurl' : CFG_SITE_URL }
# middle table cell: print beg/next/prev/end arrows:
if not middle_only:
# in case we have a hosted collection that timed out do not print its number o |
from django import forms
class SelectModelForm(forms.Form):
    """Form asking the user to identify a model by app label and model name.

    Both fields are required plain-text inputs; validation of whether the
    named model actually exists is left to the view.
    (Restored the base class `forms.Form`, which was garbled in the
    transcription.)
    """
    app_label = forms.CharField(
        label="App label",
        required=True)
    model_name = forms.CharField(
        label="Model name",
        required=True)
|
er.
* Mark this instance as closed (for the purpose of future `open`
and `close` calls).
"""
if not self.is_open:
return
if self.pidfile is not None:
# Follow the interface for telling a context manager to exit,
# <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>.
self.pidfile.__exit__(None, None, None)
self._is_open = False
    def __exit__(self, exc_type, exc_value, traceback):
        """ Context manager exit point.

            Closes the daemon context regardless of whether an exception
            is propagating (the exception, if any, is not suppressed).

            """
        self.close()
def terminate(self, signal_number, stack_frame):
""" Signal handler for end-process signals.
:Return: ``None``
Signal handler for the ``signal.SIGTERM`` signal. Performs the
following step:
* Raise a ``SystemExit`` exception explaining the signal.
"""
exception = SystemExit(
"Terminating on signal %(signal_number)r" % vars())
raise exception
def _get_exclude_file_descriptors(self):
""" Return the set of file descriptors to exclude closing.
Returns a set containing the file descriptors for the
items in `files_preserve`, and also each of `stdin`,
`stdout`, and `stderr`:
* If the item is ``None``, it is omitted from the return
set.
* If the item has a ``fileno()`` method, that method's
return value is in the return set.
* Otherwise, the item is in the return set verbatim.
"""
files_preserve = self.files_preserve
if files_preserve is None:
files_preserve = []
files_preserve.extend(
item for item in [self.stdin, self.stdout, self.stderr]
if hasattr(item, 'fileno'))
exclude_descriptors = set()
for item in files_preserve:
if item is None:
continue
if hasattr(item, 'fileno'):
exclude_descriptors.add(item.fileno())
else:
exclude_descriptors.add(item)
return exclude_descriptors
def _make_signal_handler(self, target):
""" Make the signal handler for a specified target object.
If `target` is ``None``, returns ``signal.SIG_IGN``. If
`target` is a string, returns the attribute of this
instance named by that string. Otherwise, returns `target`
itself.
"""
| if target is None:
result = signal.SIG_IGN
elif isinstance(target, str):
name = target
result = getattr(self, name)
else:
result = target
return result
def _make_signal_handler_map(self):
""" Make the map from signals to handlers for this instance.
Constructs a map from signal numbers | to handlers for this
context instance, suitable for passing to
`set_signal_handlers`.
"""
signal_handler_map = dict(
(signal_number, self._make_signal_handler(target))
for (signal_number, target) in self.signal_map.items())
return signal_handler_map
def change_working_directory(directory):
    """ Change the working directory of this process.

        Any failure is re-raised as `DaemonOSEnvironmentError`.

        """
    try:
        os.chdir(directory)
    except Exception as exc:
        raise DaemonOSEnvironmentError(
            "Unable to change working directory (%s)" % exc)
def change_root_directory(directory):
    """ Change the root directory of this process.

        Sets the current working directory, then the process root
        directory, to the specified `directory`. Requires appropriate
        OS privileges for this process.

        """
    try:
        os.chdir(directory)
        os.chroot(directory)
    except Exception as exc:
        raise DaemonOSEnvironmentError(
            "Unable to change root directory (%s)" % exc)
def change_file_creation_mask(mask):
    """ Change the file creation mask (umask) for this process.

        Any failure is re-raised as `DaemonOSEnvironmentError`.

        """
    try:
        os.umask(mask)
    except Exception as exc:
        raise DaemonOSEnvironmentError(
            "Unable to change file creation mask (%s)" % exc)
def change_process_owner(uid, gid):
    """ Change the owning UID and GID of this process.

        Sets the GID then the UID of the process (in that order, to
        avoid permission errors) to the specified `gid` and `uid`
        values. Requires appropriate OS privileges for this process.

        """
    try:
        os.setgid(gid)
        os.setuid(uid)
    except Exception as exc:
        # Bug fix: the message previously said "Unable to change file
        # creation mask", copy-pasted from change_file_creation_mask().
        error = DaemonOSEnvironmentError(
            "Unable to change process owner (%s)" % exc)
        raise error
def prevent_core_dump():
    """ Prevent this process from generating a core dump.

        Sets the soft and hard limits for core dump size to zero. On
        Unix, this prevents the process from creating core dump
        altogether.

        """
    core_resource = resource.RLIMIT_CORE
    # Probe the limit first: platforms without RLIMIT_CORE raise ValueError.
    try:
        resource.getrlimit(core_resource)
    except ValueError as exc:
        raise DaemonOSEnvironmentError(
            "System does not support RLIMIT_CORE resource limit (%s)" % exc)
    # Zero both the soft and the hard limit: no core dump at all.
    resource.setrlimit(core_resource, (0, 0))
def detach_process_context():
    """ Detach the process context from parent and session.

        Detach from the parent process and session group, allowing the
        parent to exit while this process continues running.

        Reference: "Advanced Programming in the Unix Environment",
        section 13.3, by W. Richard Stevens, published 1993 by
        Addison-Wesley.

        """
    def fork_then_exit_parent(error_message):
        """ Fork a child process, then exit the parent process.

            If the fork fails, raise a ``DaemonProcessDetachError``
            with ``error_message``.

            """
        try:
            pid = os.fork()
        except OSError as exc:
            raise DaemonProcessDetachError(
                "%s: [%d] %s" % (error_message, exc.errno, exc.strerror))
        if pid > 0:
            # Parent side: terminate immediately, skipping cleanup handlers.
            # pylint: disable=W0212
            os._exit(0)

    # Classic double fork: detach from the parent, become a session leader,
    # then fork again so the daemon can never reacquire a controlling tty.
    fork_then_exit_parent(error_message="Failed first fork")
    os.setsid()
    fork_then_exit_parent(error_message="Failed second fork")
def is_process_started_by_init():
    """ Determine if the current process is started by `init`.

        The `init` process has the process ID of 1; if that is our
        parent process ID, return ``True``, otherwise ``False``.

        """
    init_pid = 1
    return os.getppid() == init_pid
def is_socket(fd):
""" Determine if the file descriptor is a socket.
Return ``False`` if querying the socket type of `fd` raises an
error; otherwise return ``True``.
"""
result = False
file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
try:
file_socket.getsockopt(
socket.SOL_SOCKET, socket.SO_TYPE)
except socket.error as exc:
exc_errno = exc.args[0]
if exc_errno == errno.ENOTSOCK:
# Socket operation on non-socket
pass
else:
# Some other socket error
result = True
else:
# No error getting socket type
result = True
return result
def is_process_started_by_superserver():
""" Determine if the current process is started by the superserver.
The internet superserver creates a network socket, and
attaches it to the standard streams of the child process. If
that is the case for this p |
= logging.getLogger("zentral.conf.config")
class Proxy:
    """Marker base class for lazily-resolved configuration values."""
    pass
class EnvProxy(Proxy):
    """Proxy resolving to the value of an environment variable."""
    def __init__(self, name):
        self._name = name
    def get(self):
        """Return the variable's value; raises KeyError if it is unset."""
        return os.environ[self._name]
class ResolverMethodProxy(Proxy):
    """Proxy that defers to one of the Resolver lookup methods."""
    def __init__(self, resolver, proxy_type, key):
        # Map the proxy type onto the matching (bound) resolver method.
        dispatch = {
            "file": resolver.get_file_content,
            "param": resolver.get_parameter_value,
            "secret": resolver.get_secret_value,
            "bucket_file": resolver.get_bucket_file,
        }
        try:
            self._method = dispatch[proxy_type]
        except KeyError:
            raise ValueError("Unknown proxy type %s", proxy_type)
        self._key = key
    def get(self):
        return self._method(self._key)
class JSONDecodeFilter(Proxy):
    """Filter proxy: JSON-decode the child proxy's resolved value."""
    def __init__(self, child_proxy):
        self._child_proxy = child_proxy
    def get(self):
        return json.loads(self._child_proxy.get())
class Base64DecodeFilter(Proxy):
    """Filter proxy: base64-decode the child proxy's resolved value."""
    def __init__(self, child_proxy):
        self._child_proxy = child_proxy
    def get(self):
        return base64.b64decode(self._child_proxy.get())
class ElementFilter(Proxy):
    """Filter proxy: index into the child value with `key`.

    A numeric key is coerced to int (list index); otherwise it is used
    verbatim (dict key).
    """
    def __init__(self, key, child_proxy):
        try:
            self._key = int(key)
        except ValueError:
            self._key = key
        self._child_proxy = child_proxy
    def get(self):
        return self._child_proxy.get()[self._key]
class Resolver:
    """Caching resolver for file, secret, bucket-file and parameter lookups.

    Values are cached per key; secrets and parameters expire after ten
    minutes, files and bucket files are cached for the process lifetime.
    """
    def __init__(self):
        self._cache = {}
        self._bucket_client = None
        self._param_client = None
        self._secret_client = None
    def _get_or_create_cached_value(self, key, getter, ttl=None):
        # Serve from cache when present and not expired.
        entry = self._cache.get(key)
        if entry is not None:
            expiry, value = entry
            if expiry is None or time.time() < expiry:
                logger.debug("Key %s from cache", key)
                return value
            logger.debug("Cache for key %s has expired", key)
        # Cache miss or expired entry: fetch and store.
        value = getter()
        expiry = (time.time() + ttl) if ttl else None
        self._cache[key] = (expiry, value)
        logger.debug("Set cache for key %s", key)
        return value
    def get_file_content(self, filepath):
        def getter():
            with open(filepath, "r") as f:
                return f.read()
        return self._get_or_create_cached_value(("FILE", filepath), getter)
    def get_secret_value(self, name):
        if not self._secret_client:
            self._secret_client = get_secret_client()
        def getter():
            return self._secret_client.get(name)
        return self._get_or_create_cached_value(("SECRET", name), getter, ttl=600)
    def get_bucket_file(self, key):
        if not self._bucket_client:
            self._bucket_client = get_bucket_client()
        def getter():
            return self._bucket_client.download_to_tmpfile(key)
        return self._get_or_create_cached_value(("BUCKET_FILE", key), getter)
    def get_parameter_value(self, key):
        if not self._param_client:
            self._param_client = get_param_client()
        def getter():
            return self._param_client.get(key)
        return self._get_or_create_cached_value(("PARAM", key), getter, ttl=600)
class BaseConfig:
    """Common machinery for ConfigDict / ConfigList.

    String values matching PROXY_VAR_RE ("{{ type:key | filters }}") are
    replaced by lazy Proxy objects that are resolved on each access.
    (Restored the tokens `startswith` and `ConfigDict`, which were garbled
    in the transcription.)
    """
    PROXY_VAR_RE = re.compile(
        r"^\{\{\s*"
        r"(?P<type>bucket_file|env|file|param|secret)\:(?P<key>[^\}\|]+)"
        r"(?P<filters>(\s*\|\s*(jsondecode|base64decode|element:[a-zA-Z_\-/0-9]+))*)"
        r"\s*\}\}$"
    )
    # Maps a config path tuple to a subclass to instantiate for that subtree.
    custom_classes = {}
    def __init__(self, path=None, resolver=None):
        self._path = path or ()
        if not resolver:
            resolver = Resolver()
        self._resolver = resolver
    def _make_proxy(self, key, match):
        """Build the proxy (plus filter chain) described by a regex match."""
        proxy_type = match.group("type")
        key = match.group("key").strip()
        if proxy_type == "env":
            proxy = EnvProxy(key)
        else:
            proxy = ResolverMethodProxy(self._resolver, proxy_type, key)
        # Apply filters left-to-right, each wrapping the previous proxy.
        filters = [f for f in [rf.strip() for rf in match.group("filters").split("|")] if f]
        for filter_name in filters:
            if filter_name == "jsondecode":
                proxy = JSONDecodeFilter(proxy)
            elif filter_name == "base64decode":
                proxy = Base64DecodeFilter(proxy)
            elif filter_name.startswith("element:"):
                key = filter_name.split(":", 1)[-1]
                proxy = ElementFilter(key, proxy)
            else:
                raise ValueError("Unknown filter %s", filter_name)
        return proxy
    def _from_python(self, key, value):
        """Convert a plain value into its config-tree representation."""
        new_path = self._path + (key,)
        if isinstance(value, dict):
            value = self.custom_classes.get(new_path, ConfigDict)(value, new_path)
        elif isinstance(value, list):
            value = self.custom_classes.get(new_path, ConfigList)(value, new_path)
        elif isinstance(value, str):
            match = self.PROXY_VAR_RE.match(value)
            if match:
                value = self._make_proxy(key, match)
        return value
    def _to_python(self, value):
        """Resolve a stored value: proxies are dereferenced, others pass through."""
        if isinstance(value, Proxy):
            return value.get()
        else:
            return value
    def __len__(self):
        return len(self._collection)
    def __delitem__(self, key):
        del self._collection[key]
    def __setitem__(self, key, value):
        self._collection[key] = self._from_python(key, value)
    def pop(self, key, default=None):
        value = self._collection.pop(key, default)
        if isinstance(value, Proxy):
            value = value.get()
        return value
class ConfigList(BaseConfig):
    """List-like configuration node; proxies are resolved on access."""
    def __init__(self, config_l, path=None, resolver=None):
        super().__init__(path=path, resolver=resolver)
        # Element keys are their stringified indices (used for path tracking).
        self._collection = [
            self._from_python(str(index), element)
            for index, element in enumerate(config_l)
        ]
    def __getitem__(self, key):
        value = self._collection[key]
        if not isinstance(key, slice):
            logger.debug("Get /%s[%s] config key", "/".join(self._path), key)
            return self._to_python(value)
        slice_repr = ":".join(str("" if i is None else i) for i in (key.start, key.stop, key.step))
        logger.debug("Get /%s[%s] config key", "/".join(self._path), slice_repr)
        return [self._to_python(item) for item in value]
    def __iter__(self):
        return (self._to_python(element) for element in self._collection)
    def serialize(self):
        """Return a plain list, recursively serializing nested config nodes."""
        out = []
        for item in self:
            out.append(item.serialize() if isinstance(item, BaseConfig) else item)
        return out
class ConfigDict(BaseConfig):
def __init__(self, config_d, path=None, resolver=None):
super().__init__(path=path, resolver=resolver)
self._collection = {}
for key, value in config_d.items():
self._collection[key] = self._from_python(key, value)
def __getitem__(self, key):
logger.debug("Get /%s config key", "/".join(self._path + (key,)))
value = self._collection[key]
return self._to_python(value)
def get(self, key, default=None):
try:
value = self[key]
except KeyError:
value = self._to_python(default)
return value
def __iter__(self):
yield from self._collection
def keys(self):
return self._collection.keys()
def values(self):
for value in self._collection.values():
yield self._to_python(value)
def items(self):
for key, value in self._collection.items():
yield key, self._to_python(value)
def clear(self):
return self._collection.clear()
def setdefault(self, key, default=None):
return self._collection.setdefault(key, self._from_python(key, default))
def pop(self, key, default=None):
value = |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''/* generateSintagmaProblem_6WP.py
*
* Copyright (C) 2016 Gian Paolo Ciceri
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
* Author:
* gian paolo ciceri <gp.ciceri@gmail.com>
*
*
* Release:
* 2016.08.24 - initial release.
*
*/
'''
import random
import time
import locale
import re
from sixWeeksExercises import *
from latinGrammarRules import *
locale.setlocale(locale.LC_ALL, '')
NUMEX = (48*6)+12
NUMTRY = 10000
ID = "6WP"
PERIOD = "PAG11"
PRE = ID + '-' + PERIOD + "_esercizi_"
PRE_NAME = 'EX_' + ID + '_' + PERIOD
PREAMBLE = '''#
# -*- coding: utf-8 -*-'''
RUNCODE = '''if __name__ == "__main__":
import sys
for item in %s:
for word in item[3:6]:
print(word.encode(sys.stdin.encoding).decode(sys.stdout.encoding), end=" ")
print()
''' % (PRE_NAME,)
NOW = time.strftime('%Y%m%d-%H00')
TODAY = time.strftime('%Y.%m.%d')
PROBLEMI = PRE + NOW + ".py"
GENERE = LATIN_GENDERS
PERSONE = LATIN_PERSONS
CASI = LATIN_CASES_VOCDECL
REGOLA = (CASI,)
LESSICO = SIXWEEKS_PAG11_VOC_EX1
NUMLEX = 1
if __name__ == "__main__":
random.seed()
item = 0
num = 0
problemDict = dict()
exDict = dict()
fields = '("<LAT>", "<ITA>", "<ING>", "%s", "%s", "%s"),'
while (item < NUMEX and num < NUMTRY):
num += 1
regola = list()
esempio = list()
for rule in REGOLA:
theRule = random.choice(rule)
regola.append(theRule[0])
esempio.append(theRule[1])
lessico = list()
while 1:
word = random.choice(LESSICO)[0]
if word not in lessico and len(lessico) < NUMLEX:
lessico.append(word)
if len(lessico) == NUMLEX:
break
regola_string = ', '.join(regola)
esempio_string = ', '.join(esempio)
lessico_string = | ', '.join(lessico)
voceX = fields % (regola_string, | esempio_string, lessico_string)
# con questo "comprimo" i doppi spazi in uno solo
voce = voceX.replace(" ", " ")
idItem = "%s.%d" % (ID, item)
try:
p = problemDict[(voce,)]
print(item, num, "DOPPIO:", voce)
except KeyError:
problemDict[(voce)] = (voce,)
exDict[item] = (voce, idItem)
item += 1
pf = open(PROBLEMI, "wb")
pf.write(bytes(PREAMBLE + "\n\n", 'UTF-8'))
pf.write(bytes(PRE_NAME + " = [\n", 'UTF-8'))
for pitem in sorted(exDict.keys()):
problema = exDict[pitem][0]
pf.write(bytes(problema + "\n", 'UTF-8'))
pf.write(bytes("]\n", 'UTF-8'))
pf.write(bytes("\n\n" + RUNCODE + "\n\n", 'UTF-8'))
pf.close()
|
"network-instances",
"network-instance",
"mpls",
"lsps",
"constrained-path",
"tunnels",
"tunnel",
"bandwidth",
"auto-bandwidth",
"underflow",
"state",
]
    def _get_enabled(self):
        """
        Getter method for enabled, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/bandwidth/auto_bandwidth/underflow/state/enabled (boolean)

        YANG Description: enables bandwidth underflow
        adjustment on the lsp
        """
        # NOTE(review): appears to be pyangbind-generated; regenerate rather
        # than hand-edit. Returns the YANGDynClass-wrapped boolean leaf.
        return self.__enabled
    def _set_enabled(self, v, load=False):
        """
        Setter method for enabled, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/bandwidth/auto_bandwidth/underflow/state/enabled (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_enabled is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_enabled() directly.
        YANG Description: enables bandwidth underflow
        adjustment on the lsp
        """
        # Unwrap values coming from another generated YANG type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap in the generated boolean leaf type; non-boolean input raises.
            t = YANGDynClass(
                v,
                base=YANGBool,
                default=YANGBool("false"),
                is_leaf=True,
                yang_name="enabled",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="boolean",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """enabled must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
                }
            )
        self.__enabled = t
        # Let the parent re-validate/propagate if it defines a _set hook.
        if hasattr(self, "_set"):
            self._set()
    def _unset_enabled(self):
        # Reset the leaf to its generated default (false).
        self.__enabled = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="enabled",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )
    def _get_underflow_threshold(self):
        """
        Getter method for underflow_threshold, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/bandwidth/auto_bandwidth/underflow/state/underflow_threshold (oc-types:percentage)

        YANG Description: bandwidth percentage change to trigger
        and underflow event
        """
        # Returns the wrapped 0..100 percentage leaf.
        return self.__underflow_threshold
    def _set_underflow_threshold(self, v, load=False):
        """
        Setter method for underflow_threshold, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/bandwidth/auto_bandwidth/underflow/state/underflow_threshold (oc-types:percentage)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_underflow_threshold is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_underflow_threshold() directly.
        YANG Description: bandwidth percentage change to trigger
        and underflow event
        """
        # Unwrap values coming from another generated YANG type.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # uint8 restricted to 0..255 then further restricted to 0..100 (percentage).
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=int,
                        restriction_dict={"range": ["0..255"]},
                        int_size=8,
                    ),
                    restriction_dict={"range": ["0..100"]},
                ),
                is_leaf=True,
                yang_name="underflow-threshold",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-types:percentage",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """underflow_threshold must be of a type compatible with oc-types:percentage""",
                    "defined-type": "oc-types:percentage",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="underflow-threshold", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=False)""",
                }
            )
        self.__underflow_threshold = t
        # Let the parent re-validate/propagate if it defines a _set hook.
        if hasattr(self, "_set"):
            self._set()
    def _unset_underflow_threshold(self):
        # Reset the percentage leaf to an unset default (no default value given).
        self.__underflow_threshold = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["0..100"]},
            ),
            is_leaf=True,
            yang_name="underflow-threshold",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-types:percentage",
            is_config=False,
        )
    def _get_trigger_event_count(self):
        """
        Getter method for trigger_event_count, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/bandwidth/auto_bandwidth/underflow/state/trigger_event_count (uint16)

        YANG Description: number of consecutive underflow sample
        events needed to trigger an underflow adjustment
        """
        # Returns the wrapped uint16 leaf.
        return self.__trigger_event_count
def _set_trigger_event_count(self, v, load=False):
"""
Setter method for trigger_event_count, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/bandwidth/auto_bandwidth/underflow/state/trigger_event_count (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_trigger_event_count is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_trigger_event_count() directly.
YANG Description: number of consecutive underflow sample
events needed to trigger an underflow adjustment
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 |
fro | m django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^robots.txt', 'django.views.generic.simple.direct_to_templ | ate', {'template': 'robots.txt'}),
)
|
# -*- coding: utf-8 -*-
import hashlib
import binascii
from thrift.transport.THttpClient import THttpClient
from thrift.protocol.TBinaryProtocol import TBinaryProtocol
from evernote.edam.userstore import UserStore
from evernote.edam.notestore import NoteStore
import evernote.edam.type.ttypes as Types
import evernote.edam.error.ttypes as Errors
from evernote.api.client import EvernoteClient
from .settings import EVERNOTE_NOTEBOOK
import logging
class Sink(object):
    """Marker base class for destinations that items can be pushed to."""
class EvernoteSink(Sink):
    """Sink that pushes items into an Evernote notebook."""

    def __init__(self, token, sandbox=False):
        """Initialize evernote connection.

        Client connection handle is assigned to the client property.
        Two properties user_store and note_store are provided for the convenience.
        """
        self.token = token
        self.client = EvernoteClient(token=self.token, sandbox=sandbox)
        self.user_store = self.client.get_user_store()
        self.note_store = self.client.get_note_store()

    def _binary_resource(self, content, mime):
        """Build a Types.Resource for *content* with MIME type *mime*.

        Shared by image_resource/pdf_resource, which were previously
        copy-paste duplicates of each other.
        """
        md5 = hashlib.md5()
        md5.update(content)
        hashvalue = md5.digest()
        data = Types.Data()
        data.size = len(content)  # FIXME better ways of doing this calculation?
        data.bodyHash = hashvalue
        data.body = content
        resource = Types.Resource()
        resource.mime = mime
        resource.data = data
        return resource

    def image_resource(self, item):
        """Create an image resource; MIME type is taken from the item."""
        return self._binary_resource(item.content, item.content_type)

    def pdf_resource(self, item):
        """Create a PDF resource for the item's content."""
        return self._binary_resource(item.content, 'application/pdf')

    def note_attribute(self, source_url=''):
        """Return NoteAttributes carrying the item's source URL."""
        attributes = Types.NoteAttributes()
        attributes.sourceURL = source_url
        return attributes

    def create_note(self, title, content, notebook_name='', tags='', attributes=None, resources=None):
        """Create a note in *notebook_name* (or the default notebook).

        Wraps *content* in an ENML envelope, appends an <en-media> tag for
        each resource, and returns the created note.
        """
        note = Types.Note()
        note.title = title
        if attributes:
            note.attributes = attributes
        if tags:
            note.tagNames = [t.encode('utf-8', 'xmlcharrefreplace') for t in tags.split()]  # Assuming no spaces in tags
            logging.debug(note.tagNames)
        if notebook_name:
            # Look up the target notebook GUID by name.
            notebooks = self.note_store.listNotebooks(self.token)
            for notebook in notebooks:
                if notebook.name == notebook_name:
                    note.notebookGuid = notebook.guid
                    break
            else:
                pass  # create a note in default notebook
        note.content = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">
<en-note>{}""".format(content.encode('utf-8', 'xmlcharrefreplace'))
        if resources:
            note.resources = resources
            for r in resources:
                note.content += """<en-media type="{}" hash="{}"/>""".format(r.mime, binascii.hexlify(r.data.bodyHash))
        note.content += "</en-note>"
        logging.debug(note.content)
        created_note = self.note_store.createNote(self.token, note)
        return created_note

    def push(self, item):
        """Push *item* to Evernote, choosing resource handling by itemtype."""
        kwargs = {
            'title': item.title.encode('utf-8', 'xmlcharrefreplace'),
            'content': item.body,
            'tags': item.tags,
            'notebook_name': EVERNOTE_NOTEBOOK,
            'attributes': self.note_attribute(item.url),
        }
        if item.itemtype == 'PDF':
            resource = self.pdf_resource(item)
            kwargs['resources'] = [resource]
        elif item.itemtype == 'image':
            resource = self.image_resource(item)
            kwargs['resources'] = [resource]
        elif item.itemtype == 'HTML':
            #FIXME check for image inside and create image resources
            kwargs['content'] = item.content
        elif item.itemtype == 'text':
            kwargs['content'] = item.content
        else:
            # XXX Assuming plaintext type
            # Should I raise exception for unknown items?
            item.itemtype = 'text'
        self.create_note(**kwargs)
class Database(Sink):
    """Placeholder database sink — not implemented yet."""
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from fabric.api import reboot, sudo, settings
logging.basicConfig(level=logging.INFO)
def ssserver(port, password, method):
    """Provision a shadowsocks *server* on a yum-based host via fabric.

    Installs shadowsocks with pip, registers the daemon in rc.local, opens
    the firewall port and reboots. Returns True on success, False when any
    remote command fails (the failure is logged).
    """
    try:
        setup_steps = (
            'hash yum',
            'hash python',
            'yum -y update 1>/dev/null',
            'yum -y install python-setuptools 1>/dev/null',
            'yum -y install m2crypto 1>/dev/null',
            'easy_install pip 1>/dev/null',
            'pip install shadowsocks 1>/dev/null',
            'hash ssserver',
            "sed -i '/ssserver/d' /etc/rc.d/rc.local",
        )
        for step in setup_steps:
            sudo(step)
        cmd = '/usr/bin/python /usr/bin/ssserver -p %s -k %s -m %s --user nobody -d start' % (port, password, method)
        sudo("sed -i '$a %s' /etc/rc.d/rc.local" % cmd)
        sudo('chmod +x /etc/rc.d/rc.local')
        sudo('firewall-cmd --zone=public --add-port=%s/tcp --permanent' % port)
        with settings(warn_only=True):
            reboot()
        sudo('ps -ef | grep ssserver')
        return True
    except BaseException as e:
        logging.error(e)
        return False
def sslocal(server_addr, server_port, server_password, method, local_port):
    """Provision a shadowsocks *local client* on a yum-based host via fabric.

    Mirrors ssserver(): install with pip, register sslocal in rc.local, open
    the local firewall port and reboot. Returns True on success, False when
    any remote command fails (the failure is logged).
    """
    try:
        setup_steps = (
            'hash yum',
            'hash python',
            'yum -y update 1>/dev/null',
            'yum -y install python-setuptools 1>/dev/null',
            'yum -y install m2crypto 1>/dev/null',
            'easy_install pip 1>/dev/null',
            'pip install shadowsocks 1>/dev/null',
            'hash sslocal',
            "sed -i '/sslocal /d' /etc/rc.d/rc.local",
        )
        for step in setup_steps:
            sudo(step)
        cmd = '/usr/bin/python /usr/bin/sslocal -s %s -p %s -k %s -m %s -b 0.0.0.0 -l %s --user nobody -d start' % (server_addr, server_port, server_password, method, local_port)
        sudo("sed -i '$a %s' /etc/rc.d/rc.local" % cmd)
        sudo('chmod +x /etc/rc.d/rc.local')
        sudo('firewall-cmd --zone=public --add-port=%s/tcp --permanent' % local_port)
        with settings(warn_only=True):
            reboot()
        sudo('ps -ef | grep sslocal')
        return True
    except BaseException as e:
        logging.error(e)
        return False
|
"""
Compute a harmonic 1-cochain basis for a square with 4 holes.
"""
from numpy import asarray, eye, outer, inner, dot, vstack
from numpy.random import seed, rand
from numpy.linalg import norm
from scipy.sparse.linalg import cg
from pydec import d, delta, simplicial_complex, read_mesh
def hodge_decomposition(omega):
    r"""
    For a given p-cochain \omega there is a unique decomposition

        \omega = d(\alpha) + \delta(\beta) (+) h

    for a p-1 cochain \alpha, a p+1 cochain \beta, and a harmonic
    p-cochain h.

    This function returns (non-unique) representatives \alpha, \beta, and h
    which satisfy the equation above. (The original docstring wrongly named
    them \beta and \gamma.)

    Example:
        # decompose a random 1-cochain
        sc = simplicial_complex(...)
        omega = sc.get_cochain(1)
        omega.v[:] = rand(*omega.v.shape)
        (alpha, beta, h) = hodge_decomposition(omega)
    """
    sc = omega.complex
    p = omega.k
    alpha = sc.get_cochain(p - 1)
    beta = sc.get_cochain(p + 1)
    # Solve for alpha: (delta d) alpha = delta omega  (least-squares via CG)
    A = delta(d(sc.get_cochain_basis(p - 1))).v
    b = delta(omega).v
    alpha.v = cg( A, b, tol=1e-8 )[0]
    # Solve for beta: (d delta) beta = d omega
    A = d(delta(sc.get_cochain_basis(p + 1))).v
    b = d(omega).v
    beta.v = cg( A, b, tol=1e-8 )[0]
    # Solve for h: the remainder is the harmonic component.
    h = omega - d(alpha) - delta(beta)
    return (alpha,beta,h)
def ortho(A):
"""Separates the harmonic forms stored in the rows of A using a heuristic
"""
A = asarray(A)
for i in range(A.shape[0]):
j = abs(A[i]).argmax()
v = A[:,j].copy()
if A[i,j] > 0:
v[i] += norm(v)
else:
v[i] -= norm(v)
Q = eye(A.shape[0]) - 2 * outer(v,v) / inner(v,v)
A = dot(Q,A)
return A
seed(0) # make results consistent
# Read in mesh data from file
mesh = read_mesh('mesh_example.xml')
vertices = mesh.vertices
triangles = mesh.elements
# remove some triangle from the mesh — deleting these four faces punches the
# holes that give the domain nontrivial first cohomology
triangles = triangles[list(set(range(len(triangles))) - set([30,320,21,198])),:]
sc = simplicial_complex((vertices,triangles))
H = [] # harmonic forms
# decompose 4 random 1-cochains; each contributes one harmonic component
for i in range(4):
    omega = sc.get_cochain(1)
    omega.v[:] = rand(*omega.v.shape)
    # NOTE(review): hodge_decomposition returns (alpha, beta, h); the names
    # used here (beta, gamma) are misleading but only h is consumed.
    (beta,gamma,h) = hodge_decomposition(omega)
    h = h.v
    # Gram-Schmidt: project out the harmonic forms already collected,
    # then normalize, so H ends up orthonormal.
    for v in H:
        h -= inner(v,h) * v
    h /= norm(h)
    H.append(h)
H = ortho(vstack(H))
# plot the results
from pylab import figure, title, quiver, axis, show
from pydec import triplot, simplex_quivers
for n,h in enumerate(H):
    figure()
    title('Harmonic 1-cochain #%d' % n)
    triplot(vertices,triangles)
    # arrows located at simplex centers pointing along the cochain values
    bases,dirs = simplex_quivers(sc,h)
    quiver(bases[:,0],bases[:,1],dirs[:,0],dirs[:,1])
    axis('equal')
    show()
|
# pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import S | imLibrary
_l = logging.getLogger(name=__name__)

# SimLibrary model for aclui.dll (Windows ACL editor UI). NOTE(review):
# this file appears auto-generated from Windows metadata; regenerate rather
# than hand-edit the prototypes.
lib = SimLibrary()
# stdcall on 32-bit, Microsoft x64 convention on 64-bit.
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("aclui.dll")
prototypes = \
    {
        #
        'CreateSecurityPage': SimTypeFunction([SimTypeBottom(label="ISecurityInformation")], SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), arg_names=["psi"]),
        #
        'EditSecurity': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeBottom(label="ISecurityInformation")], SimTypeInt(signed=True, label="Int32"), arg_names=["hwndOwner", "psi"]),
        #
        'EditSecurityAdvanced': SimTypeFunction([SimTypePointer(SimTypeInt(signed=True, label="Int"), label="IntPtr", offset=0), SimTypeBottom(label="ISecurityInformation"), SimTypeInt(signed=False, label="SI_PAGE_TYPE")], SimTypeInt(signed=True, label="Int32"), arg_names=["hwndOwner", "psi", "uSIPage"]),
    }
lib.set_prototypes(prototypes)
|
##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
r'''How to conditionalize specific directives
There is a "condition" attribute in the
"http://namespaces.zope.org/zcml" namespace which is honored on all
elements in ZCML. The value of the attribute is an expression
which is used to determine if that element and its descendents are
used. If the condition is true, processing continues normally,
otherwise that element and its descendents are ignored.
Currently the expression is always of the form "have featurename", and it
checks for the presence of a <meta:provides feature="featurename" />.
Our demonstration uses a trivial registry; each registration consists
of a simple id inserted in the global `registry` in this module. We
can check that a registration was made by checking whether the id is
present in `registry`.
We start by loading the example ZCML file, *conditions.zcml*::
>>> import zope.configuration.tests
>>> import zope.configuration.xmlconfig
>>> context = zope.configuration.xmlconfig.file("conditions.zcml",
... zope.configuration.tests)
To show that our sample directive works, we see that the unqualified
registration was successful::
>>> "unqualified.registration" in registry
True
When the expression specified with ``zcml:condition`` evaluates to
true, the el | ement it is attached to and all contained elements (not
otherwise conditioned) should be processed normally::
>>> "direct.true.condition" in registry
True
>>> "nested.true.condition" i | n registry
True
However, when the expression evaluates to false, the conditioned
element and all contained elements should be ignored::
>>> "direct.false.condition" in registry
False
>>> "nested.false.condition" in registry
False
Conditions on container elements affect the conditions in nested
elements in a reasonable way. If an "outer" condition is true, nested
conditions are processed normally::
>>> "true.condition.nested.in.true" in registry
True
>>> "false.condition.nested.in.true" in registry
False
If the outer condition is false, inner conditions are not even
evaluated, and the nested elements are ignored::
>>> "true.condition.nested.in.false" in registry
False
>>> "false.condition.nested.in.false" in registry
False
Now we need to clean up after ourselves::
>>> del registry[:]
'''
__docformat__ = "reStructuredText"
import zope.interface
import zope.schema
import zope.testing.doctest
class IRegister(zope.interface.Interface):
    """Schema for the trivial sample registration directive."""

    id = zope.schema.Id(
        title=u"Identifier",
        description=u"Some identifier that can be checked.",
        required=True,
    )
registry = []


def register(context, id):
    """Record *id* by appending it to the global ``registry``.

    The append is deferred through the configuration context's action
    machinery, discriminated on ('Register', id) so conflicting
    registrations can be detected.
    """
    discriminator = ('Register', id)
    context.action(discriminator=discriminator,
                   callable=registry.append,
                   args=(id,)
                   )
def test_suite():
    """unittest hook: run this module's doctests via the Zope test runner."""
    return zope.testing.doctest.DocTestSuite()
|
from django.core.exc | eptions import ValidationError
from amweekly.slack.tests.factories import SlashCommandFactory
import pytest
# BUG FIX: 'pytest.mark.unit' was a bare expression statement, which builds
# the mark object and throws it away — the test was never actually marked.
# It must be applied as a decorator.
@pytest.mark.unit
def test_slash_command_raises_with_invalid_token(settings):
    """A SlashCommand whose token is not in SLACK_TOKENS must fail validation."""
    settings.SLACK_TOKENS = ''
    with pytest.raises(ValidationError):
        SlashCommandFactory()
|
from mitxmako.shortcuts import render_to_response
from django.http import HttpResponse
from .models import poll_store
from datetime import datetime
from django.utils import timezone
import json
import urllib2
from math import floor
def poll_form_view(request, poll_type=None):
    """Render the creation form for *poll_type*.

    NOTE(review): when poll_type is missing this falls through and returns
    None — presumably the URLconf always supplies it; confirm.
    """
    if not poll_type:
        return None
    return render_to_response('polls/' + poll_type + '_form.html')
def poll_form_submit(request, poll_type):
    """Persist a poll definition posted from the form; respond with JSON.

    On any failure the error text is returned in the JSON payload rather
    than raised (best-effort endpoint).
    """
    try:
        poll_id = request.POST.get('poll_id')
        question = request.POST.get('question')
        answers = get_post_array(request.POST, 'answers')
        expiration = request.POST.get('expiration', '')
        # Expiration is optional; parsed as MM/DD/YYYY when present.
        expiration_object = datetime.strptime(expiration, '%m/%d/%Y') if expiration else None
        poll_connect = poll_store()
        poll_connect.set_poll(poll_type, poll_id, question, answers, expiration_object)
        response = {'Success': True}
    except Exception as e:
        response = {'Success': False, 'Error': 'Error: {0}'.format(e)}
    return HttpResponse(json.dumps(response), content_type='application/json')
def vote_calc(poll_dict, poll_type, poll_id):
    """Tally votes per answer and attach floored percentages.

    Returns {answer_index: {'count': n, 'percent': p}}; percentages are 0
    when there are no votes at all.
    """
    poll_connect = poll_store()
    votes = dict()
    total = 0
    for idx in poll_dict['answers']:
        count = poll_connect.get_answers(poll_type, poll_id, idx).count()
        votes[idx] = {'count': count}
        total += count
    for vote in votes.values():
        vote['percent'] = floor((float(vote['count']) / total) * 100) if total else 0
    return votes
def poll_data(poll_type, poll_id, user_id):
    """Assemble the template context for one poll.

    Includes the question/answers, expiry status, whether *user_id* already
    voted, and the current tallies from vote_calc().
    """
    poll_connect = poll_store()
    poll_dict = poll_connect.get_poll(poll_type, poll_id)
    expiration = poll_dict['expiration']
    # A poll with no expiration never expires.
    expired = expiration is not None and expiration <= timezone.now()
    return {
        'question': poll_dict['question'],
        'answers': poll_dict['answers'],
        'expiration': expiration,
        'expired': expired,
        'user_answered': poll_connect.user_answered(poll_type, poll_id, user_id),
        'votes': vote_calc(poll_dict, poll_type, poll_id),
        'poll_type': poll_dict['type'],
        'poll_id': poll_dict['identifier'],
    }
def poll_view(request, poll_type, poll_id):
    """Render the poll page for the requesting user."""
    context = poll_data(poll_type, poll_id, request.user.id)
    template = 'polls/' + poll_type + '_poll.html'
    return render_to_response(template, context)
def poll_vote(request):
    """Record the requesting user's vote and return updated tallies as JSON."""
    try:
        poll_type = request.POST.get('poll_type')
        poll_id = request.POST.get('poll_id')
        vote = request.POST.get('vote')
        poll_connect = poll_store()
        poll_connect.set_answer(poll_type, poll_id, request.user.id, vote)
        poll_dict = poll_connect.get_poll(poll_type, poll_id)
        response = {
            'Success': True,
            'Votes': vote_calc(poll_dict, poll_type, poll_id),
            'Answers': poll_dict['answers'],
        }
    except Exception as e:
        response = {'Success': False, 'Error': 'Error: {0}'.format(e)}
    return HttpResponse(json.dumps(response), content_type='application/json')
def get_post_array(post, name):
    """
    Gets array values from a POST.

    Collects keys of the form ``name[i]`` and returns {i: unquoted_value},
    skipping entries the client posted as the string 'undefined'.
    """
    prefix = name + '['
    output = dict()
    for key in post.keys():
        # Check the key shape first; the original unquoted every value in
        # the POST before even looking at the key.
        if not key.startswith(prefix):
            continue
        value = urllib2.unquote(post.get(key))
        if value == 'undefined':
            continue
        # Index is the text between the brackets: name[<i>]
        start = key.find('[')
        output[key[start + 1:-1]] = value
    return output
|
ig['clipvalue'] = self.clipvalue
return config
    @classmethod
    def from_config(cls, config):
        """Recreate the optimizer from a ``get_config()`` dictionary."""
        return cls(**config)
class SGD(Optimizer):
    """Stochastic gradient descent optimizer.
    Includes support for momentum,
    learning rate decay, and Nesterov momentum.
    # Arguments
        lr: float >= 0. Learning rate.
        momentum: float >= 0. Parameter updates momentum.
        decay: float >= 0. Learning rate decay over each update.
        nesterov: boolean. Whether to apply Nesterov momentum.
    """
    def __init__(self, lr=0.01, momentum=0., decay=0.,
                 nesterov=False, **kwargs):
        super(SGD, self).__init__(**kwargs)
        # Hyperparameters live in backend variables so they can be changed
        # after construction (e.g. by LR schedules).
        self.iterations = K.variable(0., name='iterations')
        self.lr = K.variable(lr, name='lr')
        self.momentum = K.variable(momentum, name='momentum')
        self.decay = K.variable(decay, name='decay')
        # Plain-float copy: decay is only applied when nonzero at init time.
        self.initial_decay = decay
        self.nesterov = nesterov
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        self.updates = []
        lr = self.lr
        if self.initial_decay > 0:
            # Inverse-time decay: lr / (1 + decay * iterations).
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates .append(K.update_add(self.iterations, 1))
        # momentum
        shapes = [K.get_variable_shape(p) for p in params]
        moments = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + moments
        for p, g, m in zip(params, grads, moments):
            v = self.momentum * m - lr * g  # velocity
            self.updates.append(K.update(m, v))
            if self.nesterov:
                # Nesterov: look ahead by applying momentum to the new velocity.
                new_p = p + self.momentum * v - lr * g
            else:
                new_p = p + v
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'momentum': float(K.get_value(self.momentum)),
                  'decay': float(K.get_value(self.decay)),
                  'nesterov': self.nesterov}
        base_config = super(SGD, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class RMSprop(Optimizer):
    """RMSProp optimizer.
    It is recommended to leave the parameters of this optimizer
    at their default values
    (except the learning rate, which can be freely tuned).
    This optimizer is usually a good choice for recurrent
    neural networks.
    # Arguments
        lr: float >= 0. Learning rate.
        rho: float >= 0.
        epsilon: float >= 0. Fuzz factor.
        decay: float >= 0. Learning rate decay over each update.
    # References
        - [rmsprop: Divide the gradient by a running average of its recent magnitude](http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
    """
    def __init__(self, lr=0.001, rho=0.9, epsilon=1e-8, decay=0.,
                 **kwargs):
        super(RMSprop, self).__init__(**kwargs)
        self.lr = K.variable(lr, name='lr')
        self.rho = K.variable(rho, name='rho')
        self.epsilon = epsilon
        self.decay = K.variable(decay, name='decay')
        # Plain-float copy: decay is only applied when nonzero at init time.
        self.initial_decay = decay
        self.iterations = K.variable(0., name='iterations')
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        # One running average of squared gradients per parameter.
        accumulators = [K.zeros(K.get_variable_shape(p), dtype=K.dtype(p)) for p in params]
        self.weights = accumulators
        self.updates = []
        lr = self.lr
        if self.initial_decay > 0:
            # Inverse-time decay: lr / (1 + decay * iterations).
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))
        for p, g, a in zip(params, grads, accumulators):
            # update accumulator: exponential moving average of g^2.
            new_a = self.rho * a + (1. - self.rho) * K.square(g)
            self.updates.append(K.update(a, new_a))
            # Gradient scaled by RMS of recent gradients (epsilon avoids /0).
            new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'rho': float(K.get_value(self.rho)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(RMSprop, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class Adagrad(Optimizer):
    """Adagrad optimizer.
    It is recommended to leave the parameters of this optimizer
    at their default values.
    # Arguments
        lr: float >= 0. Learning rate.
        epsilon: float >= 0.
        decay: float >= 0. Learning rate decay over each update.
    # References
        - [Adaptive Subgradient Methods for Online Learning and Stochastic Optimization](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
    """
    def __init__(self, lr=0.01, epsilon=1e-8, decay=0., **kwargs):
        super(Adagrad, self).__init__(**kwargs)
        self.lr = K.variable(lr, name='lr')
        self.epsilon = epsilon
        self.decay = K.variable(decay, name='decay')
        # Plain-float copy: decay is only applied when nonzero at init time.
        self.initial_decay = decay
        self.iterations = K.variable(0., name='iterations')
    def get_updates(self, params, constraints, loss):
        grads = self.get_gradients(loss, params)
        shapes = [K.get_variable_shape(p) for p in params]
        # One monotonically growing sum of squared gradients per parameter.
        accumulators = [K.zeros(shape) for shape in shapes]
        self.weights = accumulators
        self.updates = []
        lr = self.lr
        if self.initial_decay > 0:
            # Inverse-time decay: lr / (1 + decay * iterations).
            lr *= (1. / (1. + self.decay * self.iterations))
            self.updates.append(K.update_add(self.iterations, 1))
        for p, g, a in zip(params, grads, accumulators):
            new_a = a + K.square(g)  # update accumulator
            self.updates.append(K.update(a, new_a))
            # Per-parameter learning rate shrinks with accumulated gradient.
            new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)
            # apply constraints
            if p in constraints:
                c = constraints[p]
                new_p = c(new_p)
            self.updates.append(K.update(p, new_p))
        return self.updates
    def get_config(self):
        config = {'lr': float(K.get_value(self.lr)),
                  'decay': float(K.get_value(self.decay)),
                  'epsilon': self.epsilon}
        base_config = super(Adagrad, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
class Adadelta(Opt | imizer):
"""Adadelta optimizer.
It is recommended to leave the parameters of this optimizer
at their default values.
# Arguments
lr: float >= 0. Learning rate.
It is recommended to leave it at the default value.
rho: float >= 0.
epsilon: float >= 0. Fuzz factor.
decay: float >= 0. Learning rate decay over each update.
# References
- [Adadelta - an adaptive learning rate method](http://arxiv.org/abs/1212.5701)
"""
def __init__(self, lr=1.0, rho=0.95, epsilon=1e-8, decay=0.,
**kwargs):
super(Adadelta, self).__init__(**kwargs)
self.lr = K.variable(lr, name='lr')
self.rho = rho
self.epsilon = epsilon
self.decay = K.variable(decay, name='decay')
self.initial_decay = decay
self.iterations = K.variable(0., name='iterations')
def get_updates(self, params, constraints, loss):
grads = self.get_gradients(loss, params)
shapes = [K.get_variable_shape(p) for p in params]
accumulators = [K.zeros(shape) for shape in shapes]
delta_accumulators = [K.zeros(shape) for shape in shapes]
self.weights = accumulators + delta_accumulators
self.updates = []
lr = self.lr
if self.initial_decay > 0:
lr *= (1. / (1. + self.decay * self.iterations))
self.updates.append(K.update_ad |
ducts/1603
# ===========================================================================
class BMP085 :
    """Driver for the Bosch BMP085 barometric pressure/temperature sensor,
    talking to the chip over I2C."""
    i2c = None
    # I guess these can be public
    # I2C address
    BMP085_ADDRESS = 0x77
    # Operating Modes
    BMP085_ULTRALOWPOWER = 0
    BMP085_STANDARD = 1
    BMP085_HIGHRES = 2
    BMP085_ULTRAHIGHRES = 3
    # BMP085 Registers
    __BMP085_CAL_AC1 = 0xAA  # R   Calibration data (16 bits)
    __BMP085_CAL_AC2 = 0xAC  # R   Calibration data (16 bits)
    __BMP085_CAL_AC3 = 0xAE  # R   Calibration data (16 bits)
    __BMP085_CAL_AC4 = 0xB0  # R   Calibration data (16 bits)
    __BMP085_CAL_AC5 = 0xB2  # R   Calibration data (16 bits)
    __BMP085_CAL_AC6 = 0xB4  # R   Calibration data (16 bits)
    __BMP085_CAL_B1 = 0xB6  # R   Calibration data (16 bits)
    __BMP085_CAL_B2 = 0xB8  # R   Calibration data (16 bits)
    __BMP085_CAL_MB = 0xBA  # R   Calibration data (16 bits)
    __BMP085_CAL_MC = 0xBC  # R   Calibration data (16 bits)
    __BMP085_CAL_MD = 0xBE  # R   Calibration data (16 bits)
    __BMP085_CONTROL = 0xF4
    __BMP085_TEMPDATA = 0xF6
    __BMP085_PRESSUREDATA = 0xF6
    __BMP085_READTEMPCMD = 0x2E
    __BMP085_READPRESSURECMD = 0x34
    # Private Fields (factory calibration coefficients, filled in by
    # readCalibrationData)
    _cal_AC1 = 0
    _cal_AC2 = 0
    _cal_AC3 = 0
    _cal_AC4 = 0
    _cal_AC5 = 0
    _cal_AC6 = 0
    _cal_B1 = 0
    _cal_B2 = 0
    _cal_MB = 0
    _cal_MC = 0
    _cal_MD = 0
# Constructor
def __init__(self, port=1, address=BMP085_ADDRESS, mode=BMP085_STANDARD, debug=False):
self.i2c = I2C(port, I2C.MASTER)
self.address = address
self.debug = debug
# Make sure the specified mode is in the appropriate range
if ((mode < 0) | (mode > 3)):
if (self.debug):
print("Invalid Mode: Using STANDARD by default")
self.mode = self.BMP085_STANDARD
else:
self.mode = mode
# Read the calibration data
self.readCalibrationData()
def readS16(self, register):
"Reads a signed 16-bit value"
hi = ord(self.i2c.mem_read(1, | self.address, register))
if hi > 127: hi -= 256
lo = ord(self.i2c.mem_read(1, self | .address, register+1))
return (hi << 8) + lo
def readU16(self, register):
"Reads an unsigned 16-bit value"
hi = ord(self.i2c.mem_read(1, self.address, register))
lo = ord(self.i2c.mem_read(1, self.address, register+1))
return (hi << 8) + lo
    def readCalibrationData(self):
        "Reads the calibration data from the IC"
        # Eleven factory-programmed coefficients used by the datasheet's
        # temperature and pressure compensation formulas.
        self._cal_AC1 = self.readS16(self.__BMP085_CAL_AC1)  # INT16
        self._cal_AC2 = self.readS16(self.__BMP085_CAL_AC2)  # INT16
        self._cal_AC3 = self.readS16(self.__BMP085_CAL_AC3)  # INT16
        self._cal_AC4 = self.readU16(self.__BMP085_CAL_AC4)  # UINT16
        self._cal_AC5 = self.readU16(self.__BMP085_CAL_AC5)  # UINT16
        self._cal_AC6 = self.readU16(self.__BMP085_CAL_AC6)  # UINT16
        self._cal_B1 = self.readS16(self.__BMP085_CAL_B1)  # INT16
        self._cal_B2 = self.readS16(self.__BMP085_CAL_B2)  # INT16
        self._cal_MB = self.readS16(self.__BMP085_CAL_MB)  # INT16
        self._cal_MC = self.readS16(self.__BMP085_CAL_MC)  # INT16
        self._cal_MD = self.readS16(self.__BMP085_CAL_MD)  # INT16
        if (self.debug):
            self.showCalibrationData()
    def showCalibrationData(self):
        "Displays the calibration values for debugging purposes"
        print("DBG: AC1 = %6d" % (self._cal_AC1))
        print("DBG: AC2 = %6d" % (self._cal_AC2))
        print("DBG: AC3 = %6d" % (self._cal_AC3))
        print("DBG: AC4 = %6d" % (self._cal_AC4))
        print("DBG: AC5 = %6d" % (self._cal_AC5))
        print("DBG: AC6 = %6d" % (self._cal_AC6))
        print("DBG: B1  = %6d" % (self._cal_B1))
        print("DBG: B2  = %6d" % (self._cal_B2))
        print("DBG: MB  = %6d" % (self._cal_MB))
        print("DBG: MC  = %6d" % (self._cal_MC))
        print("DBG: MD  = %6d" % (self._cal_MD))
    def readRawTemp(self):
        "Reads the raw (uncompensated) temperature from the sensor"
        # Trigger a temperature conversion, wait for it to complete, then
        # read the 16-bit result.
        self.i2c.mem_write(self.__BMP085_READTEMPCMD, self.address, self.__BMP085_CONTROL)
        time.sleep(0.005)  # Wait 5ms
        raw = self.readU16(self.__BMP085_TEMPDATA)
        if (self.debug):
            print("DBG: Raw Temp: 0x%04X (%d)" % (raw & 0xFFFF, raw))
        return raw
    def readRawPressure(self):
        "Reads the raw (uncompensated) pressure level from the sensor"
        # Start a pressure conversion; the oversampling setting is encoded
        # into bits 6-7 of the command byte.
        self.i2c.mem_write(self.__BMP085_READPRESSURECMD + (self.mode << 6), self.address, self.__BMP085_CONTROL)
        # Conversion time depends on the oversampling mode.
        if (self.mode == self.BMP085_ULTRALOWPOWER):
            time.sleep(0.005)
        elif (self.mode == self.BMP085_HIGHRES):
            time.sleep(0.014)
        elif (self.mode == self.BMP085_ULTRAHIGHRES):
            time.sleep(0.026)
        else:
            time.sleep(0.008)
        msb = ord(self.i2c.mem_read(1, self.address, self.__BMP085_PRESSUREDATA))
        lsb = ord(self.i2c.mem_read(1, self.address, self.__BMP085_PRESSUREDATA+1))
        xlsb = ord(self.i2c.mem_read(1, self.address, self.__BMP085_PRESSUREDATA+2))
        # Result is up to 19 bits wide, right-aligned per the mode.
        raw = ((msb << 16) + (lsb << 8) + xlsb) >> (8 - self.mode)
        if (self.debug):
            print("DBG: Raw Pressure: 0x%04X (%d)" % (raw & 0xFFFF, raw))
        return raw
def readTemperature(self):
"Gets the compensated temperature in degrees celcius"
UT = 0
X1 = 0
X2 = 0
B5 = 0
temp = 0.0
# Read raw temp before aligning it with the calibration values
UT = self.readRawTemp()
X1 = ((UT - self._cal_AC6) * self._cal_AC5) >> 15
X2 = (self._cal_MC << 11) / (X1 + self._cal_MD)
B5 = int(X1 + X2)
temp = ((B5 + 8) >> 4) / 10.0
if (self.debug):
print("DBG: Calibrated temperature = %f C" % temp)
return temp
    def readPressure(self):
        "Gets the compensated pressure in pascal"
        # Integer compensation algorithm from the BMP085 datasheet.
        UT = 0
        UP = 0
        B3 = 0
        B5 = 0
        B6 = 0
        X1 = 0
        X2 = 0
        X3 = 0
        p = 0
        B4 = 0
        B7 = 0
        UT = self.readRawTemp()
        UP = self.readRawPressure()
        # You can use the datasheet values to test the conversion results
        # dsValues = True
        dsValues = False
        if (dsValues):
            # Worked example inputs from the datasheet; overrides the real
            # calibration so the result can be checked by hand.
            UT = 27898
            UP = 23843
            self._cal_AC6 = 23153
            self._cal_AC5 = 32757
            self._cal_MB = -32768;
            self._cal_MC = -8711
            self._cal_MD = 2868
            self._cal_B1 = 6190
            self._cal_B2 = 4
            self._cal_AC3 = -14383
            self._cal_AC2 = -72
            self._cal_AC1 = 408
            self._cal_AC4 = 32741
            self.mode = self.BMP085_ULTRALOWPOWER
            if (self.debug):
                self.showCalibrationData()
        # True Temperature Calculations
        X1 = ((UT - self._cal_AC6) * self._cal_AC5) >> 15
        X2 = (self._cal_MC << 11) / (X1 + self._cal_MD)
        B5 = int(X1 + X2)
        if (self.debug):
            print("DBG: X1 = %d" % (X1))
            print("DBG: X2 = %d" % (X2))
            print("DBG: B5 = %d" % (B5))
            print("DBG: True Temperature = %.2f C" % (((B5 + 8) >> 4) / 10.0))
        # Pressure Calculations
        B6 = B5 - 4000
        X1 = (self._cal_B2 * (B6 * B6) >> 12) >> 11
        X2 = (self._cal_AC2 * B6) >> 11
        X3 = X1 + X2
        B3 = (((self._cal_AC1 * 4 + X3) << self.mode) + 2) / 4
        if (self.debug):
            print("DBG: B6 = %d" % (B6))
            print("DBG: X1 = %d" % (X1))
            print("DBG: X2 = %d" % (X2))
            print("DBG: X3 = %d" % (X3))
        X1 = (self._cal_AC3 * B6) >> 13
        X2 = (self._cal_B1 * ((B6 * B6) >> 12)) >> 16
        X3 = ((X1 + X2) + 2) >> 2
        B4 = (self._cal_AC4 * (X3 + 32768)) >> 15
        B7 = (UP - B3) * (50000 >> self.mode)
        if (self.debug):
            print("DBG: X1 = %d" % (X1))
            print("DBG: X2 = %d" % (X2))
            print("DBG: X3 = %d" % (X3))
            print("DBG: B4 = %d" % (B4))
            print("DBG: B7 = %d" % (B7))
        # Two branches avoid 32-bit overflow in the original C algorithm.
        if (B7 < 0x80000000):
            p = int((B7 * 2) / B4)
        else:
            p = int((B7 / B4) * 2)
        if (self.debug):
            print("DBG: X1 = %d" % (X1))
        # Final second-order correction.
        X1 = (p >> 8) * (p >> 8)
        X1 = (X1 * 3038) >> 16
        X2 = (-7357 * p) >> 16
        if (self.debug):
            print("DBG: p = %d" % (p))
            print("DBG: X1 = %d" % (X1))
            print("DBG: X2 = %d" % (X2))
        p = p + ((X1 + X2 + 3791) >> 4)
        if (self.debug):
            print("DBG: Pressure = %d Pa" % (p))
        return p
def readAltitude(self, seaLevelPressure=101325):
"Calculates the altit |
import math
import re
from collections import defaultdict
def matches(t1, t2):
    """Return True if any edge of tile t1 equals any edge of t2 in any
    orientation (t2's edges are also compared reversed)."""
    right1 = "".join(row[-1] for row in t1)
    left1 = "".join(row[0] for row in t1)
    right2 = "".join(row[-1] for row in t2)
    left2 = "".join(row[0] for row in t2)
    edges1 = [t1[0], t1[-1], right1, left1]
    edges2 = [t2[0], t2[-1], t2[0][::-1], t2[-1][::-1],
              left2, left2[::-1], right2, right2[::-1]]
    return any(e1 == e2 for e1 in edges1 for e2 in edges2)
def flip(t):
    """Mirror a tile horizontally by reversing every row."""
    return ["".join(reversed(row)) for row in t]
def rotate(t):
    """Rotate a tile 90 degrees clockwise.

    Transpose the reversed row list (technique from
    https://stackoverflow.com/a/34347121).
    """
    return ["".join(column) for column in zip(*t[::-1])]
def set_corner(cor, right, down):
    """Orient the corner tile `cor` so that its right edge can meet tile
    `right` and its bottom edge can meet tile `down`; return the oriented
    tile, or None if no orientation fits."""
    right_col_r = "".join(row[-1] for row in right)
    left_col_r = "".join(row[0] for row in right)
    right_col_d = "".join(row[-1] for row in down)
    left_col_d = "".join(row[0] for row in down)
    r_edges = [right[0], right[-1], right[0][::-1], right[-1][::-1],
               right_col_r, right_col_r[::-1], left_col_r, left_col_r[::-1]]
    d_edges = [down[0], down[-1], down[0][::-1], down[-1][::-1],
               right_col_d, right_col_d[::-1], left_col_d, left_col_d[::-1]]
    # Try all 8 orientations: 2 flips x 4 rotations.
    for _ in range(2):
        cor = flip(cor)
        for _ in range(4):
            cor = rotate(cor)
            own_right = "".join(row[-1] for row in cor)
            if cor[-1] in d_edges and own_right in r_edges:
                return cor
    return None
def remove_border(t):
    """Strip the outermost ring of characters from a tile."""
    inner_rows = t[1:-1]
    return [row[1:-1] for row in inner_rows]
def set_left_edge(t1, t2):
    """Orient t2 so its left column equals t1's right column; return the
    oriented tile, or None if no orientation matches."""
    target = "".join(row[-1] for row in t1)
    for _ in range(2):
        t2 = flip(t2)
        for _ in range(4):
            t2 = rotate(t2)
            left_column = "".join(row[0] for row in t2)
            if left_column == target:
                return t2
    return None
def set_upper_edge(t1, t2):
    """Orient t2 so its top row equals t1's bottom row; return the oriented
    tile, or None if no orientation matches."""
    target = t1[-1]
    for _ in range(2):
        t2 = flip(t2)
        for _ in range(4):
            t2 = rotate(t2)
            if t2[0] == target:
                return t2
    return None
def assemble_image(img, tiles):
    """Stitch the oriented tiles into one big image.

    img: 2D grid of tile ids; tiles: mapping id -> list of row strings.
    Returns the concatenated rows, top grid row first.

    Fix: the per-row buffer was named `slice`, shadowing the builtin;
    renamed and switched to list.extend for clarity.
    """
    whole_image = []
    for tile_row in img:
        # One growing string per pixel row of a tile in this grid row.
        strips = [""] * len(tiles[tile_row[0]])
        for tile_id in tile_row:
            for i, pixel_row in enumerate(tiles[tile_id]):
                strips[i] += pixel_row
        whole_image.extend(strips)
    return whole_image
def part1():
    """AoC 2020 day 20 part 1: print the product of the four corner tile ids.

    Corners are exactly the tiles with two edge-matching neighbours.
    """
    tiles = defaultdict(list)
    for l in open("input.txt"):
        if "Tile" in l:
            tile = int(re.findall(r"\d+", l)[0])
        elif "." in l or "#" in l:
            tiles[tile].append(l.strip())
    # Build the neighbour graph: two tiles are connected when any of their
    # edges match in some orientation.
    connected = defaultdict(set)
    for i in tiles:
        for t in tiles:
            if i == t:
                continue
            if matches(tiles[i], tiles[t]):
                connected[i].add(t)
                connected[t].add(i)
    prod = 1
    for i in connected:
        if len(connected[i]) == 2:
            prod *= i
    print(prod)
def part2():
    """AoC 2020 day 20 part 2: assemble the image, mark sea monsters with
    'O', write the result to output.txt and print the remaining '#' count.
    """
    tiles = defaultdict(list)
    for l in open("input.txt"):
        if "Tile" in l:
            tile = int(re.findall(r"\d+", l)[0])
        elif "." in l or "#" in l:
            tiles[tile].append(l.strip())
    # Neighbour graph, as in part1.
    connected = defaultdict(set)
    for i in tiles:
        for t in tiles:
            if i == t:
                continue
            if matches(tiles[i], tiles[t]):
                connected[i].add(t)
                connected[t].add(i)
    sz = int(math.sqrt(len(connected)))
    image = [[0 for _ in range(sz)] for _ in range(sz)]
    # Pick any corner (two neighbours) as the top-left tile.
    for i in connected:
        if len(connected[i]) == 2:
            corner = i
            break
    image[0][0] = corner
    added = {corner}
    # Fill the first row: each next tile is an unused neighbour that is an
    # edge tile (fewer than 4 neighbours).
    for y in range(1, sz):
        pos = connected[image[0][y - 1]]
        for cand in pos:
            if cand not in added and len(connected[cand]) < 4:
                image[0][y] = cand
                added.add(cand)
                break
    # Fill the remaining rows from the tile directly above.
    for x in range(1, sz):
        for y in range(sz):
            pos = connected[image[x - 1][y]]
            for cand in pos:
                if cand not in added:
                    image[x][y] = cand
                    added.add(cand)
                    break
    # Orient the corner first, then orient each tile against its upper and
    # left neighbours.
    tiles[image[0][0]] = set_corner(tiles[image[0][0]], tiles[image[0][1]], tiles[image[1][0]])
    for y, l in enumerate(image):
        if y != 0:
            prv = image[y - 1][0]
            tiles[l[0]] = set_upper_edge(tiles[prv], tiles[l[0]])
        for x, tile in enumerate(l):
            if x != 0:
                prv = image[y][x - 1]
                tiles[tile] = set_left_edge(tiles[prv], tiles[tile])
    for t in tiles:
        tiles[t] = remove_border(tiles[t])
    image = assemble_image(image, tiles)
    # Load the monster mask as a set of (x, y) offsets of '#' cells.
    ky = 0
    monster = set()
    for l in open("monster.txt").read().split("\n"):
        kx = len(l)
        for i, ch in enumerate(l):
            if ch == "#":
                monster.add((i, ky))
        ky += 1
    # Search all 8 orientations of the image and mark every monster hit.
    for _ in range(2):
        image = flip(image)
        for _ in range(4):
            image = rotate(image)
            for x in range(0, len(image) - kx):
                for y in range(0, len(image) - ky):
                    parts = []
                    for i, p in enumerate(monster):
                        dx = x + p[0]
                        dy = y + p[1]
                        parts.append(image[dy][dx] == "#")
                    if all(parts):
                        for p in monster:
                            dx = x + p[0]
                            dy = y + p[1]
                            image[dy] = image[dy][:dx] + "O" + image[dy][dx + 1 :]
    with open("output.txt", "w+") as f:
        for l in rotate(rotate(rotate(image))):
            f.write(l + "\n")
    # Water roughness: '#' cells not belonging to any monster.
    print(sum([l.count("#") for l in image]))
if __name__ == "__main__":
    # Solve both puzzle parts when run as a script.
    part1()
    part2()
|
import subprocess
import os
pathToGrabber = os.path.abspath("grab.mk")
def getVariable(var):
    """Ask make (via the grabber makefile) to print the value of `var`."""
    command = ["make", "--silent", "-f", pathToGrabber, "GETVAR", "VARNAME=%s" % var]
    proc = subprocess.Popen(command, stdout = subprocess.PIPE)
    output, _ = proc.communicate()
    return output.strip()
def getVariableList(var):
    """Return the make variable `var` split into whitespace-separated tokens."""
    # str.split() with no argument already discards surrounding whitespace.
    return getVariable(var).split()
#print getTargetOutput("LIBRARY")
#print getTargetOutput("FILES")
#print getTargetOutput("PROGRAM")
def getSources():
    """Return the .c source file names derived from the FILES make variable."""
    return [fn.strip() + ".c" for fn in getVariableList("FILES")]
def getTarget():
    """Determine the build target defined by the current directory's Makefile.

    Returns a (name, cmake_command, type_flag) tuple:
    PROGRAM -> (prog, "add_executable", ""), LIBRARY -> (lib, "add_library",
    "SHARED"/"STATIC"), or (None, "", "") when neither is defined.

    Fix: removed the dead local `ret = {}` that was never used.
    """
    prog = getVariable("PROGRAM")
    lib = getVariable("LIBRARY")
    if len(prog) > 0:
        return prog, "add_executable", ""
    elif len(lib) > 0:
        if getVariable("SHARED") == "1":
            return lib, "add_library", "SHARED"
        else:
            return lib, "add_library", "STATIC"
    else:
        return None, "", ""
def doDirectory(sourcedir):
    """Generate a CMakeLists.txt in `sourcedir` from its Makefile variables.

    NOTE(review): this chdirs into the directory and never restores the
    previous working directory - confirm callers do not depend on cwd.
    """
    print sourcedir
    os.chdir(sourcedir)
    with open("CMakeLists.txt","w") as cmake:
        cmake.write("include_directories(.)\n")
        target, targetcommand, targettype = getTarget()
        if target is not None:
            # Generated (precompiled) files are listed separately and built
            # into the binary dir.
            generated = getVariableList("PRECOMP")
            if len(generated) > 0:
                cmake.write("set(GENERATED\n")
                cmake.writelines(["\t%s\n" % fn for fn in generated])
                cmake.write(")\n")
                cmake.write("# TODO: generate these files!\n\n\n")
            sources = getSources()
            cmake.write("set(SOURCES\n")
            cmake.writelines(["\t%s\n" % fn for fn in sources if fn not in generated])
            cmake.writelines(["\t${CMAKE_CURRENT_BINARY_DIR}/%s\n" % fn for fn in generated])
            cmake.writelines(["\t%s\n" % fn for fn in getVariableList("LIB_DEFS") if fn not in generated and fn not in sources])
            cmake.write(")\n")
            # Strip the "-l" prefix so names can be fed to target_link_libraries.
            libs = [ lib.replace("-l", "") for lib in getVariableList("LIBRARIES") ]
            if getVariable("OPENGL"):
                libs.append("${OPENGL_LIBRARIES}")
                cmake.write("include_directories(${OPENGL_INCLUDE_DIR})\n")
            if getVariable("GLUT"):
                libs.append("${GLUT_LIBRARIES}")
                cmake.write("include_directories(${GLUT_INCLUDE_DIR})\n")
            cmake.write("%s(%s %s ${SOURCES})\n" % (targetcommand, target, targettype))
            if len(libs) > 0:
                cmake.write("target_link_libraries(%s %s)\n" % (target, " ".join(libs)))
            cmake.write("target_link_libraries(%s ${EXTRA_LIBS})\n" % target)
            if "-lX11" in getVariableList("LDFLAGS"):
                cmake.write("if(X11_PLATFORM)\n")
                cmake.write("\ttarget_link_libraries(%s ${X11_LIBRARIES})\n" % target)
                cmake.write("endif()\n")
            cmake.write("""install(TARGETS %s
                LIBRARY DESTINATION lib COMPONENT runtime
                ARCHIVE DESTINATION lib COMPONENT dev
                RUNTIME DESTINATION bin COMPONENT runtime)\n""" % target)
            # LIB_COPIES asks for extra identical targets built from the
            # same sources under derived names.
            for copy in getVariableList("LIB_COPIES"):
                copytarget = "%s_%s_copy" % (copy, target)
                cmake.write("%s(%s %s ${SOURCES})\n" % (targetcommand, copytarget, targettype))
                if len(libs) > 0:
                    cmake.write("target_link_libraries(%s %s)\n" % (copytarget, " ".join(libs)))
                cmake.write("target_link_libraries(%s ${EXTRA_LIBS})\n" % copytarget)
                cmake.write("""install(TARGETS %s
                    LIBRARY DESTINATION lib COMPONENT runtime
                    ARCHIVE DESTINATION lib COMPONENT dev
                    RUNTIME DESTINATION bin COMPONENT runtime)\n""" % copytarget)
        dirs = getVariableList("SUBDIRS")
        if len(dirs) > 0:
            cmake.writelines(["add_subdirectory(%s)\n" % dirname for dirname in dirs])
# Convert every directory under the current one that contains a Makefile.
# The absolute paths are collected before doDirectory starts chdir-ing.
sourcedirs = [ dirpath for (dirpath, dirnames, filenames) in os.walk(os.path.abspath(".")) if "Makefile" in filenames]
for sourcedir in sourcedirs:
    doDirectory(sourcedir)
|
False
elif event.key == pygame.K_LEFT:
#停止移动飞船
ship.moving_left = False
def start_game(ai_settings, screen, stats, score, play_button, ship, aliens,
               bullets):
    """Start a new game: reset settings, stats and scoreboard, rebuild the fleet."""
    if not stats.game_active:
        # Reset the dynamic game settings
        ai_settings.initialize_dynamic_settings()
        # Hide the mouse cursor
        pygame.mouse.set_visible(False)
        # Reset the game statistics
        # NOTE(review): `rest_stats` looks like a typo of `reset_stats` -
        # confirm against the GameStats class.
        stats.rest_stats()
        stats.game_active = True
        # Reset the scoreboard images
        score.prep_score()
        score.prep_high_score()
        score.prep_level()
        # Reset the remaining-ships display
        score.prep_ships()
        # Empty the alien and bullet groups
        aliens.empty()
        bullets.empty()
        # Create a new fleet and center the ship at the bottom
        create_fleet(ai_settings, screen, ship, aliens)
        ship.center_ship()
def check_play_button(ai_settings, screen, stats, score, play_button, ship,
                      aliens, bullets, mouse_x, mouse_y):
    """Start a new game when the player clicks the Play button."""
    if play_button.rect.collidepoint(mouse_x, mouse_y):
        start_game(ai_settings, screen, stats, score, play_button, ship,
                   aliens, bullets)
def check_events(ai_settings, screen, stats,score, play_button, ship, aliens,
                 bullets):
    """Watch for keyboard and mouse events and dispatch each to its handler."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            # Persist the high score before exiting
            save_high_score(stats)
            sys.exit()
        elif event.type == pygame.KEYDOWN:
            check_key_down_events(event,ai_settings, screen, stats, play_button,
                                  ship, aliens, bullets)
        elif event.type == pygame.KEYUP:
            check_key_up_events(event, ship)
        elif event.type == pygame.MOUSEBUTTONDOWN:
            mouse_x, mouse_y = pygame.mouse.get_pos()
            check_play_button(ai_settings, screen, stats, score, play_button,
                              ship, aliens, bullets, mouse_x, mouse_y)
def check_high_score(stats, score):
    """Refresh the stored high score when the current score beats it."""
    if stats.score <= stats.high_score:
        return
    stats.high_score = stats.score
    score.prep_high_score()
def update_bullets(ai_settings, screen, stats, ship, aliens, bullets, score):
    """Advance bullets, drop those that left the screen, and handle hits."""
    # Update bullet positions
    bullets.update()
    # Remove bullets that have passed the top of the screen (iterate over a
    # copy because the group is modified during the loop)
    for bullet in bullets.copy():
        if bullet.rect.bottom <= 0:
            bullets.remove(bullet)
    check_bullet_alien_collisions(ai_settings, screen, stats, ship, aliens,
                                  bullets, score)
def check_bullet_alien_collisions(ai_settings, screen, stats, ship, aliens,
                                  bullets, score):
    """Score bullet/alien hits and start a new level once the fleet is gone.

    Fix: the loop previously rebound the `aliens` parameter (the sprite
    group) to each list of hit aliens from the collision dict, so
    start_new_level received the hit list instead of the group and the new
    fleet was created one frame late.
    """
    # Remove both the bullet and the alien on each hit
    collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
    if collisions:
        # Play the alien-hit sound effect
        play_sound_effect_bomp(ai_settings)
        for hit_aliens in collisions.values():
            stats.score += ai_settings.alien_points * len(hit_aliens)
            score.prep_score()
        # A new high score may have been reached
        check_high_score(stats, score)
    # Level up when the whole fleet has been destroyed
    start_new_level(ai_settings, screen, stats, ship, aliens, bullets, score)
def start_new_level(ai_settings, screen, stats, ship, aliens, bullets, score):
    """When the fleet is destroyed, speed up the game and spawn a new fleet."""
    if len(aliens) == 0:
        # Clear remaining bullets, bump the level, raise the pace and
        # rebuild the alien fleet
        bullets.empty()
        ai_settings.increase_speed()
        stats.level += 1
        score.prep_level()
        create_fleet(ai_settings, screen, ship, aliens)
def play_sound_effect_shot(ai_settings):
    """Play the bullet-firing sound; warn instead of crashing if it fails."""
    file_sound_shot = ai_settings.file_sound_shot
    try:
        pygame.mixer.Sound(file_sound_shot).play()
    except pygame.error:
        print("The file " + file_sound_shot + " does not exist!")
def play_sound_effect_bomp(ai_settings):
    # Play the alien-hit sound effect; report a missing/unloadable file
    # instead of crashing.
    file_sound_bomp = ai_settings.file_sound_bomp
    try:
        sound_effect_bomp = pygame.mixer.Sound(file_sound_bomp)
        sound_effect_bomp.play()
    except pygame.error:
        print("The file " + file_sound_bomp + " does not exist!")
def play_sound_effect_game_over(ai_settings):
    # Play the game-over sound effect; report a missing/unloadable file
    # instead of crashing.
    file_sound_game_over = ai_settings.file_sound_game_over
    try:
        sound_effect_game_over = pygame.mixer.Sound(file_sound_game_over)
        sound_effect_game_over.play()
    except pygame.error:
        print("The file " + file_sound_game_over + " does not exist!")
def fire_bullet(ai_settings, screen, ship, bullets):
    """Fire one bullet if the cap on simultaneous bullets allows it."""
    if len(bullets) >= ai_settings.bullets_allowed:
        return
    # Firing sound effect
    play_sound_effect_shot(ai_settings)
    # Create the bullet and add it to the bullets group
    new_bullet = Bullet(ai_settings, screen, ship)
    bullets.add(new_bullet)
def get_number_aliens_x(ai_settings, alien_width):
    """Return how many aliens fit in one row, with one-alien side margins
    and one alien-width of spacing between aliens."""
    usable_width = ai_settings.screen_width - 2 * alien_width
    return int(usable_width / (2 * alien_width))
def get_number_rows(ai_settings, ship_height, alien_height):
    """Return how many alien rows fit on screen, leaving room for the ship
    and a margin of three alien-heights."""
    usable_height = ai_settings.screen_height - 3 * alien_height - ship_height
    return int(usable_height / (2 * alien_height))
def create_alien(ai_settings, screen, aliens, alien_number, row_number):
    """Create one alien and place it at its grid slot in the fleet."""
    alien = Alien(ai_settings, screen)
    alien_width = alien.rect.width
    # Horizontal slot: one alien-width margin plus two widths per column
    alien.x = alien_width + 2 * alien_width * alien_number
    alien.rect.x = alien.x
    # Vertical slot: one alien-height margin plus two heights per row
    alien.rect.y = alien.rect.height + 2 * alien.rect.height * row_number
    aliens.add(alien)
def create_fleet(ai_settings, screen, ship, aliens):
    """Create a full fleet of aliens."""
    # Create a throwaway alien to measure sizes; spacing between aliens
    # equals one alien width/height.
    alien = Alien(ai_settings, screen)
    number_aliens_x = get_number_aliens_x(ai_settings, alien.rect.width)
    number_rows = get_number_rows(ai_settings, ship.rect.height, alien.rect.height)
    for row_number in range(number_rows):
        # Create one row of aliens
        for alien_number in range(number_aliens_x):
            # Create an alien and add it to the current row
            create_alien(ai_settings, screen, aliens, alien_number, row_number)
def check_fleet_edges(a | i_settings, aliens):
"""有外星人到达屏幕边缘时采取相应措施"""
for alien in aliens:
if alien.check_edges():
change_fleet_direction(ai_settings, aliens)
break
def change_fleet_direction(ai_settings, aliens):
    """Drop the whole fleet one step down and reverse its horizontal direction."""
    drop = ai_settings.fleet_drop_speed
    for member in aliens.sprites():
        member.rect.y += drop
    ai_settings.fleet_direction *= -1
def update_aliens(ai_settings, stats, score, screen, ship, aliens, bullets):
    """Advance the fleet (handling screen edges) and check for collisions."""
    check_fleet_edges(ai_settings, aliens)
    aliens.update()
    # Alien-ship collision costs a life
    if pygame.sprite.spritecollideany(ship, aliens):
        ship_hit(ai_settings, stats, score, screen, ship, aliens, bullets)
    # Aliens reaching the bottom of the screen count as a hit too
    check_aliens_bottom(ai_settings, stats, score, screen, ship, aliens, bullets)
def check_aliens_bottom(ai_settings, stats, score, screen, ship, aliens, bullets):
    """Check whether any alien has reached the bottom of the screen."""
    screen_rect = screen.get_rect()
    for alien in aliens.sprites():
        if alien.rect.bottom >= screen_rect.bottom:
            # Treat it the same as the ship being hit
            ship_hit(ai_settings, stats, score, screen, ship, aliens, bullets)
            break
def ship_hit(ai_settings, stats, score, screen, ship, aliens, bullets):
    """Respond to the ship being hit by an alien."""
    if stats.ship_left > 1:
        # Decrement the remaining-ships counter
        stats.ship_left -= 1
        # Refresh the remaining-ships display
        score.prep_ships()
        # Empty the alien and bullet groups
        aliens.empty()
        bullets.empty()
        # Create a new fleet and center the ship at the bottom
        create_fleet(ai_settings, screen, ship, aliens)
        ship.center_ship()
        # Pause briefly so the player notices the hit
        sleep(0.5)
    else:
        # Out of ships: play the game-over sound and end the game
        play_sound_effect_game_over(ai_settings)
        stats.game_active = False
        pygame.mouse.set_visible(True)
        sleep(0.5)
        # Empty the alien and bullet groups
        aliens.empty()
        bullets.empty()
|
# -*- coding: utf-8 -*-
# Release information about mse
version = '1.0'
# Optional packaging metadata - uncomment and fill in as needed:
# description = "Your plan to rule the world"
# long_description = "More description about your plan"
# author = "Your Name Here"
# email = "YourEmail@YourDomain"
# copyright = "Copyright 2011 - the year of the Rabbit"
# If it's open source, you might want to specify these:
# url = 'http://yourcool.site/'
# download_url = 'http://yourcool.site/download'
# license = 'MIT'
|
#!/usr/bin/env python
"""
Unit tests for the assets module
"""
|
# TODO: Write actual tests.
import unittest
import assets
class TestAssets(unittest.TestCase):
    """Placeholder test case for the assets module (no real tests yet)."""
    def setUp(self):
        # Nothing to prepare yet.
        pass
    def test_getImage(self):
        # TODO: exercise assets.getImage once actual tests are written.
        pass
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
    'name': 'Project',
    'version': '1.1',
    'website': 'https://www.odoo.com/page/project-management',
    'category': 'Operations/Project',
    'sequence': 10,
    'summary': 'Organize and schedule your projects ',
    # Modules that must be installed before this one.
    'depends': [
        'base_setup',
        'mail',
        'portal',
        'rating',
        'resource',
        'web',
        'web_tour',
        'digest',
    ],
    'description': "",
    # Data files loaded at install/upgrade, in this order.
    'data': [
        'security/project_security.xml',
        'security/ir.model.access.csv',
        'report/project_report_views.xml',
        'views/digest_views.xml',
        'views/rating_views.xml',
        'views/project_views.xml',
        'views/res_partner_views.xml',
        'views/res_config_settings_views.xml',
        'views/mail_activity_views.xml',
        'views/project_assets.xml',
        'views/project_portal_templates.xml',
        'views/project_rating_templates.xml',
        'data/digest_data.xml',
        'data/project_mail_template_data.xml',
        'data/project_data.xml',
    ],
    'qweb': ['static/src/xml/project.xml'],
    'demo': ['data/project_demo.xml'],
    'test': [
    ],
    'installable': True,
    'auto_install': False,
    # Shown as a standalone application in the apps menu.
    'application': True,
}
|
from yade import export,polyhedra_utils
mat = PolyhedraMat()
# Build a small test scene: four spheres, two facets, a polyhedron and a
# polyhedral ball.
O.bodies.append([
   sphere((0,0,0),1),
   sphere((0,3,0),1),
   sphere((0,2,4),2),
   sphere((0,5,2),1.5),
   facet([Vector3(0,-3,-1),Vector3(0,-2,5),Vector3(5,4,0)]),
   facet([Vector3(0,-3,-1),Vector3(0,-2,5),Vector3(-5,4,0)]),
   polyhedra_utils.polyhedra(mat,(1,2,3),0),
   polyhedra_utils.polyhedralBall(2,20,mat,(-2,-2,4)),
])
# Reposition and reorient the two polyhedra.
O.bodies[-1].state.pos = (-2,-2,-2)
O.bodies[-1].state.ori = Quaternion((1,1,2),1)
O.bodies[-2].state.pos = (-2,-2,3)
O.bodies[-2].state.ori = Quaternion((1,2,0),1)
# Create interactions between the spheres so there is something to export.
createInteraction(0,1)
createInteraction(0,2)
createInteraction(0,3)
createInteraction(1,2)
createInteraction(1,3)
createInteraction(2,3)
O.step()
# Exercise each exporter, attaching one extra per-element expression.
vtkExporter = export.VTKExporter('vtkExporterTesting')
vtkExporter.exportSpheres(what=[('dist','b.state.pos.norm()')])
vtkExporter.exportFacets(what=[('pos','b.state.pos')])
vtkExporter.exportInteractions(what=[('kn','i.phys.kn')])
vtkExporter.exportPolyhedra(what=[('n','b.id')])
|
0;""'})
def numericWriter(nRow,nCol,cell,style):
if type(cell) is str and cell[0]=='=':
ws.write_formula(nRow,nCol,cell,style,self.formulaValues[nRow-self.nHeaderRows][nCol])
else:
ws.write_number(nRow,nCol,cell,style)
codeColumnsData={
'departmentCode': {'text':'Код ведомства', 'width':4, 'headerStyle':styleVeryThinHeader, 'cellStyle':styleCentered,'shallowCellStyle':styleShallowCentered, 'writer':ws.write_string},
'superSectionCode': {'text':'Код надраздела', 'width':5, 'headerStyle':styleThinHeader, 'cellStyle':styleCentered,'shallowCellStyle':styleShallowCentered, 'writer':ws.write_string},
'sectionCode': {'text':'Код раздела', 'width':5, 'headerStyle':styleThinHeader, 'cellStyle':styleCentered,'shallowCellStyle':styleShallowCentered, 'writer':ws.write_string},
'categoryCode': {'text':'Код целевой статьи', 'width':8, 'headerStyle':styleThinHeader, 'cellStyle':styleCentered,'shallowCellStyle':styleShallowCentered, 'writer':ws.write_string},
'typeCode': {'text':'Код вида расходов', 'width':4, 'headerStyle':styleVeryThinHeader, 'cellStyle':styleCentered,'shallowCellStyle':styleShallowCentered, 'writer':ws.write_string},
}
columns=[
{'text':'№ в приложении '+str(appendix),'width':10,'headerStyle':styleThinHeader,'cellStyle':styleStandard,'shallowCellStyle':styleShallowStandard,'writer':ws.write_string} for appendix in self.yearsInAppendices
]+[
{'text':'Наименование','width':100,'headerStyle':styleHeader,'cellStyle':styleStandard,'shallowCellStyle':styleShallowStandard,'writer':ws.write_string}
]+[
codeColumnsData[col] for cols in self.levelColLists for col in cols
]+[
{'text':[f(v) for f,v in zip(self.fakeYearNameFns,year)],'width':15,'headerStyle':styleHeader,'cellStyle':styleAmount,'shallowCellStyle':styleShallowAmount,'writer':numericWriter} for year in self.years
]
ws.freeze_panes(self.nHeaderRows,0)
ws.set_row(0,22)
ws.merge_range(0,0,0,len(columns)-1,tableTitle,styleTableTitle)
if tableSubtitle:
ws.merge_range(1,0,1,len(columns)-1,tableSubtitle)
for i in range(self.nHeaderRows-len(self.fakeYearNameFns),self.nHeaderRows):
ws.set_row(i,60//len(self.fakeYearNameFns))
self.makeSheetHeader(
columns,
lambda nCol,width: ws.set_column(nCol,nCol,width),
ws.write,
ws.merge_range
)
for nRow,row in enumerate(self.outRows):
if self.levels[nRow]>=0:
ws.set_row(self.nHeaderRows+nRow,options={'level':self.levels[nRow]+1})
for nCol,(cell,col) in enumerate(zip(row,columns)):
shallow=self.levels[nRow]<nLevels//2
style=col['shallowCellStyle' if shallow else 'cellStyle']
if cell is None:
continue
col['writer'](self.nHeaderRows+nRow,nCol,cell,style)
wb.close()
def getSubTitle(stageNumber,amendmentFlag):
    """Build the report subtitle naming the budget law: draft vs. enacted
    (amendmentFlag) and base law vs. amendment law (stageNumber)."""
    law = "проекта Закона " if amendmentFlag<=0 else "Закона "
    if stageNumber<=0:
        name = "«О бюджете Санкт-Петербурга на 2014 год и на плановый период 2015 и 2016 годов»"
    else:
        name = "«О внесении изменений и дополнений в Закон Санкт-Петербурга „О бюджете Санкт-Петербурга на 2014 год и на плановый период 2015 и 2016 годов“»"
    return "из " + law + "Санкт-Петербурга " + name
def makeDepartmentReports(conn,stageNumber,amendmentFlag,appendix1,appendix23):
    """Write the by-department expenditure tables (.xls and .xlsx) for one
    stage/amendment combination."""
    # When both appendix numbers are known, group the years under their
    # appendix; otherwise use a flat year list.
    if appendix1 is not None and appendix23 is not None:
        years={appendix1:[(2014,)],appendix23:[(2015,),(2016,)]}
    else:
        years=[(2014,),(2015,),(2016,)]
    # Hierarchy: department -> section+category -> expenditure type.
    table=LevelTable(
        [
            ['departmentCode'],
            ['sectionCode','categoryCode'],
            ['typeCode'],
        ],[
            'departmentName',
            'categoryName',
            'typeName',
        ],[
            'year',
        ],[
            lambda year: 'Сумма на '+str(year)+' г. (тыс. руб.)',
        ],
        years,
        conn.execute("""
            SELECT departmentName,categoryName,typeName,departmentCode,sectionCode,categoryCode,typeCode,year, SUM(amount) AS amount
            FROM items
            JOIN departments USING(departmentCode)
            JOIN categories USING(categoryCode)
            JOIN types USING(typeCode)
            JOIN edits USING(editNumber)
            JOIN documents USING(documentNumber)
            WHERE stageNumber<? OR (stageNumber<=? AND amendmentFlag<=?)
            GROUP BY departmentName,categoryName,typeName,departmentCode,sectionCode,categoryCode,typeCode,year
            HAVING SUM(amount)<>0
            ORDER BY departmentOrder,sectionCode,categoryCode,typeCode,year
        """,[stageNumber,stageNumber,amendmentFlag]),
        3
    )
    title='Ведомственная структура расходов бюджета Санкт-Петербурга'
    filebasename='2014.'+str(stageNumber)+'.'+('p' if amendmentFlag<=0 else 'z')+'.department'
    table.makeXls(title,outputDirectory+'/'+filebasename+'.xls',tableSubtitle=getSubTitle(stageNumber,amendmentFlag))
    table.makeXlsx(title,outputDirectory+'/'+filebasename+'.xlsx',tableSubtitle=getSubTitle(stageNumber,amendmentFlag))
def makeSectionReports(conn,stageNumber,amendmentFlag,appendix1,appendix23):
    """Write the by-section expenditure tables (.xls and .xlsx) for one
    stage/amendment combination."""
    # When both appendix numbers are known, group the years under their
    # appendix; otherwise use a flat year list.
    if appendix1 is not None and appendix23 is not None:
        years={appendix1:[(2014,)],appendix23:[(2015,),(2016,)]}
    else:
        years=[(2014,),(2015,),(2016,)]
    # Hierarchy: super-section -> section -> category -> expenditure type.
    table=LevelTable(
        [
            ['superSectionCode'],
            ['sectionCode'],
            ['categoryCode'],
            ['typeCode'],
        ],[
            'superSectionName',
            'sectionName',
            'categoryName',
            'typeName',
        ],[
            'year',
        ],[
            lambda year: 'Сумма на '+str(year)+' г. (тыс. руб.)',
        ],
        years,
        conn.execute("""
            SELECT superSectionName,sectionName,categoryName,typeName,superSectionCode,sectionCode,categoryCode,typeCode,year, SUM(amount) AS amount
            FROM items
            JOIN sections USING(sectionCode)
            JOIN superSections USING(superSectionCode)
            JOIN categories USING(categoryCode)
            JOIN types USING(typeCode)
            JOIN edits USING(editNumber)
            JOIN documents USING(documentNumber)
            WHERE stageNumber<? OR (stageNumber<=? AND amendmentFlag<=?)
            GROUP BY superSectionName,sectionName,categoryName,typeName,superSectionCode,sectionCode,categoryCode,typeCode,year
            HAVING SUM(amount)<>0
            ORDER BY superSectionCode,sectionCode,categoryCode,typeCode,year
        """,[stageNumber,stageNumber,amendmentFlag]),
        3
    )
    title='Распределение бюджетных ассигнований бюджета Санкт-Петербурга'
    filebasename='2014.'+str(stageNumber)+'.'+('p' if amendmentFlag<=0 else 'z')+'.section'
    table.makeXls(title,outputDirectory+'/'+filebasename+'.xls',tableSubtitle=getSubTitle(stageNumber,amendmentFlag))
    table.makeXlsx(title,outputDirectory+'/'+filebasename+'.xlsx',tableSubtitle=getSubTitle(stageNumber,amendmentFlag))
with sqlite3.connect(':memory:') as conn:
conn.row_factory=sqlite3.Row
conn.execute('pragma foreign_keys=ON')
conn.executescript(
open(inputFilename,encoding='utf8').read()
)
for stageNumber,amendmentFlag,departmentAppendix1,departmentAppendix23,sectionAppendix1,sectionAppendix23 in (
(0,0,3,4,5,6),
(0,2,3,4,5,6),
(1,0,2,None,3,None),
(1,2,2,None,3,None),
):
makeDepartmentReports(conn,stageNumber,amendmentFlag,departmentAppendix1,departmentAppendix23)
makeSectionReports(conn,stageNumber,amendmentFlag,sectionAppendix1,sectionAppendix23)
exit() #### skip the rest ####
# governor/bfk amendments table
for documentNumber,documentName in (
('3765','Поправка Губернатора'),
('3781','Поправка БФК'),
('3850','«Юридико-технические правки» БФК'),
):
table=LevelTable(
[
['departmentCode'],
['sectionCode','categoryCode'],
['typeCode'],
],[
'departmentName',
'categoryName',
'typeName',
],[
'year',
],[
lambda year: 'Изменения на '+str(year)+' г. (тыс. руб.)',
],
[(2014,),(2015,),(2016,)],
conn.execute("""
SELECT departmentName,categoryName,typeName,departmentCode,sectionCode,categoryCode,typeCode,year, SUM(amount) AS amount
FROM items
JOIN departments USING(departmentCode)
JOIN categories USING(categoryCode)
JOIN types USING(typeCode)
JOIN edits USING(editNumber)
WHERE documentNumber=?
GROUP BY departmentName,categoryName,typeName,departmentCode,sectionCode,categoryCode,typeCode,year
ORDER BY departmentOrder,sectionCode,categoryCode,typeCode,year
""",[documentNumber])
)
table.makeXlsx(
documentName+" к Закону Санкт-Петербурга «О бюджете Санкт-Петербурга на 2014 год и на плановый период 2015 и 2016 годов»",
outputDirectory+'/amendment'+documentNumber+'-2014-16.xlsx'
)
# experimental project+a |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" This custom XRCED launcher allows a small wx function to be wrapped
so it provides a little extra needed functionality.
XRC sometimes need to check if a node contains a filename. It does so by
checking node types. This works fine, until we start working with custom
controls, of which XRC knows nothing by default.
The little wrapper added to the pywxrc.XmlResourceCompiler.NodeContainsFilename
method, will return true if it contains a value ending with '.png', indicating
the content is an PNG image.
"""
import os
import sys
if __name__ == '__main__':
    try:
        # XRCed is sometimes installed standalone
        from XRCed.xrced import main
        # Alias the standalone package so plugins importing wx.tools.XRCed work.
        sys.modules['wx.tools.XRCed'] = sys.modules['XRCed']
    except ImportError:
        try:
            from wx.tools.XRCed.xrced import main
            from wx.tools.XRCed.globals import set_debug
            set_debug(True)
        except ImportError:
            print >> sys.stderr, 'Check that XRCed is installed and is in PYTHONPATH'
            raise

    from wx.tools import pywxrc

    # The XRCEDPATH environment variable is used to define additional plugin directories
    xrced_path = os.getenv('XRCEDPATH')
    this_path = os.path.dirname(__file__)
    os.environ['XRCEDPATH'] = xrced_path or os.path.join(this_path, "../src/odemis/gui/xmlh")
    print "'XRCEDPATH' is set to %s" % os.getenv('XRCEDPATH')

    # TODO: Move this to a separate launcher so it can be shipped with odemis.
    def ncf_decorator(ncf):
        # Wraps NodeContainsFilename so nodes whose text ends in .png/.jpg
        # are treated as filenames (XRC does not know our custom controls).
        def wrapper(self, node):
            # nodeType 3 is a DOM text node.
            if node.firstChild and node.firstChild.nodeType == 3:
                if node.firstChild.nodeValue.lower().endswith((".png", ".jpg")):
                    # print node.firstChild.nodeValue
                    return True
            return ncf(self, node)
        return wrapper

    pywxrc.XmlResourceCompiler.NodeContainsFilename = ncf_decorator(pywxrc.XmlResourceCompiler.NodeContainsFilename)

    main()
|
ent_id, "Memory grep completed, found %d hits." %
len(responses),
aff4.ROOT_URN.Add(client_id).Add(self.state.output))
else:
self.LogClientError(client_id, log_message=utils.SmartStr(
responses.status))
self.CallFlow("UnloadMemoryDriver", next_state="MarkDone",
client_id=client_id)
@flow.StateHandler()
def MarkDone(self, responses):
    """Mark a client as done."""
    self.MarkClientDone(responses.request.client_id)
class GenericHunt(implementation.GRRHunt):
    """This is a hunt to start any flow on multiple clients.

    Args:
      flow_name: The flow to run.
      args: A dict containing the parameters for the flow.
    """

    hunt_typeinfo = type_info.TypeDescriptorSet(
        type_info.String(
            description="Name of flow to run.",
            name="flow_name",
            default=""),
        type_info.GenericProtoDictType(
            name="args",
            description="Parameters passed to the child flow.",
            ),
        type_info.List(
            name="output_plugins",
            description="The output plugins to use for this hunt.",
            default=[("CollectionPlugin", {})],
            validator=type_info.List(validator=type_info.Any()),
            ),
        )

    def InitFromArguments(self, **kw):
        """Initializes this hunt from arguments."""
        super(GenericHunt, self).InitFromArguments(**kw)

        # Create all the output plugin objects.
        self.state.Register("output_objects", [])
        for plugin_name, args in self.state.output_plugins:
            if plugin_name not in output_plugins.HuntOutputPlugin.classes:
                # NOTE(review): the "%s" is never interpolated because
                # plugin_name is passed as a separate argument -- consider
                # "... %s." % plugin_name. Left unchanged here.
                raise HuntError("Invalid output plugin name: %s.", plugin_name)
            cls = output_plugins.HuntOutputPlugin.classes[plugin_name]
            self.state.output_objects.append(cls(self, **dict(args.items())))

        self.SetDescription()

    def SetDescription(self):
        """Builds a human-readable description from the flow name and args."""
        desc = []
        for k, v in sorted(self.state.args.ToDict().items()):
            desc.append("%s=%s" % (utils.SmartStr(k), utils.SmartStr(v)))
        description = "%s { %s }." % (
            self.state.flow_name, ", ".join(desc))
        self.state.context.description = description

    @flow.StateHandler(next_state=["MarkDone"])
    def Start(self, responses):
        """Starts the configured flow on a single client."""
        client_id = responses.request.client_id
        args = self.state.args.ToDict()
        if not self.state.output_plugins:
            # No plugin will consume replies, so don't send them.
            args["send_replies"] = False
        self.CallFlow(self.state.flow_name, next_state="MarkDone",
                      client_id=client_id, **args)

    def Save(self):
        with self.lock:
            # Flush results frequently so users can monitor them as they come in.
            for plugin in self.state.output_objects:
                plugin.Flush()
        super(GenericHunt, self).Save()

    @flow.StateHandler()
    def MarkDone(self, responses):
        """Mark a client as done."""
        client_id = responses.request.client_id

        # Open child flow and account its reported resource usage.
        flow_path = responses.status.child_session_id
        flow_obj = aff4.FACTORY.Open(flow_path, mode="r", token=self.token)
        client_res = flow_obj.state.context.client_resources
        resources = rdfvalue.ClientResources()
        resources.client_id = client_id
        resources.session_id = flow_path
        resources.cpu_usage.user_cpu_time = client_res.cpu_usage.user_cpu_time
        resources.cpu_usage.system_cpu_time = client_res.cpu_usage.system_cpu_time
        resources.network_bytes_sent = flow_obj.state.context.network_bytes_sent
        self.state.context.usage_stats.RegisterResources(resources)

        if responses.success:
            msg = "Flow %s completed." % responses.request.flow_name
            self.LogResult(client_id, msg)

            # Feed every response of the successful flow to all output plugins.
            with self.lock:
                for plugin in self.state.output_objects:
                    for response in responses:
                        plugin.ProcessResponse(response, client_id)
        else:
            self.LogClientError(client_id, log_message=utils.SmartStr(
                responses.status))

        self.MarkClientDone(client_id)

    def GetOutputObjects(self, output_cls=None):
        """Returns output plugin objects, optionally filtered by class."""
        result = []
        for obj in self.state.output_objects:
            if output_cls is None or isinstance(obj, output_cls):
                result.append(obj)
        return result
class VariableGenericHunt(GenericHunt):
    """A generic hunt using different flows for each client.

    Args:
      flows: A dictionary where the keys are the client_ids to start flows on
        and the values are lists of pairs (flow_name, dict of args) similar to
        the generic hunt above.
    """

    hunt_typeinfo = type_info.TypeDescriptorSet(
        type_info.GenericProtoDictType(
            name="flows",
            description=("A dictionary where the keys are the client_ids to start"
                         " flows on and the values are lists of pairs (flow_name,"
                         " dict of args)"),
            ),
        type_info.List(
            name="output_plugins",
            description="The output plugins to use for this hunt.",
            default=[("CollectionPlugin", {})],
            validator=type_info.List(validator=type_info.Any()),
            ),
        )

    def InitFromArguments(self, **kw):
        """Initializes this hunt from arguments."""
        super(VariableGenericHunt, self).InitFromArguments(**kw)
        client_id_re = aff4_grr.VFSGRRClient.CLIENT_ID_RE
        # Validate every key up front so that a bad client id fails fast.
        for client_id in self.state.flows:
            if not client_id_re.match(client_id.Basename()):
                raise HuntError("%s is not a valid client_id." % client_id)

    def SetDescription(self):
        """Fixed description; per-client flows make a generated one useless."""
        self.state.context.description = "Variable Generic Hunt"

    @flow.StateHandler(next_state=["MarkDone"])
    def Start(self, responses):
        """Starts every flow configured for this particular client."""
        client_id = responses.request.client_id
        try:
            flow_list = self.state.flows[client_id]
        except KeyError:
            self.LogClientError(client_id, "No flow found for client %s." % client_id)
            self.MarkClientDone(client_id)
            return
        for flow_name, args in flow_list:
            self.CallFlow(flow_name, next_state="MarkDone", client_id=client_id,
                          **args.ToDict())

    def ManuallyScheduleClients(self):
        """Schedule all flows without using the Foreman.

        Since we know all the client ids to run on we might as well just
        schedule all the flows and wait for the results.
        """
        for client_id in self.state.flows:
            self.StartClient(self.session_id, client_id,
                             self.state.context.client_limit)
class CollectFilesHunt(implementation.GRRHunt):
"""A hunt to collect files from various clients.
Args:
files_by_client:
A dictionary where the keys are the client_ids to collect files from and
the values are lists of Pathspecs to get from this client.
"""
hunt_typeinfo = type_info.TypeDescriptorSet(
type_info.Any(
name="files_by_client",
default={}))
def InitFromArguments(self, **kw):
super(CollectFilesHunt, self).InitFromArguments(**kw)
for client_id in self.state.files_by_client:
rdfvalue.ClientURN.Validate(client_id)
@flow.StateHandler(next_state=["MarkDone"])
def Start(self, responses):
"""Start."""
client_id = responses.request.client_id
try:
file_list = self.state.files_by_client[client_id]
except KeyError:
self.LogClientError(client_id,
"No files found for client %s." % client_id)
self.MarkClientDone(client_id)
return
for pathspec in file_list:
self.CallFlow("GetFile", next_state="MarkDone", client_id=client_id,
pathspec=pathspec)
@flow.StateHandler()
def MarkDone(self, responses):
"""Mark a client as done."""
client_id = responses.request.client_id
if not responses.success:
self.LogClientError(client_id, log_message=utils.SmartStr(
responses.status))
else:
for response in responses:
msg = "Got file %s (%s)." % (response.aff4path, client_id)
self.LogResult(client_id, msg, urn=response.aff4path)
# This is not entirely accurate since it will mark the client as done as
# soon as the first flow is done.
self.MarkClientDone(client_id)
def ManuallyScheduleClients(self):
"""Schedule all flows without using the Foreman.
|
# Copyright 2017-2019 Tom Eulenfeld, MIT license
"""Stack correlations"""
import numpy as np
import obspy
from obspy import UTCDateTime as UTC
from yam.util import _corr_id, _time2sec, IterTime
def stack(stream, length=None, move=None):
    """
    Stack traces in stream by correlation id

    :param stream: |Stream| object with correlations
    :param length: time span of one trace in the stack in seconds
        (alternatively a string consisting of a number and a unit
        -- ``'d'`` for days and ``'h'`` for hours -- can be specified,
        i.e. ``'3d'`` stacks together all traces inside a three days time
        window, default: None, which stacks together all traces)
    :param move: define a moving stack, float or string,
        default: None -- no moving stack,
        if specified move usually is smaller than length to get an overlap
        in the stacked traces
    :return: |Stream| object with stacked correlations
    """
    stream.sort()
    stream_stack = obspy.Stream()
    ids = {_corr_id(tr) for tr in stream}
    ids.discard(None)
    for id_ in ids:
        traces = [tr for tr in stream if _corr_id(tr) == id_]
        if length is None:
            # Stack all traces with this id into a single trace.
            # NOTE(review): float16 accumulation loses precision -- confirm
            # this is intentional (it halves memory for large stacks).
            data = np.mean([tr.data for tr in traces], dtype='float16',
                           axis=0)
            tr_stack = obspy.Trace(data, header=traces[0].stats)
            tr_stack.stats.key = tr_stack.stats.key + '_s'
            if 'num' in traces[0].stats:
                tr_stack.stats.num = sum(tr.stats.num for tr in traces)
            else:
                tr_stack.stats.num = len(traces)
            stream_stack.append(tr_stack)
        else:
            t1 = traces[0].stats.starttime
            lensec = _time2sec(length)
            movesec = _time2sec(move) if move else lensec
            # Align the first window to a full day (or hour) when the window
            # length is a whole number of days (or hours).
            if (lensec % (24 * 3600) == 0 or
                    isinstance(length, str) and 'd' in length):
                t1 = UTC(t1.year, t1.month, t1.day)
            elif (lensec % 3600 == 0 or
                    # BUG FIX: hour-length strings use the 'h' suffix
                    # (e.g. '2h'); this previously tested for 'm'.
                    isinstance(length, str) and 'h' in length):
                t1 = UTC(t1.year, t1.month, t1.day, t1.hour)
            t2 = max(t1, traces[-1].stats.endtime - lensec)
            for t in IterTime(t1, t2, dt=movesec):
                # Select traces starting inside the current window
                # (with 0.1 s tolerance on both edges).
                sel = [tr for tr in traces
                       if -0.1 <= tr.stats.starttime - t <= lensec + 0.1]
                if len(sel) == 0:
                    continue
                data = np.mean([tr.data for tr in sel], dtype='float16',
                               axis=0)
                tr_stack = obspy.Trace(data, header=sel[0].stats)
                key_add = '_s%s' % length + (move is not None) * ('m%s' % move)
                tr_stack.stats.key = tr_stack.stats.key + key_add
                tr_stack.stats.starttime = t
                if 'num' in traces[0].stats:
                    tr_stack.stats.num = sum(tr.stats.num for tr in sel)
                else:
                    tr_stack.stats.num = len(sel)
                stream_stack.append(tr_stack)
    return stream_stack
|
# -*- coding: UTF-8 -*-
# | Copyright 2011-2015 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""
The :xfile:`models.py` module for :ref:`cosi`.
This is e | mpty.
"""
|
"""Color palettes in addition to matplotlib's palettes."""
from typing import Mapping, Sequence
from matplotlib import cm, colors
# Colorblindness adjusted vega_10
# See https://github.com/theislab/scanpy/issues/387
vega_10 = list(map(colors.to_hex, cm.tab10.colors))
vega_10_scanpy = | vega_ | 10.copy()
vega_10_scanpy[2] = '#279e68' # green
vega_10_scanpy[4] = '#aa40fc' # purple
vega_10_scanpy[8] = '#b5bd61' # kakhi
# default matplotlib 2.0 palette
# see 'category20' on https://github.com/vega/vega/wiki/Scales#scale-range-literals
vega_20 = list(map(colors.to_hex, cm.tab20.colors))
# reorderd, some removed, some added
vega_20_scanpy = [
# dark without grey:
*vega_20[0:14:2],
*vega_20[16::2],
# light without grey:
*vega_20[1:15:2],
*vega_20[17::2],
# manual additions:
'#ad494a',
'#8c6d31',
]
vega_20_scanpy[2] = vega_10_scanpy[2]
vega_20_scanpy[4] = vega_10_scanpy[4]
vega_20_scanpy[7] = vega_10_scanpy[8] # kakhi shifted by missing grey
# TODO: also replace pale colors if necessary
default_20 = vega_20_scanpy
# https://graphicdesign.stackexchange.com/questions/3682/where-can-i-find-a-large-palette-set-of-contrasting-colors-for-coloring-many-d
# update 1
# orig reference http://epub.wu.ac.at/1692/1/document.pdf
zeileis_28 = [
"#023fa5",
"#7d87b9",
"#bec1d4",
"#d6bcc0",
"#bb7784",
"#8e063b",
"#4a6fe3",
"#8595e1",
"#b5bbe3",
"#e6afb9",
"#e07b91",
"#d33f6a",
"#11c638",
"#8dd593",
"#c6dec7",
"#ead3c6",
"#f0b98d",
"#ef9708",
"#0fcfc0",
"#9cded6",
"#d5eae7",
"#f3e1eb",
"#f6c4e1",
"#f79cd4",
# these last ones were added:
'#7f7f7f',
"#c7c7c7",
"#1CE6FF",
"#336600",
]
default_28 = zeileis_28
# from http://godsnotwheregodsnot.blogspot.de/2012/09/color-distribution-methodology.html
godsnot_102 = [
# "#000000", # remove the black, as often, we have black colored annotation
"#FFFF00",
"#1CE6FF",
"#FF34FF",
"#FF4A46",
"#008941",
"#006FA6",
"#A30059",
"#FFDBE5",
"#7A4900",
"#0000A6",
"#63FFAC",
"#B79762",
"#004D43",
"#8FB0FF",
"#997D87",
"#5A0007",
"#809693",
"#6A3A4C",
"#1B4400",
"#4FC601",
"#3B5DFF",
"#4A3B53",
"#FF2F80",
"#61615A",
"#BA0900",
"#6B7900",
"#00C2A0",
"#FFAA92",
"#FF90C9",
"#B903AA",
"#D16100",
"#DDEFFF",
"#000035",
"#7B4F4B",
"#A1C299",
"#300018",
"#0AA6D8",
"#013349",
"#00846F",
"#372101",
"#FFB500",
"#C2FFED",
"#A079BF",
"#CC0744",
"#C0B9B2",
"#C2FF99",
"#001E09",
"#00489C",
"#6F0062",
"#0CBD66",
"#EEC3FF",
"#456D75",
"#B77B68",
"#7A87A1",
"#788D66",
"#885578",
"#FAD09F",
"#FF8A9A",
"#D157A0",
"#BEC459",
"#456648",
"#0086ED",
"#886F4C",
"#34362D",
"#B4A8BD",
"#00A6AA",
"#452C2C",
"#636375",
"#A3C8C9",
"#FF913F",
"#938A81",
"#575329",
"#00FECF",
"#B05B6F",
"#8CD0FF",
"#3B9700",
"#04F757",
"#C8A1A1",
"#1E6E00",
"#7900D7",
"#A77500",
"#6367A9",
"#A05837",
"#6B002C",
"#772600",
"#D790FF",
"#9B9700",
"#549E79",
"#FFF69F",
"#201625",
"#72418F",
"#BC23FF",
"#99ADC0",
"#3A2465",
"#922329",
"#5B4534",
"#FDE8DC",
"#404E55",
"#0089A3",
"#CB7E98",
"#A4E804",
"#324E72",
]
default_102 = godsnot_102
def _plot_color_cycle(clists: Mapping[str, Sequence[str]]):
    """Render one horizontal swatch strip per named palette in *clists*."""
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib.colors import ListedColormap, BoundaryNorm

    fig, axes = plt.subplots(nrows=len(clists))  # type: plt.Figure, plt.Axes
    fig.subplots_adjust(top=0.95, bottom=0.01, left=0.3, right=0.99)
    axes[0].set_title('Color Maps/Cycles', fontsize=14)
    for ax, (name, clist) in zip(axes, clists.items()):
        n_colors = len(clist)
        # A 2-row strip of color indices, one column per palette entry.
        strip = np.arange(n_colors)[None, :].repeat(2, 0)
        ax.imshow(
            strip,
            aspect='auto',
            cmap=ListedColormap(clist),
            norm=BoundaryNorm(np.arange(n_colors + 1) - 0.5, n_colors),
        )
        # Palette name to the left of its strip, vertically centered.
        left, bottom, _, height = ax.get_position().bounds
        fig.text(left - 0.01, bottom + height / 2.0, name,
                 va='center', ha='right', fontsize=10)
    # Turn off all ticks & spines
    for ax in axes:
        ax.set_axis_off()
    fig.show()
if __name__ == '__main__':
    # Preview every list-valued palette defined in this module.
    _plot_color_cycle(
        {name: colors for name, colors in globals().items() if isinstance(colors, list)}
    )
|
# -*- coding:utf-8 -*-
'''
Test
'''
import sys
sys.path.append('.')
from tornado.testing import AsyncHTTPSTestCase
from application import APP
class TestSomeHandler(AsyncHTTPSTestCase):
    '''
    HTTPS integration tests for the application.
    '''
    def get_app(self):
        '''
        Return the tornado application under test.
        '''
        return APP

    def test_index(self):
        '''
        The index page ('/') responds with HTTP 200.
        '''
        response = self.fetch('/')
        self.assertEqual(response.code, 200)
|
""
# Match semantics: MATCH_ALL requires every rule/subfilter to pass,
# MATCH_ANY requires at least one (see `is_match()`).
__MATCH_TYPES = MATCH_ALL, MATCH_ANY = (0, 1)

def __init__(self, match_type):
    self._match_type = match_type

    # Key: function (rule_func)
    # Value: tuple (rule_func_args) or ObjectFilter instance (a subfilter)
    self._filter_items = {}

@property
def match_type(self):
    # One of MATCH_ALL or MATCH_ANY (read-only).
    return self._match_type

def has_rule(self, rule_func):
    """Return True if `rule_func` was added as a rule to this filter."""
    return rule_func in self._filter_items
def add_rule(self, rule_func, *rule_func_args):
    """
    Add the specified rule as a function to the filter.

    If `rule_func` already exists in the filter, nothing happens.

    If you need to later remove the rule from the filter (using the
    `remove_rule()` method), pass a named function rather than an inline lambda
    expression. Alternatively, you can use `add_rule_temp()` for temporary
    filters.

    Parameters:

    * `rule_func` - Function to filter objects by. The function must always have
      at least one argument - the object to match (used by the `is_match()`
      method).

    * `*rule_func_args` - Arguments for the `rule_func` function.

    Raises:

    * `TypeError` - `rule_func` is not callable, or it does not have at least
      one argument (the object to match).
    """
    if self.has_rule(rule_func):
        return

    if not callable(rule_func):
        raise TypeError("not a function")

    # getargspec()[0] is the list of positional argument names.
    if len(inspect.getargspec(rule_func)[0]) < 1:
        raise TypeError("function must have at least one argument (the object to match)")

    self._filter_items[rule_func] = rule_func_args
def remove_rule(self, rule_func, raise_if_not_found=True):
    """
    Remove the rule (`rule_func` function) from the filter.

    Parameters:

    * `rule_func` - Function to remove from the filter.

    * `raise_if_not_found` - If True, raise `ValueError` if `rule_func` is not
      found in the filter.

    Raises:

    * `ValueError` - `rule_func` is not found in the filter and
      `raise_if_not_found` is True.
    """
    if not self.has_rule(rule_func):
        if raise_if_not_found:
            raise ValueError("\"" + str(rule_func) + "\" not found in filter")
        return
    del self._filter_items[rule_func]
@contextmanager
def add_rule_temp(self, rule_func, *rule_func_args):
    """
    Temporarily add a rule. Use as a context manager:

      with filter.add_rule_temp(rule_func):
        # do stuff

    If `rule_func` already exists in the filter, the existing rule will not be
    overridden and will not be removed.

    Parameters:

    * `rule_func` - Function to filter objects by. The function must always have
      at least one argument - the object to match (used by the `is_match()`
      method).

    * `*rule_func_args` - Arguments for the `rule_func` function.

    Raises:

    * `TypeError` - `rule_func` is not callable, or it does not have at least
      one argument (raised by `add_rule()`).
    """
    has_rule_already = self.has_rule(rule_func)
    if not has_rule_already:
        self.add_rule(rule_func, *rule_func_args)
    try:
        yield
    finally:
        # Only remove the rule if this context manager added it.
        if not has_rule_already:
            self.remove_rule(rule_func)

@contextmanager
def remove_rule_temp(self, rule_func, raise_if_not_found=True):
    """
    Temporarily remove a rule. Use as a context manager:

      with filter.remove_rule_temp(rule_func):
        # do stuff

    Parameters:

    * `rule_func` - Function to remove from the filter.

    * `raise_if_not_found` - If True, raise `ValueError` if `rule_func` is not
      in the filter.

    Raises:

    * `ValueError` - `rule_func` is not found in the filter and
      `raise_if_not_found` is True.
    """
    has_rule = self.has_rule(rule_func)
    if not has_rule:
        if raise_if_not_found:
            raise ValueError("\"" + str(rule_func) + "\" not found in filter")
    else:
        # Remember the rule's arguments so it can be restored on exit.
        rule_func_args = self._filter_items[rule_func]
        self.remove_rule(rule_func)
    try:
        yield
    finally:
        if has_rule:
            self.add_rule(rule_func, *rule_func_args)
def has_subfilter(self, subfilter_name):
    """Return True if a subfilter named `subfilter_name` exists in the filter."""
    return subfilter_name in self._filter_items

def add_subfilter(self, subfilter_name, subfilter):
    """
    Add the specified subfilter (`ObjectFilter` instance) to the filter.

    The subfilter can be later accessed by the `get_subfilter` method.

    Raises:

    * `ValueError` - `subfilter_name` already exists in the filter, or
      `subfilter` is not an `ObjectFilter` instance.
    """
    if self.has_subfilter(subfilter_name):
        raise ValueError("subfilter named \"" + str(subfilter_name) + "\" already exists in the filter")

    if not isinstance(subfilter, ObjectFilter):
        raise ValueError("subfilter named \"" + str(subfilter_name) + "\" is not a subfilter")

    self._filter_items[subfilter_name] = subfilter
def get_subfilter(self, subfilter_name):
    """
    Get the subfilter specified by its name.

    Raises:

    * `ValueError` - `subfilter_name` does not exist in the filter.

    NOTE(review): this previously also documented a `ValueError` when the
    stored value is not a subfilter, but no such check is performed here.
    """
    if not self.has_subfilter(subfilter_name):
        raise ValueError("subfilter named \"" + str(subfilter_name) + "\" not found in filter")

    item = self._filter_items[subfilter_name]

    return item

# Provide alias to `get_subfilter` for easier access.
__getitem__ = get_subfilter
def remove_subfilter(self, subfilter_name, raise_if_not_found=True):
    """
    Remove the subfilter with the corresponding subfilter name.

    Parameters:

    * `subfilter_name` - Subfilter name.

    * `raise_if_not_found` - If True, raise `ValueError` if `subfilter_name`
      is not found in the filter.

    Raises:

    * `ValueError` - `subfilter_name` is not found in the filter and
      `raise_if_not_found` is True.
    """
    if not self.has_subfilter(subfilter_name):
        if raise_if_not_found:
            raise ValueError("subfilter named \"" + str(subfilter_name) + "\" not found in filter")
        return
    del self._filter_items[subfilter_name]
@contextmanager
def add_subfilter_temp(self, subfilter_name, subfilter):
    """
    Temporarily add a subfilter. Use as a context manager:

      with filter.add_subfilter_temp(subfilter_name, subfilter):
        # do stuff

    Raises:

    * `ValueError` - `subfilter_name` already exists in the filter.
    """
    self.add_subfilter(subfilter_name, subfilter)
    try:
        yield
    finally:
        self.remove_subfilter(subfilter_name)

@contextmanager
def remove_subfilter_temp(self, subfilter_name, raise_if_not_found=True):
    """
    Temporarily remove a subfilter. Use as a context manager:

      with filter.remove_subfilter_temp(subfilter_name):
        # do stuff

    Parameters:

    * `subfilter_name` - Subfilter name.

    * `raise_if_not_found` - If True, raise `ValueError` if `subfilter_name`
      is not found in the filter.

    Raises:

    * `ValueError` - `subfilter_name` is not found in the filter and
      `raise_if_not_found` is True.
    """
    has_subfilter = self.has_subfilter(subfilter_name)
    if not has_subfilter:
        if raise_if_not_found:
            raise ValueError("subfilter named \"" + str(subfilter_name) + "\" not found in filter")
    else:
        # Remember the subfilter so it can be restored on exit.
        subfilter = self._filter_items[subfilter_name]
        self.remove_subfilter(subfilter_name)
    try:
        yield
    finally:
        if has_subfilter:
            self.add_subfilter(subfilter_name, subfilter)
def is_match(self, object_to_match):
"""
If `match_type` attribute is `MATCH_ALL`, return True if `object_to_match`
matches all specified filter rules and all top-level subfilters return True.
Otherwise return False.
If `match_type` attribute is `MATCH_ANY`, return True if `object_to_match`
matches at least one specified filter rule or at least one top-level
subfilter returns True. Otherwise return False.
If no filter rules are specified, return True.
"""
if not self._filter_ |
from orotangi.models import Books | , Notes
from rest_framework import serializers
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
    """Serializer for Django auth users."""

    # NOTE(review): '__all__' also exposes sensitive columns such as the
    # password hash -- confirm the consumers of this API are trusted.
    class Meta:
        model = User
        fields = '__all__'


class BookSerializer(serializers.ModelSerializer):
    """Serializer for Books, exposing every model field."""

    class Meta:
        model = Books
        fields = '__all__'


class NoteSerializer(serializers.ModelSerializer):
    """Serializer for Notes with an explicit field whitelist."""

    class Meta:
        model = Notes
        fields = ('id', 'user', 'book', 'url', 'title', 'content',
                  'date_created', 'date_modified', 'date_deleted', 'status')
|
#!/usr/bin/env python
"""
m2g.utils.qa_utils
~~~~~~~~~~~~~~~~~~~~ |
Contains small-scale qa utilities
"""
import numpy as np
def get_min_max(data, minthr=2, maxthr=95):
    """
    A function to find min,max values at designated percentile thresholds

    Parameters
    -----------
    data: np array
        3-d regmri data to threshold.
    minthr: int
        lower percentile threshold
    maxthr: int
        upper percentile threshold

    Returns
    -----------
    min_max: tuple
        tuple of minimum and maximum values
    """
    # Compute both percentiles in a single pass.
    bounds = np.percentile(data, [minthr, maxthr])
    return (bounds[0].astype(float), bounds[1].astype(float))
def opaque_colorscale(basemap, reference, vmin=None, vmax=None, alpha=1):
    """
    A function to return a colorscale, with opacities
    dependent on reference intensities.

    Parameters
    ---------
    basemap: matplotlib colormap
        the colormap to use for this colorscale.
    reference: np array
        the reference matrix (modified in place by the clipping below).
    vmin: float, optional
        lower clipping bound for the reference intensities.
    vmax: float, optional
        upper clipping bound for the reference intensities.
    alpha: float
        maximum opacity, reached at the largest reference value.

    Returns
    ---------
    cmap = matplotlib colormap
    """
    # BUG FIX: the clipping guards were swapped (the vmax clip ran only when
    # vmin was given and vice versa), which crashed when only one bound was
    # supplied. Each bound now guards its own clip.
    if vmax is not None:
        reference[reference > vmax] = vmax
    if vmin is not None:
        reference[reference < vmin] = vmin
    cmap = basemap(reference)
    maxval = np.nanmax(reference)
    # all values between 0 opacity and alpha, proportional to intensity
    opaque_scale = alpha * reference / float(maxval)
    # remaps intensities onto the alpha channel
    cmap[:, :, 3] = opaque_scale
    return cmap
def pad_im(image, max_dim, pad_val=255, rgb=False):
    """
    Pads an image to be same dimensions as given max_dim

    Parameters
    -----------
    image: np array
        image object can be multiple dimensional or a slice.
    max_dim: int
        dimension to pad up to
    pad_val: int
        value to pad with. default is 255 (white) background
    rgb: boolean
        flag to indicate if RGB and last dimension should not be padded

    Returns
    -----------
    padded_image: np array
        image with padding
    """
    pad_width = []
    for i in range(image.ndim):
        # BUG FIX: padding both sides with diff//2 left the axis one short of
        # max_dim whenever the difference was odd; put the extra element on
        # the trailing side instead.
        diff = max_dim - image.shape[i]
        before = diff // 2
        pad_width.append((before, diff - before))
    if rgb:
        # Keep the channel axis untouched.
        pad_width[-1] = (0, 0)
    pad_width = tuple(pad_width)
    padded_image = np.pad(image, pad_width=pad_width, mode='constant', constant_values=pad_val)
    return padded_image
lete,
which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.
.. code:: python
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import functools
import logging
import re
import time
import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from sps.openstack.common.db import exception
from sps.openstack.common.gettextutils import _LE, _LW
from sps.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class SqliteForeignKeysListener(PoolListener):
    """Ensures that the foreign key constraints are enforced in SQLite.

    The foreign key constraints are disabled by default in SQLite,
    so the foreign key constraints will be enabled here for every
    database connection
    """

    def connect(self, dbapi_con, con_record):
        # Runs once for every new DBAPI connection created by the pool.
        dbapi_con.execute('pragma foreign_keys=ON')
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
#
# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
#
# ibm_db_sa:
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
# statement, UPDATE statement, or foreign key update caused by a
# DELETE statement are not valid because the primary key, unique
# constraint or unique index identified by "2" constrains table
# "NOVA.KEY_PAIRS" from having duplicate values for the index
# key.
_DUP_KEY_RE_DB = {
"sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
"postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
"mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
"ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
    """Raise exception if two entries are duplicated.

    In this function will be raised DBDuplicateEntry exception if integrity
    error wrap unique constraint violation.
    """

    def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
        #                  where `t` it is table name and columns `c1`, `c2`
        #                  are in UniqueConstraint.
        uniqbase = "uniq_"
        if not columns.startswith(uniqbase):
            if engine_name == "postgresql":
                # Strip the table prefix and trailing "_key" from the
                # auto-generated constraint name, e.g. "users_c1_key" -> "c1".
                return [columns[columns.index("_") + 1:columns.rindex("_")]]
            return [columns]
        return columns[len(uniqbase):].split("0")[1:]

    if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
        return

    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    for pattern in _DUP_KEY_RE_DB[engine_name]:
        match = pattern.match(integrity_error.message)
        if match:
            break
    else:
        # No pattern matched: not a unique constraint violation.
        return

    # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
    # columns so we have to omit that from the DBDuplicateEntry error.
    columns = ''
    if engine_name != 'ibm_db_sa':
        columns = match.group(1)

    if engine_name == "sqlite":
        columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
    else:
        columns = get_columns_from_uniq_cons_or_name(columns)
    raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
_DEADLOCK_RE_DB = {
"mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
    """Raise exception on deadlock condition.

    Raise DBDeadlock exception if OperationalError contains a Deadlock
    condition.
    """
    # FIX: the local variable was previously named `re`, shadowing the `re`
    # module imported at the top of this file.
    pattern = _DEADLOCK_RE_DB.get(engine_name)
    if pattern is None:
        return
    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    m = pattern.match(operational_error.message)
    if not m:
        return
    raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
    """Decorator translating SQLAlchemy errors into our db exception types."""

    @functools.wraps(f)
    def _wrap(self, *args, **kwargs):
        try:
            assert issubclass(
                self.__class__, sqlalchemy.orm.session.Session
            ), ('_wrap_db_error() can only be applied to methods of '
                'subclasses of sqlalchemy.orm.session.Session.')

            return f(self, *args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        except sqla_exc.OperationalError as e:
            _raise_if_db_connection_lost(e, self.bind)
            _raise_if_deadlock_error(e, self.bind.dialect.name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        # note(boris-42): We should catch unique constraint violation and
        # wrap it by our own DBDuplicateEntry exception. Unique constraint
        # violation is wrapped by IntegrityError.
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            # DBs so we must do this. Also in some tables (for example
            # instance_types) there are more than one unique constraint. This
            # means we should get names of columns, which values violate
            # unique constraint, from error message.
            _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
            raise exception.DBError(e)
        except Exception as e:
            LOG.exception(_LE('DB exception wrapped.'))
            raise exception.DBError(e)
    return _wrap
def _synchronous_switch_listener(dbapi_conn, connection_rec):
    """Switch sqlite connections to non-synchronous mode."""
    # Trades crash durability for write speed on this connection.
    dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
    """Add REGEXP function to sqlite connections."""

    def regexp(expr, item):
        # SQLite's REGEXP operator delegates to this two-argument function.
        return re.search(expr, six.text_type(item)) is not None

    dbapi_con.create_function('regexp', 2, regexp)
def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
Force a context switch. With common database backends (eg MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that |
"""
==============================
Generate simulated evoked data
==============================
"""
# Author: Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
from mne import (read_proj, read_forward_solution, read_cov, read_label,
pick_types_forward, pick_types)
from mne.io import Raw, read_info
from mne.datasets import sample
from mne.time_frequency import fit_iir_model_raw
from mne.viz import plot_sparse_source_estimates
from mne.simulation import simulate_sparse_stc, simulate_evoked
print(__doc__)

###############################################################################
# Load real data as templates
data_path = sample.data_path()

raw = Raw(data_path + '/MEG/sample/sample_audvis_raw.fif')
# Add the ECG projection vectors to the raw measurement info.
proj = read_proj(data_path + '/MEG/sample/sample_audvis_ecg_proj.fif')
raw.info['projs'] += proj
raw.info['bads'] = ['MEG 2443', 'EEG 053']  # mark bad channels

fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-cov.fif'

# Forward model restricted to good MEG+EEG channels; noise covariance and
# measurement info come from the sample dataset as well.
fwd = read_forward_solution(fwd_fname, force_fixed=True, surf_ori=True)
fwd = pick_types_forward(fwd, meg=True, eeg=True, exclude=raw.info['bads'])
cov = read_cov(cov_fname)
info = read_info(ave_fname)

# Left and right auditory cortex labels constrain the simulated dipoles.
label_names = ['Aud-lh', 'Aud-rh']
labels = [read_label(data_path + '/MEG/sample/labels/%s.label' % ln)
          for ln in label_names]

###############################################################################
# Generate source time courses from 2 dipoles and the corresponding evoked data
# 300 samples at the raw sampling rate, starting 0.1 s before time zero.
# NOTE(review): np.float is deprecated in NumPy >= 1.20; plain `float`
# would be the modern spelling.
times = np.arange(300, dtype=np.float) / raw.info['sfreq'] - 0.1
rng = np.random.RandomState(42)
def data_fun(times):
    """Generate one random source time course: a 30 rad/s sinusoid under a
    Gaussian envelope whose center is jittered around 0.15 s."""
    carrier = 1e-9 * np.sin(30. * times)
    envelope = np.exp(- (times - 0.15 + 0.05 * rng.randn(1)) ** 2 / 0.01)
    return carrier * envelope
# Two sparse dipoles inside the auditory labels, waveforms from data_fun.
stc = simulate_sparse_stc(fwd['src'], n_dipoles=2, times=times,
                          random_state=42, labels=labels, data_fun=data_fun)

###############################################################################
# Generate noisy evoked data
picks = pick_types(raw.info, meg=True, exclude='bads')
# Fit an order-5 IIR model to 2 minutes of raw data to get realistic
# colored noise; [1] keeps only the denominator coefficients.
iir_filter = fit_iir_model_raw(raw, order=5, picks=picks, tmin=60, tmax=180)[1]
snr = 6.  # dB
evoked = simulate_evoked(fwd, stc, info, cov, snr, iir_filter=iir_filter)

###############################################################################
# Plot
plot_sparse_source_estimates(fwd['src'], stc, bgcolor=(1, 1, 1),
                             opacity=0.5, high_resolution=True)

# Power spectral density of the first simulated channel, then the evoked
# response itself.
plt.figure()
plt.psd(evoked.data[0])

evoked.plot()
|
import datetime
import arrow
def arrow_datetime(value, name):
    """Coerce *value* into a datetime via the arrow library.

    Any parsing failure is re-raised as ValueError. The *name* argument
    is part of the filter value_type calling convention and unused here.
    """
    try:
        return arrow.get(value).datetime
    except Exception as e:
        raise ValueError(e)
class BaseFilter(object):
    """Abstract query filter.

    Subclasses declare ``name``, ``value_type`` and ``allow_multiple``
    and implement ``condition()`` to build a query document.
    """
    # TODO: Move this class to be part of API FiltrableResource
    # Leaving implementation to be defined by base class
    name = None
    value_type = None
    allow_multiple = None

    @classmethod
    def condition(cls, *args, **kwargs):
        """Build the query document; must be provided by subclasses."""
        raise NotImplementedError()

    @classmethod
    def apply(cls, model, *args, **kwargs):
        """Run ``model.find`` with this filter's condition and optional limit."""
        query = cls.condition(*args, **kwargs)
        limit = kwargs.get('limit', 0)
        return model.find(query, limit=limit)
class DateFilter(BaseFilter):
    """Exact match on the ``date`` field."""

    name = 'date'
    value_type = arrow_datetime
    allow_multiple = False

    @classmethod
    def condition(cls, date_value, **kwargs):
        return dict(date=date_value)
class DateRangeFilter(BaseFilter):
    """Inclusive range match on the ``date`` field."""

    name = 'date_range'
    value_type = arrow_datetime
    allow_multiple = True

    @classmethod
    def condition(cls, from_date, to_date, **kwargs):
        bounds = {"$gte": from_date, "$lte": to_date}
        return {'date': bounds}
class AccountFilter(BaseFilter):
    """Case-insensitive substring match on the ``account`` field."""

    name = 'account_name'
    value_type = str
    allow_multiple = False

    @classmethod
    def condition(cls, account_name):
        pattern = '.*?{}.*?'.format(account_name)
        return {'account': {"$regex": pattern, "$options": 'si'}}
class NameFilter(BaseFilter):
    """Case-insensitive substring match on the ``name`` field."""

    name = 'name'
    value_type = str
    allow_multiple = False

    @classmethod
    def condition(cls, name):
        pattern = '.*?{}.*?'.format(name)
        return {'name': {"$regex": pattern, "$options": 'si'}}
class TagFilter(BaseFilter):
    """Match records with at least one tag containing any requested value."""

    name = 'tag'
    value_type = str
    allow_multiple = True

    @classmethod
    def condition(cls, *tags, **kwargs):
        # One alternation regex matches any of the requested tags.
        pattern = ".*?{}.*?".format('|'.join(tags))
        return {'tags': {"$elemMatch": {"$regex": pattern, "$options": "si"}}}
|
#!/usr/bin/python
import sys
import re
import string
import httplib
import urllib2
import re
def StripTags(text):
    """Return *text* with every complete <...> tag removed.

    An unmatched '<' (no closing '>' after it) is left in place, exactly
    like the original pass-until-stable loop.
    """
    while True:
        lt = text.find("<")
        if lt < 0:
            return text
        gt = text[lt:].find(">")
        if gt < 0:
            return text
        text = text[:lt] + text[lt + gt + 1:]
# Require exactly one argument: the domain whose addresses we scrape.
if len(sys.argv) != 2:
    print "\nExtracts emails from google results.\n"
    print "\nUsage : ./goog-mail.py <domain-name>\n"
    sys.exit(1)
domain_name=sys.argv[1]
# d maps each unique email address found to 1 (dict used as a set).
d={}
page_counter = 0
try:
    # Walk Google Groups results ten hits at a time (start=0..40).
    while page_counter < 50 :
        results = 'http://groups.google.com/groups?q='+str(domain_name)+'&hl=en&lr=&ie=UTF-8&start=' + repr(page_counter) + '&sa=N'
        request = urllib2.Request(results)
        request.add_header('User-Agent','Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)')
        opener = urllib2.build_opener()
        text = opener.open(request).read()
        # Match anything that looks like user@<domain> in the de-tagged HTML.
        emails = (re.findall('([\w\.\-]+@'+domain_name+')',StripTags(text)))
        for email in emails:
            d[email]=1
            uniq_emails=d.keys()
        page_counter = page_counter +10
except IOError:
    print "Can't connect to Google Groups!"+""
page_counter_web=0
try:
    print "\n\n+++++++++++++++++++++++++++++++++++++++++++++++++++++"+""
    print "+ Google Web & Group Results:"+""
    print "+++++++++++++++++++++++++++++++++++++++++++++++++++++\n\n"+""
    # Same scrape against Google web search (query "@domain"); results
    # accumulate into the same dict d.
    while page_counter_web < 50 :
        results_web = 'http://www.google.com/search?q=%40'+str(domain_name)+'&hl=en&lr=&ie=UTF-8&start=' + repr(page_counter_web) + '&sa=N'
        request_web = urllib2.Request(results_web)
        request_web.add_header('User-Agent','Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0)')
        opener_web = urllib2.build_opener()
        text = opener_web.open(request_web).read()
        emails_web = (re.findall('([\w\.\-]+@'+domain_name+')',StripTags(text)))
        for email_web in emails_web:
            d[email_web]=1
            uniq_emails_web=d.keys()
        page_counter_web = page_counter_web +10
except IOError:
    print "Can't connect to Google Web!"+""
# Print every unique address collected from both sources.
# NOTE(review): the loop variable reuses the name uniq_emails_web, which
# shadows the list built above — harmless, but confusing.
for uniq_emails_web in d.keys():
    print uniq_emails_web+""
|
from django.test import TestCase, tag
from member.tests.test_mixins import MemberMixin
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.urls.base import reverse
from enumeration.views import DashboardView, ListBoardView
class TestEnumeration(MemberMixin, TestCase):
    """View tests for the enumeration dashboard and listboard."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = User.objects.create_user(username='erik')
        self.household_structure = self.make_household_ready_for_enumeration(
            make_hoh=False)

    def _view_response(self, view_class, url):
        # Dispatch *url* through *view_class* with the test user attached.
        request = self.factory.get(url)
        request.user = self.user
        return view_class.as_view()(request)

    def test_dashboard_view(self):
        url = reverse('enumeration:dashboard_url', kwargs=dict(
            household_identifier=self.household_structure.household.household_identifier,
            survey=self.household_structure.survey))
        self.assertEqual(self._view_response(DashboardView, url).status_code, 200)

    def test_dashboard_view2(self):
        url = reverse('enumeration:dashboard_url', kwargs=dict(
            household_identifier=self.household_structure.household.household_identifier,
            survey=self.household_structure.survey))
        # Exercise the full middleware stack via the test client.
        self.client.force_login(self.user)
        self.assertEqual(self.client.get(url).status_code, 200)

    def test_list_view1(self):
        url = reverse('enumeration:listboard_url')
        self.assertEqual(self._view_response(ListBoardView, url).status_code, 200)

    def test_list_view2(self):
        url = reverse('enumeration:listboard_url', kwargs=dict(page=1))
        self.assertEqual(self._view_response(ListBoardView, url).status_code, 200)

    def test_list_view3(self):
        url = reverse('enumeration:listboard_url', kwargs=dict(
            household_identifier=self.household_structure.household.household_identifier))
        self.assertEqual(self._view_response(ListBoardView, url).status_code, 200)

    def test_list_view4(self):
        url = reverse('enumeration:listboard_url', kwargs=dict(
            household_identifier=self.household_structure.household.household_identifier,
            survey=self.household_structure.survey))
        self.assertEqual(self._view_response(ListBoardView, url).status_code, 200)

    def test_list_view5(self):
        url = reverse('enumeration:listboard_url', kwargs=dict(
            plot_identifier=self.household_structure.household.plot.plot_identifier))
        self.assertEqual(self._view_response(ListBoardView, url).status_code, 200)
|
"""
arc - dead simple chat
Copyright (C) 2017 Jewel Mahanta <jewelmahanta@gmail.com>
This file is part of arc.
arc is free software: you can redistribute it and/or modify
it under the terms of the | GNU General Public License as published by
t | he Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
arc is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with arc. If not, see <http://www.gnu.org/licenses/>.
"""
import time
import math
# Custom epoch in milliseconds since the Unix epoch; snowflake timestamps
# count forward from this point.
ARC_EPOCH = 1496595546533
class Snowflake:
    """Generator of roughly time-ordered unique ids.

    Arc snowflake has the following structure:

        +------------------+-----------------+-----------------+
        | 41 bit timestamp | 13 bit shard_id | 10 bit sequence |
        +------------------+-----------------+-----------------+

    The timestamp is milliseconds since our custom ARC_EPOCH. Each
    component has the following upper limits:

        timestamp (2^41) - 1 = 2199023255551
        shard_id  (2^13) - 1 = 8191
        sequence  (2^10) - 1 = 1023

    So roughly speaking, we can generate 1024 ids per millisecond
    per shard.

    Credits:
    --------
    This id generation technique borrows heavily from instagram's
    implementation of twitter snowflake. You can read more about
    it here: https://engineering.instagram.com/sharding-ids-at-instagram-1cf5a71e5a5c
    """

    def __init__(self, shard_id=0):
        self.last_timestamp = 0   # ms timestamp used for the previous id
        self.sequence = 0         # per-millisecond counter (0..1023)
        self.SHARD_ID = shard_id

    def generate(self):
        """Return a new id.

        Two fixes over the original implementation:
        - the timestamp is computed as floor(time.time() * 1000), not
          floor(time.time()) * 1000, which floored to whole seconds and
          only changed once per second;
        - the sequence is reset to 0 after the overflow sleep, so it can
          never exceed 1023 and spill into the shard_id bits.
        """
        timestamp = math.floor(time.time() * 1000) - ARC_EPOCH
        if timestamp == self.last_timestamp:
            self.sequence += 1
            if self.sequence > 1023:
                # Sequence exhausted for this millisecond: wait for the
                # next tick and start counting again.
                time.sleep(1 / 1000)
                timestamp = math.floor(time.time() * 1000) - ARC_EPOCH
                self.sequence = 0
        else:
            self.sequence = 0
        self.last_timestamp = timestamp
        gen_id = (timestamp << 23) + (self.SHARD_ID << 10) + self.sequence
        return gen_id
# Module-level singleton: importers share one shard-0 generator.
snowflake = Snowflake()
|
import json

# Sample contact record embedded as a JSON string.
data = '''{
"name" : "Chuck",
"phone": {
"type" : "int1",
"number" : "+1 734 303 4456"
},
"email": {
"hide" : "yes"
}
}'''

# Parse the JSON text into nested dicts and read two fields back out.
info = json.loads(data)
print('Name:',info["name"])
print('Hide:',info["email"]["hide"])
|
perty
def mime_type(self):
if self.data:
return _image_mime_type(self.data)
@property
def type_index(self):
if self.type is None:
return None
return list(self.TYPES).index(self.type)
# StorageStyle classes describe strategies for accessing values in
# Mutagen file objects.
class StorageStyle(object):
    """A strategy for storing a value for a certain tag format (or set
    of tag formats). This basic StorageStyle describes simple 1:1
    mapping from raw values to keys in a Mutagen file object; subclasses
    describe more sophisticated translations or format-specific access
    strategies.
    MediaFile uses a StorageStyle via two methods: ``get()`` and
    ``set()``. It passes a Mutagen file object to each.
    Internally, the StorageStyle implements ``get()`` and ``set()``
    using two steps that may be overridden by subtypes. To get a value,
    the StorageStyle first calls ``fetch()`` to retrieve the value
    corresponding to a key and then ``deserialize()`` to convert the raw
    Mutagen value to a consumable Python value. Similarly, to set a
    field, we call ``serialize()`` to encode the value and then
    ``store()`` to assign the result into the Mutagen object.
    Each StorageStyle type has a class-level `formats` attribute that is
    a list of strings indicating the formats that the style applies to.
    MediaFile only uses StorageStyles that apply to the correct type for
    a given audio file.
    """
    formats = ['FLAC', 'OggOpus', 'OggTheora', 'OggSpeex', 'OggVorbis',
               'OggFlac', 'APEv2File', 'WavPack', 'Musepack', 'MonkeysAudio']
    """List of mutagen classes the StorageStyle can handle.
    """
    def __init__(self, key, as_type=unicode, suffix=None, float_places=2):
        """Create a basic storage strategy. Parameters:
        - `key`: The key on the Mutagen file object used to access the
          field's data.
        - `as_type`: The Python type that the value is stored as
          internally (`unicode`, `int`, `bool`, or `bytes`).
        - `suffix`: When `as_type` is a string type, append this before
          storing the value.
        - `float_places`: When the value is a floating-point number and
          encoded as a string, the number of digits to store after the
          decimal point.
        """
        self.key = key
        self.as_type = as_type
        self.suffix = suffix
        self.float_places = float_places
        # Convert suffix to correct string type.
        if self.suffix and self.as_type is unicode:
            self.suffix = self.as_type(self.suffix)
    # Getter.
    def get(self, mutagen_file):
        """Get the value for the field using this style.
        """
        return self.deserialize(self.fetch(mutagen_file))
    def fetch(self, mutagen_file):
        """Retrieve the raw value of for this tag from the Mutagen file
        object. Returns None when the key is absent (missing tag).
        """
        try:
            # Mutagen stores tag values as lists; take the first entry.
            return mutagen_file[self.key][0]
        except KeyError:
            return None
    def deserialize(self, mutagen_value):
        """Given a raw value stored on a Mutagen object, decode and
        return the represented value.
        """
        # Strip the configured unit suffix back off on read, mirroring
        # what serialize() appended.
        if self.suffix and isinstance(mutagen_value, unicode) \
           and mutagen_value.endswith(self.suffix):
            return mutagen_value[:-len(self.suffix)]
        else:
            return mutagen_value
    # Setter.
    def set(self, mutagen_file, value):
        """Assign the value for the field using this style.
        """
        self.store(mutagen_file, self.serialize(value))
    def store(self, mutagen_file, value):
        """Store a serialized value in the Mutagen file object.
        """
        # Wrapped in a list to match mutagen's multi-valued tag model.
        mutagen_file[self.key] = [value]
    def serialize(self, value):
        """Convert the external Python value to a type that is suitable for
        storing in a Mutagen file object.
        """
        # Floats headed for a text tag get fixed decimal formatting first.
        if isinstance(value, float) and self.as_type is unicode:
            value = u'{0:.{1}f}'.format(value, self.float_places)
            value = self.as_type(value)
        elif self.as_type is unicode:
            if isinstance(value, bool):
                # Store bools as 1/0 instead of True/False.
                value = unicode(int(bool(value)))
            elif isinstance(value, str):
                # Python 2 bytestrings are decoded (lossily) to unicode.
                value = value.decode('utf8', 'ignore')
            else:
                value = unicode(value)
        else:
            value = self.as_type(value)
        if self.suffix:
            value += self.suffix
        return value
class ListStorageStyle(StorageStyle):
    """Abstract storage style that provides access to lists.

    The ListMediaField descriptor uses a ListStorageStyle via two
    methods: ``get_list()`` and ``set_list()``. It passes a Mutagen file
    object to each.

    Subclasses may overwrite ``fetch`` and ``store``. ``fetch`` must
    return a (possibly empty) list and ``store`` receives a serialized
    list of values as the second argument.

    The `serialize` and `deserialize` methods (from the base
    `StorageStyle`) still operate on single values; this class handles
    packing and unpacking the values into lists.
    """
    def get(self, mutagen_file):
        """Return the first stored value, or None when the list is empty."""
        values = self.get_list(mutagen_file)
        if values:
            return values[0]
        return None

    def get_list(self, mutagen_file):
        """Deserialize and return all values stored for this field."""
        return [self.deserialize(raw) for raw in self.fetch(mutagen_file)]

    def fetch(self, mutagen_file):
        """Return the raw (serialized) value list; [] when the key is absent."""
        try:
            return mutagen_file[self.key]
        except KeyError:
            return []

    def set(self, mutagen_file, value):
        """Store *value* as the sole entry of the field's value list."""
        self.set_list(mutagen_file, [value])

    def set_list(self, mutagen_file, values):
        """Serialize and store every value from the *values* iterable."""
        serialized = [self.serialize(item) for item in values]
        self.store(mutagen_file, serialized)

    def store(self, mutagen_file, values):
        """Assign the list of raw (serialized) values for this field."""
        mutagen_file[self.key] = values
class SoundCheckStorageStyleMixin(object):
    """A mixin for storage styles that read and write iTunes SoundCheck
    analysis values. The object must have an `index` field that
    indicates which half of the gain/peak pair---0 or 1---the field
    represents.
    """
    def get(self, mutagen_file):
        raw = self.fetch(mutagen_file)
        # Missing SoundCheck data reads as zero.
        return 0 if raw is None else _sc_decode(raw)[self.index]

    def set(self, mutagen_file, value):
        raw = self.fetch(mutagen_file)
        # Preserve the other half of the gain/peak pair when updating.
        pair = [0, 0] if raw is None else list(_sc_decode(raw))
        pair[self.index] = value or 0
        self.store(mutagen_file, self.serialize(_sc_encode(*pair)))
class ASFStorageStyle(ListStorageStyle):
    """A general storage style for Windows Media/ASF files."""
    formats = ['ASF']

    def deserialize(self, data):
        # Unwrap mutagen's ASF attribute wrappers to their plain value.
        if isinstance(data, mutagen.asf.ASFBaseAttribute):
            return data.value
        return data
class MP4StorageStyle(StorageStyle):
    """A general storage style for MPEG-4 tags."""
    formats = ['MP4']

    def serialize(self, value):
        serialized = super(MP4StorageStyle, self).serialize(value)
        # Freeform ('----:') atoms must hold UTF-8 bytes, not unicode.
        if self.key.startswith('----:') and isinstance(serialized, unicode):
            serialized = serialized.encode('utf8')
        return serialized
class MP4TupleStorageStyle(MP4StorageStyle):
"""A style for storing values as part of a pair of numbers in an
MPEG-4 file.
"""
def __init__(self, key, index=0, **kwargs):
super(MP4TupleStorageStyle, self).__init__(key, **kwargs)
self.index = index
def deserialize(self, mut |
"""
import os
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import default
from resource_management.libraries.functions import format_jvm_option
from resource_management.libraries.functions import format
from resource_management.libraries.functions.version import format_stack_version, compare_versions
from ambari_commons.os_check import OSCheck
from resource_management.libraries.script.script import Script
# Command JSON delivered by the Ambari server for this host.
config = Script.get_config()
stack_version_unformatted = str(config['hostLevelParams']['stack_version'])
iop_stack_version = format_stack_version(stack_version_unformatted)
# hadoop default params
mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
hadoop_libexec_dir = stack_select.get_hadoop_dir("libexec")
hadoop_lib_home = stack_select.get_hadoop_dir("lib")
hadoop_bin = stack_select.get_hadoop_dir("sbin")
hadoop_home = '/usr'
create_lib_snappy_symlinks = True
# IOP 4.0+ params: versioned install layout replaces the legacy paths.
if Script.is_stack_greater_or_equal("4.0"):
  mapreduce_libs_path = "/usr/iop/current/hadoop-mapreduce-client/*"
  hadoop_home = stack_select.get_hadoop_dir("home")
  create_lib_snappy_symlinks = False
current_service = config['serviceName']
#security params
security_enabled = config['configurations']['cluster-env']['security_enabled']
#users and groups
has_hadoop_env = 'hadoop-env' in config['configurations']
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_user = config['configurations']['hadoop-env']['hdfs_user']
yarn_user = config['configurations']['yarn-env']['yarn_user']
user_group = config['configurations']['cluster-env']['user_group']
#hosts: component host lists from the cluster topology ([] when absent)
hostname = config["hostname"]
ambari_server_hostname = config['clusterHostInfo']['ambari_server_host'][0]
rm_host = default("/clusterHostInfo/rm_host", [])
slave_hosts = default("/clusterHostInfo/slave_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
hive_server_host = default("/clusterHostInfo/hive_server_host", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/hs_host", [])
jtnode_host = default("/clusterHostInfo/jtnode_host", [])
namenode_host = default("/clusterHostInfo/namenode_host", [])
zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
ams_collector_hosts = default("/clusterHostInfo/metrics_collector_hosts", [])
# Presence flags for each component on the cluster.
has_namenode = not len(namenode_host) == 0
has_resourcemanager = not len(rm_host) == 0
has_slaves = not len(slave_hosts) == 0
has_oozie_server = not len(oozie_servers) == 0
has_hcat_server_host = not len(hcat_server_hosts) == 0
has_hive_server_host = not len(hive_server_host) == 0
has_hbase_masters = not len(hbase_master_hosts) == 0
has_zk_host = not len(zk_hosts) == 0
has_ganglia_server = not len(ganglia_server_hosts) == 0
has_metric_collector = not len(ams_collector_hosts) == 0
# Role flags for the current host.
is_namenode_master = hostname in namenode_host
is_jtnode_master = hostname in jtnode_host
is_rmnode_master = hostname in rm_host
is_hsnode_master = hostname in hs_host
is_hbase_master = hostname in hbase_master_hosts
is_slave = hostname in slave_hosts
if has_ganglia_server:
  ganglia_server_host = ganglia_server_hosts[0]
# Metrics collector endpoint: prefer a configured VIP, otherwise fall
# back to the first collector host and the webapp address port.
if has_metric_collector:
  if 'cluster-env' in config['configurations'] and \
      'metrics_collector_vip_host' in config['configurations']['cluster-env']:
    metric_collector_host = config['configurations']['cluster-env']['metrics_collector_vip_host']
  else:
    metric_collector_host = ams_collector_hosts[0]
  if 'cluster-env' in config['configurations'] and \
      'metrics_collector_vip_port' in config['configurations']['cluster-env']:
    metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port']
  else:
    metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188")
    if metric_collector_web_address.find(':') != -1:
      metric_collector_port = metric_collector_web_address.split(':')[1]
    else:
      metric_collector_port = '6188'
  pass
# Metrics sink tuning (seconds).
metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60)
metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 60)
#hadoop params
if has_namenode:
  hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
  hadoop_conf_dir = conf_select.get_hadoop_conf_dir(force_latest_on_upgrade=True)
  task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
hadoop_pid_dir_prefix = config['configurations']['hadoop-env']['hadoop_pid_dir_prefix']
hdfs_log_dir_prefix = config['configurations']['hadoop-env']['hdfs_log_dir_prefix']
hbase_tmp_dir = "/tmp/hbase-hbase"
#db params
server_db_name = config['hostLevelParams']['db_name']
db_driver_filename = config['hostLevelParams']['db_driver_filename']
oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
ambari_server_resources = config['hostLevelParams']['jdk_location']
oracle_driver_symlink_url = format("{ambari_server_resources}oracle-jdbc-driver.jar")
mysql_driver_symlink_url = format("{ambari_server_resources}mysql-jdbc-driver.jar")
ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
# Root-cause-analysis logging is only meaningful alongside a NameNode.
if has_namenode and 'rca_enabled' in config['configurations']['hadoop-env']:
  rca_enabled = config['configurations']['hadoop-env']['rca_enabled']
else:
  rca_enabled = False
rca_disabled_prefix = "###"
if rca_enabled == True:
  rca_prefix = ""
else:
  rca_prefix = rca_disabled_prefix
#hadoop-env.sh
java_home = config['hostLevelParams']['java_home']
jsvc_path = "/usr/lib/bigtop-utils"
hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
namenode_opt_newsize = config['configurations']['hadoop-env']['namenode_opt_newsize']
namenode_opt_maxnewsize = config['configurations']['hadoop-env']['namenode_opt_maxnewsize']
namenode_opt_permsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_permsize","128m")
namenode_opt_maxpermsize = format_jvm_option("/configurations/hadoop-env/namenode_opt_maxpermsize","256m")
jtnode_opt_newsize = "200m"
jtnode_opt_maxnewsize = "200m"
jtnode_heapsize = "1024m"
ttnode_heapsize = "1024m"
dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
mapred_pid_dir_prefix = default("/configurations/mapred-env/mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
mapred_log_dir_prefix = default("/configurations/mapred-env/mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
#log4j.properties
yarn_log_dir_prefix = default("/configurations/yarn-env/yarn_log_dir_prefix","/var/log/hadoop-yarn")
dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
#log4j.properties: concatenate HDFS and YARN log4j content when present.
if (('hdfs-log4j' in config['configurations']) and ('content' in config['configurations']['hdfs-log4j'])):
  log4j_props = config['configurations']['hdfs-log4j']['content']
  if (('yarn-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-log4j'])):
    log4j_props += config['configurations']['yarn-log4j']['content']
else:
  log4j_props = None
refresh_topology = False
command_params = config["commandParams"] if "commandParams" in config else None
if command_params is not None:
  refresh_topology = bool(command_params["refresh_topology"]) if "refresh_topology" in command_params else False
ambari_libs_dir = "/var/lib/ambari-agent/lib"
is_webhdfs_enabled = config['configurations']['hdfs-site']['dfs.webhdfs.enabled']
default_fs = config['configurations']['core-site']['fs.defaultFS']
#host info
all_hosts = default("/clusterHostInf |
# -*- coding: utf-8 -*-
#################### | ##########################################################
#
# OpenERP, Open Source Management Solution
# Copyright | (C) 2014 Smile (<http://www.smile.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import logging
from openerp.modules.registry import RegistryManager
from .misc import add_timing, add_trace
class SmileDBLogger:
    """Logger wrapper that tags every record with db/model/record context
    and a per-instance sequence number (pid) drawn from the PostgreSQL
    sequence ``smile_log_seq`` (created on first use)."""

    def __init__(self, dbname, model_name, res_id, uid=0):
        assert isinstance(uid, (int, long)), 'uid should be an integer'
        self._logger = logging.getLogger('smile_log')
        db = RegistryManager.get(dbname)._db
        pid = 0
        # Acquire the cursor *outside* the try block: in the original code
        # ``cr = db.cursor()`` lived inside ``try``, so a failure in
        # cursor() left ``cr`` unbound and ``finally: cr.close()`` raised a
        # NameError that masked the real error.
        cr = db.cursor()
        try:
            cr.autocommit(True)
            cr.execute("select relname from pg_class where relname='smile_log_seq'")
            if not cr.rowcount:
                cr.execute("create sequence smile_log_seq")
            cr.execute("select nextval('smile_log_seq')")
            res = cr.fetchone()
            pid = res and res[0] or 0
        finally:
            cr.close()
        self._logger_start = datetime.datetime.now()
        self._logger_args = {'dbname': dbname, 'model_name': model_name, 'res_id': res_id, 'uid': uid, 'pid': pid}

    @property
    def pid(self):
        # Sequence number identifying this logger instance in smile_log.
        return self._logger_args['pid']

    def setLevel(self, level):
        self._logger.setLevel(level)

    def getEffectiveLevel(self):
        return self._logger.getEffectiveLevel()

    def debug(self, msg):
        self._logger.debug(msg, self._logger_args)

    def info(self, msg):
        self._logger.info(msg, self._logger_args)

    def warning(self, msg):
        self._logger.warning(msg, self._logger_args)

    def log(self, msg):
        # NOTE(review): logging.Logger.log expects (level, msg, *args);
        # here msg is passed where the level goes — confirm intended usage.
        self._logger.log(msg, self._logger_args)

    @add_trace
    def error(self, msg):
        self._logger.error(msg, self._logger_args)

    @add_trace
    def critical(self, msg):
        self._logger.critical(msg, self._logger_args)

    @add_trace
    def exception(self, msg):
        self._logger.exception(msg, self._logger_args)

    @add_timing
    def time_info(self, msg):
        self._logger.info(msg, self._logger_args)

    @add_timing
    def time_debug(self, msg):
        self._logger.debug(msg, self._logger_args)
|
import datetime
from django.contrib.auth.models import User
from django.core.management import call_command
from django.utils import timezone
from io import StringIO
from oppia.models import Tracker
from oppia.test import OppiaTestCase
class DataRetentionTest(OppiaTestCase):
    """Checks the data_retention management command's user-pruning rules."""

    fixtures = ['tests/test_user.json',
                'tests/test_oppia.json',
                'default_gamification_events.json',
                'tests/test_tracker.json',
                'tests/test_permissions.json',
                'default_badges.json',
                'tests/test_course_permissions.json']
    STR_NO_INPUT = '--noinput'

    def _run_data_retention(self):
        # Run the command non-interactively, swallowing its stdout.
        buffer = StringIO()
        call_command('data_retention', self.STR_NO_INPUT, stdout=buffer)

    def _add_stale_user(self):
        # Create a user whose last login is far in the past (year 2000).
        stale = User()
        stale.username = "olduser"
        stale.last_login = timezone.make_aware(
            datetime.datetime.strptime('2000-01-01', "%Y-%m-%d"),
            timezone.get_current_timezone())
        stale.save()
        return stale

    def test_data_retention_no_delete(self):
        before = User.objects.all().count()
        self._run_data_retention()
        self.assertEqual(before, User.objects.all().count())

    def test_data_retention_old_user(self):
        self._add_stale_user()
        before = User.objects.all().count()
        self._run_data_retention()
        # The stale user has no activity, so exactly one user is removed.
        self.assertEqual(before - 1, User.objects.all().count())

    def test_data_retention_old_user_new_tracker(self):
        stale = self._add_stale_user()
        tracker = Tracker()
        tracker.user = stale
        tracker.save()
        before = User.objects.all().count()
        self._run_data_retention()
        # Tracker activity protects the otherwise-stale user from deletion.
        self.assertEqual(before, User.objects.all().count())
|
# Alien Blaster
# Demonstrates object interaction
class Player(object):
    """ A player in a shooter game. """
    def blast(self, enemy):
        """Announce the shot, then make *enemy* die."""
        print("The player blasts an enemy.\n")
        enemy.die()
class Alien(object):
    """ An alien in a shooter game. """
    def die(self):
        """Print the alien's melodramatic last words."""
        last_words = ("The alien gasps and says, 'Oh, this is it. This is the big one. \n"
                      "Yes, it's getting dark now. Tell my 1.6 million larvae that I loved them... \n"
                      "Good-bye, cruel universe.'")
        print(last_words)
# main
# Demo: one hero blasts one invader; the trailing input() keeps the
# console window open until the user presses Enter.
print("\t\tDeath of an Alien\n")
hero = Player()
invader = Alien()
hero.blast(invader)
input("\n\nPress the enter key to exit.")
|
from django import forms
from django.contrib import admin
from django.contrib.admin import ModelAdmin
from guardian.admin import GuardedModelAdmin
from | uploader.projects.models import FileSystem, Project
class FileSystemAdminForm(forms.ModelForm):
    """Admin form for FileSystem.

    Declares ``fields = '__all__'`` explicitly: a ModelForm whose Meta has
    neither ``fields`` nor ``exclude`` is deprecated since Django 1.6 and
    raises ImproperlyConfigured from Django 1.8 onward. Exposing all
    fields preserves the original behavior.
    """
    class Meta:
        model = FileSystem
        fields = '__all__'
class ProjectAdmin(GuardedModelAdmin):
    # Guarded admin (django-guardian per-object permissions); changelist
    # shows each project with its file system and directory.
    list_display = ('__unicode__', 'file_system', 'directory')
class FileSystemAdmin(ModelAdmin):
    # Changelist columns for file systems; edits go through the custom
    # FileSystemAdminForm declared in this module.
    list_display = ('__unicode__', 'alias', 'mount_point')
    form = FileSystemAdminForm
# Register both models with their customized admin classes.
admin.site.register(FileSystem, admin_class=FileSystemAdmin)
admin.site.register(Project, admin_class=ProjectAdmin)
|
"""Tests for distutils.command.build_scripts."""
import os
import unittest
from distutils.command.build_scripts import build_scripts
from distutils.core import Distribution
import sysconfig
from distutils.tests import support
from test.test_support import run_unittest
class BuildScriptsTestCase(support.TempdirManager,
                           support.LoggingSilencer,
                           unittest.TestCase):
    def test_default_settings(self):
        # finalize_options() must turn forcing on and adopt the build
        # directory propagated from the dummy 'build' command.
        cmd = self.get_build_scripts_cmd("/foo/bar", [])
        self.assertTrue(not cmd.force)
        self.assertTrue(cmd.build_dir is None)
        cmd.finalize_options()
        self.assertTrue(cmd.force)
        self.assertEqual(cmd.build_dir, "/foo/bar")
    def test_build(self):
        # Running the command over three sample scripts must copy all of
        # them (by name) into the target directory.
        source = self.mkdtemp()
        target = self.mkdtemp()
        expected = self.write_sample_scripts(source)
        cmd = self.get_build_scripts_cmd(target,
                                         [os.path.join(source, fn)
                                          for fn in expected])
        cmd.finalize_options()
        cmd.run()
        built = os.listdir(target)
        for name in expected:
            self.assertTrue(name in built)
    def get_build_scripts_cmd(self, target, scripts):
        """Build a build_scripts command wired to a stub 'build' command."""
        import sys
        dist = Distribution()
        dist.scripts = scripts
        dist.command_obj["build"] = support.DummyCommand(
            build_scripts=target,
            force=1,
            executable=sys.executable
            )
        return build_scripts(dist)
    def write_sample_scripts(self, dir):
        """Create two Python scripts and one shell script in *dir*;
        return the list of their file names."""
        expected = []
        expected.append("script1.py")
        self.write_script(dir, "script1.py",
                          ("#! /usr/bin/env python2.3\n"
                           "# bogus script w/ Python sh-bang\n"
                           "pass\n"))
        expected.append("script2.py")
        self.write_script(dir, "script2.py",
                          ("#!/usr/bin/python\n"
                           "# bogus script w/ Python sh-bang\n"
                           "pass\n"))
        expected.append("shell.sh")
        self.write_script(dir, "shell.sh",
                          ("#!/bin/sh\n"
                           "# bogus shell script w/ sh-bang\n"
                           "exit 0\n"))
        return expected
    def write_script(self, dir, name, text):
        # Helper: write *text* to dir/name, always closing the file.
        f = open(os.path.join(dir, name), "w")
        try:
            f.write(text)
        finally:
            f.close()
    def test_version_int(self):
        # Regression test: a non-string VERSION config var must not break
        # first-line (sh-bang) rewriting.
        source = self.mkdtemp()
        target = self.mkdtemp()
        expected = self.write_sample_scripts(source)
        cmd = self.get_build_scripts_cmd(target,
                                         [os.path.join(source, fn)
                                          for fn in expected])
        cmd.finalize_options()
        # http://bugs.python.org/issue4524
        #
        # On linux-g++-32 with command line `./configure --enable-ipv6
        # --with-suffix=3`, python is compiled okay but the build scripts
        # failed when writing the name of the executable
        old = sysconfig.get_config_vars().get('VERSION')
        sysconfig._CONFIG_VARS['VERSION'] = 4
        try:
            cmd.run()
        finally:
            if old is not None:
                sysconfig._CONFIG_VARS['VERSION'] = old
        built = os.listdir(target)
        for name in expected:
            self.assertTrue(name in built)
def test_suite():
    """Return all BuildScriptsTestCase tests (distutils test convention)."""
    return unittest.makeSuite(BuildScriptsTestCase)
# Allow running this test file directly.
if __name__ == "__main__":
    run_unittest(test_suite())
|
d | ef kth_smallest(arr, k):
n = len(arr)
a = 0
b = n
while a < b:
piv = a
for i in range(a, b):
if arr[piv] > arr[i]:
arr[i], arr[piv] = arr[piv], arr[i]
piv = i
if piv == k:
return arr[piv]
elif piv < k:
a = piv+1
else:
b = piv-1
# Demo: probe every rank including the out-of-range -1 and len(arr)
# (kth_smallest returns None for those). Converted from Python-2-only
# `print` statements to the print() function.
arr = [9, 3, 5, 6, 1, 3, 3]
print(arr)
for i in range(-1, len(arr) + 1):
    print(i, kth_smallest(arr, i))
|
#!/usr/bin/python
import json
class Client():
    """A connected client endpoint: host, port, channel and a type label."""
    def __init__(self, clientHostName, clientPort, channel):
        self.clientHostName = clientHostName
        self.clientPort = clientPort
        self.clientType = self.getClientType()
        self.channel = channel
    # TO DO implement this method properly
    def getClientType(self):
        """Return the client-type label (currently always "Web Client")."""
        try:
            self.WebClient = "Web Client"
            self.MobileClient = "Mobile Client"
            return self.WebClient
        except ImportError as e:
            # NOTE(review): nothing in the try block can raise ImportError;
            # this handler is dead code kept until the method is implemented.
            # The print was a Python-2-only statement; a single-argument
            # print() call behaves identically on both Python 2 and 3.
            print(json.dumps({"status": "error", "Client.getClientType": str(e)}))
            exit(0)
from __future__ import unicode_literals
from django.c | ontrib.auth.forms import AuthenticationForm
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, HTML, Field
from authtools import forms as authtoolsforms
from django.contrib.auth import forms as authforms
from django.urls import revers | e
class LoginForm(AuthenticationForm):
    """Email-based login form with a crispy-forms layout and remember-me."""
    # Unchecked by default; consumed by the login view, not by this form.
    remember_me = forms.BooleanField(required=False, initial=False)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.fields["username"].widget.input_type = "email"  # ugly hack
        self.helper.layout = Layout(
            Field("username", placeholder="Enter Email", autofocus=""),
            Field("password", placeholder="Enter Password"),
            HTML(
                '<a href="{}">Forgot Password?</a>'.format(
                    reverse("accounts:password-reset")
                )
            ),
            Field("remember_me"),
            Submit("sign_in", "Log in", css_class="btn btn-lg btn-primary btn-block"),
        )
class SignupForm(authtoolsforms.UserCreationForm):
    """Registration form (email + name + password pair) with crispy layout."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.fields["email"].widget.input_type = "email"  # ugly hack
        self.helper.layout = Layout(
            Field("email", placeholder="Enter Email", autofocus=""),
            Field("name", placeholder="Enter Full Name"),
            Field("password1", placeholder="Enter Password"),
            Field("password2", placeholder="Re-enter Password"),
            Submit("sign_up", "Sign up", css_class="btn-warning"),
        )
class PasswordChangeForm(authforms.PasswordChangeForm):
    """Password change form (old + new pair) with crispy layout."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Field("old_password", placeholder="Enter old password", autofocus=""),
            Field("new_password1", placeholder="Enter new password"),
            Field("new_password2", placeholder="Enter new password (again)"),
            Submit("pass_change", "Change Password", css_class="btn-warning"),
        )
class PasswordResetForm(authtoolsforms.FriendlyPasswordResetForm):
    """Password reset request form (email only) with crispy layout."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Field("email", placeholder="Enter email", autofocus=""),
            Submit("pass_reset", "Reset Password", css_class="btn-warning"),
        )
class SetPasswordForm(authforms.SetPasswordForm):
    """Set-new-password form (used after a reset link) with crispy layout."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.helper = FormHelper()
        self.helper.layout = Layout(
            Field("new_password1", placeholder="Enter new password", autofocus=""),
            Field("new_password2", placeholder="Enter new password (again)"),
            Submit("pass_change", "Change Password", css_class="btn-warning"),
        )
|
# -*- coding: utf-8 -*-
from __future__ import uni | code_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds a 'difficulty' float (range -1..1, default 1.0) to BlockModel."""
    dependencies = [
        ('blocks', '0004_auto_20160305_2025'),
    ]
    operations = [
        migrations.AddField(
            model_name='blockmodel',
            name='difficulty',
            field=models.FloatField(default=1.0, help_text='real number between -1 (easiest) and 1 (most difficult)'),
        ),
    ]
|
# Copyright (c) 2017, Lenovo. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing | permissions a | nd
# limitations under the License.
# Name of the alembic migration-version table for this driver.
VERSION_TABLE = 'lenovo_alembic_version'
|
az. | plot_kde(mu_posterior, cumulative=True | )
|
import pandas as pd
import numpy as np
import cobra
from pyefm.ElementaryFluxModes import EFMToolWrapper
from tqdm import tqdm
class EFVWrapper(EFMToolWrapper):
    """EFMTool wrapper that computes elementary flux *vectors* by augmenting
    the stoichiometric matrix with inhomogeneous flux-bound constraints."""
    def create_matrices(self, extra_g=None, extra_h=None):
        """ Initialize the augmented stoichiometric matrix.
        extra_g: (n x nr) array
            Extra entries in the constraint matrix. Positive values for lower
            bounds, negative values for upper bounds
        extra_h: (n) array
            Corresponding bounds for the extra entries matrix
        """
        # Create stoichiometric matrix, get key dimensions
        N = cobra.util.create_stoichiometric_matrix(self.model)
        nm, nr = N.shape
        self.nm = nm
        self.nr = nr
        # Construct full G and h matrices, then drop homogeneous (or near
        # homogeneous) entries
        g_full = np.vstack([np.eye(nr), -np.eye(nr)])
        h_full = np.array([(r.lower_bound, -r.upper_bound)
                           for r in self.model.reactions]).T.flatten()
        # Bounds at <= -1000 are treated as effectively unbounded; ~0 bounds
        # are homogeneous — both are dropped.
        inhomogeneous = ~((h_full <= -1000) | np.isclose(h_full, 0))
        h_full = h_full[inhomogeneous]
        g_full = g_full[inhomogeneous]
        if extra_g is not None:
            assert extra_g.shape[1] == nr
            assert extra_g.shape[0] == len(extra_h)
            g_full = np.vstack([g_full, extra_g])
            h_full = np.hstack([h_full, extra_h])
        G = g_full
        h = h_full
        self.nt = nt = len(h)
        # Augmented system [N 0 0; G -I -h]: slack columns plus a
        # homogenizing 'lambda' column.
        self.D = np.vstack([
            np.hstack([N, np.zeros((nm, nt)), np.zeros((nm, 1))]),
            np.hstack([G, -np.eye(nt), np.atleast_2d(-h).T])
        ])
    def create_model_files(self, temp_dir):
        """Write matrix, reversibilities and names in EFMTool's file format."""
        # Stoichiometric Matrix
        np.savetxt(temp_dir + '/stoich.txt', self.D, delimiter='\t')
        # Reaction reversibilities
        np.savetxt(
            temp_dir + '/revs.txt', np.hstack([
                np.array([r.lower_bound < 0 for r in self.model.reactions]),
                np.zeros((self.nt + 1))]),
            delimiter='\t', fmt='%d', newline='\t')
        # Reaction Names
        r_names = np.hstack([
            np.array([r.id for r in self.model.reactions]),
            np.array(['s{}'.format(i) for i in range(self.nt)]),
            np.array(['lambda'])
        ])
        with open(temp_dir + '/rnames.txt', 'w') as f:
            f.write('\t'.join(('"{}"'.format(name) for name in r_names)))
        # Metabolite Names
        m_names = np.hstack([
            np.array([m.id for m in self.model.metabolites]),
            np.array(['s{}'.format(i) for i in range(self.nt)]),
        ])
        with open(temp_dir + '/mnames.txt', 'w') as f:
            f.write('\t'.join(('"{}"'.format(name) for name in m_names)))
        pass
    def read_double_out(self, out_file):
        """Parse EFMTool's binary double output into unbounded (ray) and
        bounded (vertex) DataFrames; vertices are normalized by lambda."""
        with open(out_file, 'rb') as f:
            # Skips a 13-byte header; big-endian doubles follow.
            # NOTE(review): np.fromstring is deprecated; np.frombuffer is the
            # modern equivalent.
            out_arr = np.fromstring(f.read()[13:], dtype='>d').reshape(
                (-1, self.nt + self.nr + 1)).T
        out_arr = np.asarray(out_arr, dtype=np.float64).T
        # Sort by the absolute value of the stoichiometry
        sort_inds= np.abs(out_arr[:, :self.nr]).sum(1).argsort()
        out_arr = out_arr[sort_inds]
        # lambda == 0 rows are unbounded rays; the rest scale to lambda == 1.
        unbounded = out_arr[np.isclose(out_arr[:,-1], 0.)]
        bounded = out_arr[~np.isclose(out_arr[:,-1], 0.)]
        if bounded.size: # Test if its empty
            bounded /= np.atleast_2d(bounded[:,-1]).T
        unbounded_df = pd.DataFrame(
            unbounded[:, :self.nr],
            columns=[r.id for r in self.model.reactions],
            index=['UEV{}'.format(i)
                   for i in range(1, unbounded.shape[0] + 1)])
        bounded_df = pd.DataFrame(
            bounded[:, :self.nr],
            columns=[r.id for r in self.model.reactions],
            index=('BEV{}'.format(i)
                   for i in range(1, bounded.shape[0] + 1)))
        # NOTE(review): DataFrame.append was removed in pandas 2.0; newer
        # pandas needs pd.concat([unbounded_df, bounded_df]).
        return unbounded_df.append(bounded_df)
def calculate_elementary_vectors(cobra_model, opts=None, verbose=True,
                                 java_args=None, extra_g=None, extra_h=None):
    """Calculate elementary flux vectors, which capture arbitrary linear
    constraints. Approach as detailed in S. Klamt et al., PLoS Comput Biol. 13,
    e1005409–22 (2017).
    Augmented constraints as a hacky workaround for implementing more
    complicated constraints without using optlang.
    java_args: string
        Extra command-line options to pass to the java virtual machine.
        Eg. '-Xmx1g' will set the heap space to 1 GB.
    extra_g: (n x nr) array
        Extra entries in the constraint matrix. postive values for lower
        bounds, negative values for upper bounds
    extra_h: (n) array
        Corresponding bounds for the extra entries matrix

    Returns the wrapper's call result (the computed EFVs as produced by
    EFVWrapper.__call__).
    """
    efv_wrap = EFVWrapper(cobra_model, opts, verbose, java_args=java_args)
    efv_wrap.create_matrices(extra_g=extra_g, extra_h=extra_h)
    # Invoking the wrapper runs EFMTool and parses its output.
    return efv_wrap()
def get_support_minimal(efvs):
    """Return only those elementary flux vectors whose support is not a proper
    superset of another EFV's support."""
    # Boolean mask of near-zero entries, preserving labels.
    zero_mask = pd.DataFrame(np.isclose(efvs, 0),
                             columns=efvs.columns, index=efvs.index)
    # Map each EFV to the set of reactions where it is nonzero.
    supports = zero_mask.apply(lambda row: set(row.index[~row]), 1)
    supports = supports[supports != set()]  # Drop the empty set EFV
    minimal_keys = _get_support_minimal_list(supports.to_dict())
    return efvs.loc[minimal_keys]
def _get_support_minimal_list(set_dict):
    """Return the keys of set_dict whose support set is not a superset of any
    other entry's support set."""
    keys = set(set_dict.keys())
    minimal = []
    for candidate, support in tqdm(set_dict.items()):
        others = keys.difference({candidate})
        # Short-circuits on the first dominated comparison, like the
        # original for/else loop.
        if not any(support.issuperset(set_dict[other]) for other in others):
            minimal.append(candidate)
    return minimal
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import re
from io import StringIO
from .strings import escape
EMBEDDED_NEWLINE_MATCHER = re.compile(r'[^\n]\n+[^\n]')
class PoFile(object):
    """In-memory representation of a gettext .po file: ordered header fields
    plus msgid-keyed translation entries."""
    def __init__(self):
        self.header_fields = []   # (field, value) pairs in file order
        self._header_index = {}   # field name -> position in header_fields
        self.entries = {}         # msgid -> TranslationEntry
    def clone(self):
        """Return a copy with the header list copied and each entry cloned.

        Bug fix: the header *index* is now copied as well; previously a
        clone's _header_index was empty, so add_header_field() on a clone
        appended a duplicate field instead of replacing the existing one.
        """
        po_file = PoFile()
        po_file.header_fields.extend(self.header_fields)
        po_file._header_index = dict(self._header_index)
        for msgid, entry in self.entries.items():
            po_file.entries[msgid] = entry.clone()
        return po_file
    def add_header_field(self, field, value):
        """Set a header field, replacing it in place if already present."""
        if field in self._header_index:
            self.header_fields[self._header_index[field]] = (field, value)
        else:
            self._header_index[field] = len(self.header_fields)
            self.header_fields.append((field, value))
    def add_entry(self, message, plural=None, context=None):
        """Add (or merge into) the entry for (message, context); return it."""
        msgid = get_msgid(message, context)
        if msgid in self.entries:
            entry = self.entries[msgid]
            # Allow merging a non-plural entry with a plural entry
            # If more than one plural entry only keep the first
            if entry.plural is None:
                entry.plural = plural
        else:
            entry = TranslationEntry(message, plural, context)
            self.entries[msgid] = entry
        return entry
    def dump(self, fp, include_locations=True, prune_obsoletes=False):
        """Write the po file to fp: header first, then sorted entries."""
        needs_blank_line = False
        if len(self.header_fields):
            print('msgid ""', file=fp)
            print('msgstr ""', file=fp)
            for field, value in self.header_fields:
                print(r'"{}: {}\n"'.format(field, value), file=fp)
            needs_blank_line = True
        nplurals = self.get_nplurals()
        for entry in sorted(self.entries.values(), key=get_entry_sort_key):
            if needs_blank_line:
                print('', file=fp)
            # An entry skipped as obsolete must not force a blank line.
            needs_blank_line = entry.dump(
                fp, nplurals, include_locations=include_locations, prune_obsolete=prune_obsoletes)
    def dumps(self, include_locations=True, prune_obsoletes=False):
        """Return the po file rendered as a string."""
        string_file = StringIO()
        self.dump(string_file, include_locations, prune_obsoletes)
        return string_file.getvalue()
    def get_catalog(self):
        """Return a msgid -> translation dict built from all entries."""
        catalog = {}
        for entry in self.entries.values():
            entry.fill_catalog(catalog)
        return catalog
    def get_nplurals(self):
        """Return nplurals parsed from the Plural-Forms header, or None."""
        plural_field_index = self._header_index.get('Plural-Forms', -1)
        if plural_field_index != -1:
            field, value = self.header_fields[plural_field_index]
            if field == 'Plural-Forms':
                for pair in value.split(';'):
                    parts = pair.partition('=')
                    if parts[0].strip() == 'nplurals':
                        return int(parts[2].strip())
        return None
class TranslationEntry(object):
    """One po-file entry: msgid (+ optional plural/context), its source
    locations and its translations keyed by plural index."""
    # Minimum number of msgstr[n] lines emitted for a plural entry when the
    # file declares no nplurals.
    MIN_NPLURALS = 2
    def __init__(self, message, plural=None, context=None):
        self.message = message
        self.plural = plural
        self.context = context
        self.locations = []      # (filename, lineno) pairs
        self.translations = {}   # plural index -> translated string
    def clone(self):
        """Return a copy with its own location list and translation dict."""
        entry = TranslationEntry(self.message, self.plural, self.context)
        entry.locations.extend(self.locations)
        entry.translations = self.translations.copy()
        return entry
    def add_location(self, filename, lineno):
        """Record a source-code occurrence of this message."""
        self.locations.append((filename, lineno))
    def add_translation(self, translation):
        """Set the singular (index 0) translation."""
        self.add_plural_translation(0, translation)
    def add_plural_translation(self, index, translation):
        """Set the translation for one plural index."""
        self.translations[index] = translation
    def fill_catalog(self, catalog):
        """Insert this entry's non-empty translations into `catalog`.
        Plural entries use (msgid, index) keys; singular entries use msgid."""
        msgid = get_msgid(self.message, self.context)
        if self.plural is not None:
            for index, translation in self.translations.items():
                if translation:
                    catalog[(msgid, index)] = translation
        else:
            translation = self.translations.get(0, '')
            if translation:
                catalog[msgid] = translation
    def dump(self, fp, nplurals=None, include_locations=True, prune_obsolete=False):
        """
        If plural, shows exactly 'nplurals' plurals if 'nplurals' is not None, else shows at least min_nplurals.
        All plural index are ordered and consecutive, missing entries are displayed with an empty string.

        Returns True when something was written, False when the entry was
        pruned (obsolete with no translations, or prune_obsolete set).
        """
        if not len(self.locations):
            # No locations -> the message no longer appears in the sources.
            if prune_obsolete or all(translation == '' for index, translation in self.translations.items()):
                return False
            else:
                print('#. obsolete entry', file=fp)
        if include_locations and len(self.locations):
            print('#: {}'.format(' '.join('{}:{}'.format(*location) for location in self.locations)), file=fp)
        if self.context is not None:
            print('msgctxt {}'.format(multiline_escape(self.context)), file=fp)
        print('msgid {}'.format(multiline_escape(self.message)), file=fp)
        if self.plural is not None:
            print('msgid_plural {}'.format(multiline_escape(self.plural)), file=fp)
            if nplurals is None:
                nplurals = self.get_suggested_nplurals()
            for index in range(nplurals):
                print('msgstr[{}] {}'.format(index, multiline_escape(self.translations.get(index, ''))), file=fp)
        else:
            print('msgstr {}'.format(multiline_escape(self.translations.get(0, ''))), file=fp)
        return True
    def get_suggested_nplurals(self):
        """Return a plural count covering all indexes seen, at least MIN_NPLURALS."""
        if len(self.translations) > 0:
            return max(max(self.translations.keys()) + 1, self.MIN_NPLURALS)
        else:
            return self.MIN_NPLURALS
def multiline_escape(string):
    """Render *string* as a po-file string literal, using the multi-line
    '""' form when it contains embedded newlines."""
    if not EMBEDDED_NEWLINE_MATCHER.search(string):
        return '"{}"'.format(escape(string))
    lines = string.split('\n')
    parts = ['""']
    parts.extend('"{}\\n"'.format(escape(line)) for line in lines[:-1])
    if len(lines[-1]):
        parts.append('"{}"'.format(escape(lines[-1])))
    return '\n'.join(parts)
def get_msgid(message, context=None):
    """Build the catalog key: when a context is given it is prefixed to the
    message with the gettext EOT separator (\\x04)."""
    if context is None:
        return message
    return '{}\x04{}'.format(context, message)
def get_entry_sort_key(entry):
    """Sort key for entries: locations, then context (falsy -> ''), then message."""
    context_key = entry.context if entry.context else ''
    return entry.locations, context_key, entry.message
|
pdate':
if wid._context.sandbox:
with wid._context.sandbox:
wid.dispatch('on_touch_move', me)
else:
wid.dispatch('on_touch_move', me)
elif etype == 'end':
if wid._context.sandbox:
with wid._context.sandbox:
wid.dispatch('on_touch_up', me)
else:
wid.dispatch('on_touch_up', me)
wid._context.pop()
me.grab_current = None
if wid != root_window and root_window is not None:
me.pop()
me.grab_state = False
    def _dispatch_input(self, *ev):
        """Queue one input event tuple; an already-queued identical event is
        moved to the end so events dispatch in latest-arrival order."""
        # remove the save event for the touch if exist
        if ev in self.input_events:
            self.input_events.remove(ev)
        self.input_events.append(ev)
    def dispatch_input(self):
        '''Called by :meth:`EventLoopBase.idle()` to read events from input
        providers, pass events to postproc, and dispatch final events.
        '''
        # first, acquire input events
        for provider in self.input_providers:
            provider.update(dispatch_fn=self._dispatch_input)
        # execute post-processing modules
        for mod in self.postproc_modules:
            self.input_events = mod.process(events=self.input_events)
        # real dispatch input
        input_events = self.input_events
        pop = input_events.pop
        post_dispatch_input = self.post_dispatch_input
        # FIFO: dispatch from the front of the queue until it drains.
        while input_events:
            post_dispatch_input(*pop(0))
    def mainloop(self):
        '''Run idle() in a loop until `quit` is set or the status leaves
        'started'. Exceptions go through the ExceptionManager first and are
        only re-raised when it says so.
        NOTE(review): relies on ExceptionManager and stopTouchApp being
        available at module level (the async variant imports them locally).
        '''
        while not self.quit and self.status == 'started':
            try:
                self.idle()
                if self.window:
                    self.window.mainloop()
            except BaseException as inst:
                # use exception manager first
                r = ExceptionManager.handle_exception(inst)
                if r == ExceptionManager.RAISE:
                    stopTouchApp()
                    raise
                else:
                    pass
    async def async_mainloop(self):
        '''Async counterpart of :meth:`mainloop`: awaits async_idle() until
        `quit` is set or the status leaves 'started', then closes.'''
        from kivy.base import ExceptionManager, stopTouchApp
        while not self.quit and self.status == 'started':
            try:
                await self.async_idle()
                if self.window:
                    self.window.mainloop()
            except BaseException as inst:
                # use exception manager first
                r = ExceptionManager.handle_exception(inst)
                if r == ExceptionManager.RAISE:
                    stopTouchApp()
                    raise
                else:
                    pass
        Logger.info("Window: exiting mainloop and closing.")
        self.close()
    def idle(self):
        '''This function is called after every frame. By default:
        * it "ticks" the clock to the next frame.
        * it reads all input and dispatches events.
        * it dispatches `on_update`, `on_draw` and `on_flip` events to the
          window.
        Returns False when no event listeners remain (the app should stop),
        otherwise the current `quit` flag.
        '''
        # update dt
        Clock.tick()
        # read and dispatch input from providers
        self.dispatch_input()
        # flush all the canvas operation
        Builder.sync()
        # tick before draw
        Clock.tick_draw()
        # flush all the canvas operation
        Builder.sync()
        window = self.window
        if window and window.canvas.needs_redraw:
            window.dispatch('on_draw')
            window.dispatch('on_flip')
        # don't loop if we don't have listeners !
        if len(self.event_listeners) == 0:
            Logger.error('Base: No event listeners have been created')
            Logger.error('Base: Application will leave')
            self.exit()
            return False
        return self.quit
    async def async_idle(self):
        '''Identical to :meth:`idle`, but instead used when running
        within an async event loop.
        '''
        # update dt
        await Clock.async_tick()
        # read and dispatch input from providers
        self.dispatch_input()
        # flush all the canvas operation
        Builder.sync()
        # tick before draw
        Clock.tick_draw()
        # flush all the canvas operation
        Builder.sync()
        window = self.window
        if window and window.canvas.needs_redraw:
            window.dispatch('on_draw')
            window.dispatch('on_flip')
        # don't loop if we don't have listeners !
        if len(self.event_listeners) == 0:
            Logger.error('Base: No event listeners have been created')
            Logger.error('Base: Application will leave')
            self.exit()
            return False
        return self.quit
    def run(self):
        '''Main loop: call idle() until `quit` is set, then exit().'''
        while not self.quit:
            self.idle()
        self.exit()
    def exit(self):
        '''Close the main loop and close the window.'''
        self.close()
        # close() does not touch the window; do it explicitly.
        if self.window:
            self.window.close()
    def on_stop(self):
        '''Event handler for `on_stop` events which will be fired right
        after all input providers have been stopped. Default: no-op.'''
        pass
    def on_pause(self):
        '''Event handler for `on_pause` which will be fired when
        the event loop is paused. Default: no-op.'''
        pass
    def on_start(self):
        '''Event handler for `on_start` which will be fired right
        after all input providers have been started. Default: no-op.'''
        pass
#: EventLoop instance
# Module-level singleton used by the runTouchApp machinery below.
EventLoop = EventLoopBase()
def _runTouchApp_prepare(widget=None, slave=False):
    """Shared setup for runTouchApp: create the window (when a widget is
    given), instantiate configured input providers and postproc modules,
    attach the root widget and start the EventLoop."""
    from kivy.input import MotionEventFactory, kivy_postproc_modules
    # Ok, we got one widget, and we are not in slave mode
    # so, user don't create the window, let's create it for him !
    if widget:
        EventLoop.ensure_window()
    # Instance all configured input
    for key, value in Config.items('input'):
        Logger.debug('Base: Create provider from %s' % (str(value)))
        # split value
        # config values look like "provider_id[,provider-args]"
        args = str(value).split(',', 1)
        if len(args) == 1:
            args.append('')
        provider_id, args = args
        provider = MotionEventFactory.get(provider_id)
        if provider is None:
            Logger.warning('Base: Unknown <%s> provider' % str(provider_id))
            continue
        # create provider
        p = provider(key, args)
        if p:
            EventLoop.add_input_provider(p, True)
    # add postproc modules
    for mod in list(kivy_postproc_modules.values()):
        EventLoop.add_postproc_module(mod)
    # add main widget
    if widget and EventLoop.window:
        if widget not in EventLoop.window.children:
            EventLoop.window.add_widget(widget)
    # start event loop
    Logger.info('Base: Start application main loop')
    EventLoop.start()
    # remove presplash on the next frame
    if platform == 'android':
        Clock.schedule_once(EventLoop.remove_android_splash)
# in non-slave mode, they are 2 issues
#
# 1. if user created a window, call the mainloop from window.
# This is due to glut, it need to be called with
# glutMainLoop(). Only FreeGLUT got a gluMainLoopEvent().
# So, we are executing the dispatching function inside
# a redisplay event.
#
# 2. if no window is created, we are dispatching event loop
# ourself (previous behavior.)
#
def runTouchApp(widget=None, slave=False):
'''Static main function that starts the application loop.
You can access some magic via the following arguments:
See :mod:`kivy.app` for example usage.
:Parameters:
`<empty>`
To make dispatching work, you need at least one
input listener. If not, application will leave.
(MTWindow act as an input listener)
`widget`
If you pass only a widget, a MTWindow will be created
and your widget will be added to the window as the root
widget.
`slave`
No event dispatching is done. This will be your job.
`widget + slave`
No event dispatching is done. This will be your job but
|
import wx
import listControl as lc
import getPlugins as gpi
from decimal import Decimal
import os
class Plugin():
    """BLAST-results view: shows hit statistics in a virtual wx list and
    creates one button per discovered sub-view plugin."""
    def OnSize(self):
        """Resize the list and reposition buttons after a panel size change."""
        # Respond to size change
        self.bPSize = self.bigPanel.GetSize()
        self.list.SetSize((self.bPSize[0] - 118, self.bPSize[1] - 40))
        self.ButtonShow(False)
        self.SetButtons()
        self.ButtonShow(True)
    def Refresh(self,record):
        """Reload the view from a new BLAST record."""
        self.GetExec(record)
    def Clear(self):
        """Hide the list and the view buttons."""
        self.list.Show(False)
        self.ButtonShow(False)
    def ButtonShow(self,tf):
        """Show or hide every view button."""
        for b in self.buttons:
            b.Show(tf)
    def SetButtons(self):
        """(Re)create one button per plugin found in BLASTViewPlugins.
        NOTE(review): path uses Windows-style separators."""
        self.views = gpi.GetPlugIns(
            self.hd+r"\plugins\Views\BLASTViewPlugins")
        xPos = 300
        self.buttons = []
        for v in self.views.values():
            self.buttons.append(wx.Button(self.bigPanel, -1,
                                          str(v.GetName()),
                                          pos = (self.bPSize[0] * xPos / 747,
                                                 self.bPSize[1] - 35),
                                          size = (90, 22),
                                          style = wx.NO_BORDER))
            # Colors come from the sub-plugin's declared color scheme.
            self.buttons[-1].SetBackgroundColour(
                self.colorList[v.GetColors()]['Back'])
            self.buttons[-1].SetForegroundColour(
                self.colorList[v.GetColors()]['Fore'])
            xPos += 100
            self.bigPanel.Bind(wx.EVT_BUTTON, self.DoView, self.buttons[-1])
    def Init(self, parent, bigPanel, colorList):
        """First-time setup: create the virtual result list and the buttons."""
        self.hd = os.getcwd()
        self.colorList = colorList
        self.bigPanel = bigPanel
        self.bPSize = self.bigPanel.GetSize()
        self.list = lc.TestListCtrl(self.bigPanel, -1, size = (0,0),
                                    pos = (self.bPSize[0] - 118,
                                           self.bPSize[1] - 40),
                                    style = wx.LC_REPORT|wx.LC_VIRTUAL,
                                    numCols = 7)
        self.list.SetBackgroundColour(
            self.colorList['ViewPanelList']['Back'])
        self.list.SetForegroundColour(
            self.colorList['ViewPanelList']['Fore'])
        self.SetButtons()
        self.ListCntrlFill()
        self.list.Show(True)
        self.ButtonShow(False)
    def GetExec(self, Rec):
        """Show the view for the given record (Rec[0] is the BLAST record)."""
        self.SetButtons()
        self.list.Show(True)
        self.ButtonShow(True)
        self.BlastRec = Rec[0]
        self.OnSelect(wx.EVT_IDLE)
    def ListRefill(self):
        """Rebuild the list data: one row per HSP of each alignment."""
        listData = dict()
        j = 0
        for alignment in self.BlastRec.alignments:
            for hsp in alignment.hsps:
                # E-value rounded to 5 decimal places for display.
                listData[j] = (str(alignment.title), alignment.length,
                               hsp.score,
                               Decimal(hsp.expect).quantize(Decimal(10) ** -5),
                               hsp.identities, hsp.positives, hsp.gaps)
                j += 1
        self.list.Refill(listData)
    def ListCntrlFill(self):
        """Set up the list's column headers and widths."""
        cols = ['Title', 'Length', 'Score', 'E Values',
                'Idents.', 'Posits.', 'Gaps']
        colWidths = [318, 50, 50, 59, 48, 48, 40]
        self.list.Fill(cols, colWidths)
    def OnSelect(self, event):
        """Refresh columns and rows (also used as an idle handler)."""
        self.ListCntrlFill()
        self.ListRefill()
    def RecInfo(self):
        """Build [matches, seqs, titles] for the selected rows, padding each
        HSP with '-' so all sequences align against the full query."""
        pos = self.list.GetSelected()
        matches = ['']
        seqs = []
        titles = []
        alignment = self.BlastRec.alignments[self.list.itemIndexMap[0]]
        titles.append('query')
        for p in pos:
            alignment = self.BlastRec.alignments[self.list.itemIndexMap[p]]
            for hsp in alignment.hsps:
                query = ''
                i = 1
                strtblnk = ''
                # Pad the region before the HSP with the original query bases.
                while i < hsp.query_start:
                    strtblnk += '-'
                    query += self.BlastRec.alignments[0].hsps[0].query[i-1]
                    i += 1
                query += hsp.query
                i = 0
                endblnk = ''
                j = len(strtblnk)+len(hsp.query)
                # Pad the region after the HSP likewise.
                while i + j < len(self.BlastRec.alignments[0].hsps[0].query):
                    endblnk += '-'
                    query += self.BlastRec.alignments[0].hsps[0].query[j+i]
                    i += 1
                # Title shortened to the first two '|'-separated fields.
                t = str(alignment.title).split('|')
                titles.append(str(t[0] + '|' + t[1]))
                matches.append(strtblnk + str(hsp.match) + endblnk)
                seqs.append(query)
                seqs.append(strtblnk + str(hsp.sbjct) + endblnk)
        return [matches,seqs,titles]
    def DoView(self,event):
        """Dispatch a button click to the matching sub-view plugin."""
        for v in self.views.values():
            if v.GetName() == event.GetEventObject().GetLabelText():
                v.Plugin().GetExec(self.RecInfo(), self.bigPanel, self.hd,
                                   self.BlastRec.alignments,
                                   self.BlastRec.application)
    def GetType(self):
        """Plugin API: record type handled by this view."""
        return "Blast Results"
    def GetName(self):
        """Plugin API: this view's display name."""
        return "BLASTView"
def GetType():
    """Module-level plugin hook: the record type this view handles."""
    plugin_type = "Blast Results"
    return plugin_type
def GetName():
    """Module-level plugin hook: this view's display name."""
    plugin_name = "BLASTView"
    return plugin_name
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
############################################### | ##################### | ##########
from spack import *
class XcbUtilWm(AutotoolsPackage):
    """The XCB util modules provides a number of libraries which sit on top
    of libxcb, the core X protocol library, and some of the extension
    libraries. These experimental libraries provide convenience functions
    and interfaces which make the raw X protocol more usable. Some of the
    libraries also provide client-side code which is not strictly part of
    the X protocol but which have traditionally been provided by Xlib."""
    homepage = "https://xcb.freedesktop.org/"
    url = "https://xcb.freedesktop.org/dist/xcb-util-wm-0.4.1.tar.gz"
    # Second argument is the md5 checksum of the release tarball.
    version('0.4.1', '0831399918359bf82930124fa9fd6a9b')
    depends_on('libxcb@1.4:')
    # pkg-config only needed at build time.
    depends_on('pkg-config@0.9.0:', type='build')
|
#!/usr/bin/env python
import sys
import optparse
import socket
def main():
    """Connect to a local server and stream one million cell-update lines.

    Sends a header line describing the sheet, then one line per cell.
    """
    p = optparse.OptionParser()
    # type='int' so a port given on the command line is parsed to an int;
    # without it optparse handed connect() a string port.
    p.add_option("--port", "-p", type="int", default=8888)
    # --input is accepted for interface compatibility; the original opened
    # it but never read or closed it (resource leak), so it is unused here.
    p.add_option("--input", "-i", default="test.txt")
    options, arguments = p.parse_args()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.connect(("localhost", options.port))
        # Header: row id 0, sheet name, total row count, column count.
        sock.sendall("^0^1^sheet1^1000000^3\n".encode("ascii"))
        for ii in range(1000000):
            # .encode keeps this working on Python 3 (sendall needs bytes).
            sock.sendall(("^%d^0^sheet1^%d^0^^0\n" % (ii, ii)).encode("ascii"))
    finally:
        # Close the socket even if connect/send fails part-way.
        sock.close()
# Run the client only when executed as a script.
if __name__ == '__main__':
    main()
|
"""
This file is part of the splonebox python client library.
The splonebox python client library is free software: you can
redistribute it and/or modify it under the terms of the GNU Lesser
General Public License as published by the Free Software Foundation,
either version 3 of the License or any later version.
It is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this splonebox python client library. If not,
see <http://www.gnu.org/licenses/>.
"""
import ctypes
import unittest
import msgpack
from splonebox.api.plugin import Plugin
from splonebox.api.remoteplugin import RemotePlugin
from splonebox.api.core import Core
from splonebox.api.remotefunction import RemoteFunction
from splonebox.rpc.message import MRequest, MResponse
from threading import Lock
from test import mocks
class CompleteCall(unittest.TestCase):
    """End-to-end tests: a plugin registers and calls its own function
    through a mocked RPC connection."""
    def setUp(self):
        # cleanup remote_functions
        # NOTE(review): set to [] here but to {} at the end of
        # test_complete_register — one of the two types must be wrong;
        # confirm against RemoteFunction's declaration.
        RemoteFunction.remote_functions = []
    def test_complete_run(self):
        """Full round trip of run(): request, response, then result."""
        # In this test a plugin is created and is calling itself.
        # The called_lock is used to ensure that we receive
        # the response before the result
        called_lock = Lock()
        called_lock.acquire()
        def add(a: ctypes.c_int64, b: ctypes.c_int64):
            "add two ints"
            called_lock.acquire()
            return a + b
        RemoteFunction(add)
        core = Core()
        plug = Plugin("foo", "bar", "bob", "alice", core)
        rplug = RemotePlugin("plugin_id", "foo", "bar", "bob", "alice", core)
        mock_send = mocks.rpc_connection_send(core._rpc)
        result = rplug.run("add", [7, 8])
        # receive request
        msg = MRequest.from_unpacked(msgpack.unpackb(mock_send.call_args[0][
            0]))
        msg.arguments[0][0] = None  # remove plugin id
        msg.arguments[0][1] = 123  # set call id
        core._rpc._message_callback(msg.pack())
        # receive response
        data = mock_send.call_args_list[1][0][0]
        core._rpc._message_callback(data)
        # start execution
        called_lock.release()
        # wait for execution to finish
        plug._active_threads[123].join()
        # receive result
        data = mock_send.call_args_list[2][0][0]
        core._rpc._message_callback(data)
        self.assertEqual(result.get_result(blocking=True), 15)
        self.assertEqual(result.get_status(), 2)
        self.assertEqual(result._error, None)
        self.assertEqual(result.get_id(), 123)
    def test_complete_register(self):
        """Register round trip: outgoing message shape, then invalid and
        valid responses (each response handled exactly once)."""
        def fun():
            pass
        RemoteFunction(fun)
        core = Core()
        plug = Plugin("foo", "bar", "bob", "alice", core)
        mock_send = mocks.rpc_connection_send(core._rpc)
        result = plug.register(blocking=False)
        outgoing = msgpack.unpackb(mock_send.call_args_list[0][0][0])
        # validate outgoing
        self.assertEqual(0, outgoing[0])
        self.assertEqual(b'register', outgoing[2])
        self.assertEqual(
            [b"foo", b"bar", b"bob", b"alice"], outgoing[3][0])
        self.assertIn([b'fun', b'', []], outgoing[3][1])
        # test response handling
        self.assertEqual(result.get_status(), 0)  # no response yet
        response = MResponse(outgoing[1])
        # send invalid response (Second field is set to None)
        core._rpc._handle_response(response)
        self.assertEqual(result.get_status(), -1)
        # make sure response is only handled once
        with self.assertRaises(KeyError):
            core._rpc._handle_response(response)
        # test valid response
        result = plug.register(blocking=False)
        outgoing = msgpack.unpackb(mock_send.call_args_list[1][0][0])
        response = MResponse(outgoing[1])
        response.response = []
        core._rpc._handle_response(response)
        self.assertEqual(result.get_status(), 2)
        # cleanup remote_functions
        RemoteFunction.remote_functions = {}
|
# -*- coding: utf-8 -*-
"""
github-indicator options
Author: Gabriel Patiño <gepatino@gmail.com>
License: Do whatever you want
"""
import optparse
import os
import xdg.BaseDirectory
from ghindicator import language
__version__ = (0, 0, 4)
# Hack to fix a missing function in my version of xdg
if not hasattr(xdg.BaseDirectory, 'save_cache_path'):
    def save_cache_path(resource):
        # Mirror newer xdg: ensure $XDG_CACHE_HOME/<resource> exists and
        # return its path.
        path = os.path.join('/', xdg.BaseDirectory.xdg_cache_home, resource)
        if not os.path.exists(path):
            os.makedirs(path)
        return path
    # Monkey-patch so the rest of the module can call it unconditionally.
    xdg.BaseDirectory.save_cache_path = save_cache_path
APPNAME = 'github-indicator'
# Icon assets shipped next to this module.
ICON_DIR = os.path.realpath(os.path.join(os.path.dirname(__file__), 'icons'))
# Per-user XDG data/config/cache directories (created on demand).
DATA_DIR = xdg.BaseDirectory.save_data_path(APPNAME)
CONFIG_DIR = xdg.BaseDirectory.save_config_path(APPNAME)
CACHE_DIR = xdg.BaseDirectory.save_cache_path(APPNAME)
# Command-line parser; _() is presumably installed globally by the
# `ghindicator.language` import above (gettext) -- TODO confirm.
parser = optparse.OptionParser(version='%prog ' + '.'.join(map(str, __version__)))
parser.add_option('-s', '--status-icon', action='store_true',
                  dest='status_icon', default=False,
                  help=_('Use a gtk status icon instead of appindicator'))
parser.add_option('-u', '--username', action='store',
                  dest='username', default=None,
                  help=_('GitHub username'))
parser.add_option('-p', '--password', action='store',
                  dest='password', default=None,
                  help=_('GitHub password (won\'t be saved)'))
parser.add_option('-t', '--update-time', action='store',
                  dest='update_time', default=60, type='int',
                  help=_('Checks for status updates after the specified amount of time [in seconds].'))
parser.add_option('-l', '--log-level', action='store',
                  dest='log_level', default='error',
                  help=_('Sets logging level to one of [debug|info|warning|error|critical]'))
def get_options():
    """Parse sys.argv with the module-level parser; returns (options, args)."""
    options, args = parser.parse_args()
    return options, args
|
# Test for behaviour of combined standard and extended block device
try:
import uos
uos.VfsFat
uos.VfsLfs2
except (ImportError, AttributeError):
print("SKIP")
raise SystemExit
class RAMBlockDevice:
    """RAM-backed block device implementing the extended block protocol
    (readblocks/writeblocks with byte offsets, plus the ioctl queries
    needed by VfsFat and VfsLfs2)."""

    ERASE_BLOCK_SIZE = 512

    def __init__(self, blocks):
        # Flat backing store of `blocks` erase blocks.
        self.data = bytearray(blocks * self.ERASE_BLOCK_SIZE)

    def readblocks(self, block, buf, off=0):
        # Copy len(buf) bytes starting at the requested block/offset.
        start = block * self.ERASE_BLOCK_SIZE + off
        buf[:] = self.data[start:start + len(buf)]

    def writeblocks(self, block, buf, off=None):
        if off is None:
            # Standard-protocol call: treat as erase-then-write from block start.
            off = 0
        start = block * self.ERASE_BLOCK_SIZE + off
        self.data[start:start + len(buf)] = buf

    def ioctl(self, op, arg):
        if op == 4:  # block count
            return len(self.data) // self.ERASE_BLOCK_SIZE
        if op == 5:  # block size
            return self.ERASE_BLOCK_SIZE
        if op == 6:  # erase block (no-op for RAM)
            return 0
def test(bdev, vfs_class):
    """Format bdev with vfs_class and exercise basic filesystem operations,
    printing each result (statvfs, directory listing, file contents)."""
    print('test', vfs_class)
    # mkfs
    vfs_class.mkfs(bdev)
    # construction
    vfs = vfs_class(bdev)
    # statvfs
    print(vfs.statvfs('/'))
    # open, write close
    f = vfs.open('test', 'w')
    for i in range(10):
        f.write('some data')
    f.close()
    # ilistdir
    print(list(vfs.ilistdir()))
    # read
    with vfs.open('test', 'r') as f:
        print(f.read())
try:
    # 50 blocks x 512 bytes = 25600 bytes; skip on ports with too little RAM.
    bdev = RAMBlockDevice(50)
except MemoryError:
    print("SKIP")
    raise SystemExit
test(bdev, uos.VfsFat)
test(bdev, uos.VfsLfs2)
|
#!/usr/bin/env python
'''
Solves Constrainted Toy Problem Storing Optimization History.
min x1^2 + x2^2
s.t.: 3 - x1 <= 0
2 - x2 <= 0
-10 <= x1 <= 10
-10 <= x2 <= 10
'''
# =============================================================================
# Standard Python modules
# =============================================================================
import os, sys, time
import pdb
# =============================================================================
# Extension modules
# =============================================================================
from pyOpt import Optimization
from pyOpt import SLSQP
# ======================================== | =====================================
#
# =============================================================================
def objfunc(x):
    """Toy objective for pyOpt: f = x1^2 + x2^2, subject to
    3 - x1 <= 0 and 2 - x2 <= 0.

    Returns the (f, g, fail) triple expected by pyOpt optimizers.
    """
    f = x[0] ** 2 + x[1] ** 2
    g = [3 - x[0], 2 - x[1]]
    fail = 0
    return f, g, fail
# ================================ | =============================================
#
# =============================================================================
# Instantiate Optimization Problem
opt_prob = Optimization('TOY Constraint Problem',objfunc)
# Two continuous ('c') variables starting at 1.0.
# NOTE(review): module docstring says -10 <= x <= 10 but code bounds are
# [0, 10] -- confirm which is intended.
opt_prob.addVar('x1','c',value=1.0,lower=0.0,upper=10.0)
opt_prob.addVar('x2','c',value=1.0,lower=0.0,upper=10.0)
opt_prob.addObj('f')
# Inequality ('i') constraints matching g[0] and g[1] from objfunc.
opt_prob.addCon('g1','i')
opt_prob.addCon('g2','i')
print opt_prob
# Instantiate Optimizer (SLSQP) & Solve Problem Storing History
slsqp = SLSQP()
slsqp.setOption('IFILE','slsqp1.out')
slsqp(opt_prob,store_hst=True)
print opt_prob.solution(0)
# Solve Problem Using Stored History (Warm Start)
slsqp.setOption('IFILE','slsqp2.out')
slsqp(opt_prob, store_hst=True, hot_start='slsqp1')
print opt_prob.solution(1)
|
# Copyright 2014
# The Cloudscaling Group, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# dis | tributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EC2api API Metadata Server
"""
import sys
from oslo_config import cfg
from os | lo_log import log as logging
from ec2api import config
from ec2api import service
CONF = cfg.CONF
def main():
    """Parse configuration, set up logging, and run the EC2api
    metadata WSGI service until it terminates."""
    config.parse_args(sys.argv)
    logging.setup(CONF, "ec2api")
    metadata = service.WSGIService('metadata')
    service.serve(metadata, workers=metadata.workers)
    service.wait()
if __name__ == '__main__':
main()
|
##
# This file is an EasyBuild reciPY as per https://github.com/hpcugent/easybuild
#
# Copyright:: Copyright 2012-2017 Uni.Lu/LCSB, NTUA
# Authors:: Cedric Laczny <cedric.laczny@uni.lu>, Fotis Georgatos <fotis@cern.ch>, Kenneth Hoste
# License:: MIT/GPL
# $Id$
#
# This work implements a part of the HPCBIOS project and is a component of the policy:
# http://hpcbios.readthedocs.org/en/latest/HPCBIOS_2012-94.html
##
"""
EasyBuild support for building and installing MetaVelvet, implemented as an easyblock
@author: Cedric Laczny (Uni.Lu)
@author: Fotis Georgatos (Uni.Lu)
@author: Kenneth Hoste (Ghent University)
"""
import os
import shutil
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
class EB_MetaVelvet(ConfigureMake):
    """
    Support for building MetaVelvet
    """

    def configure_step(self):
        """
        No configure step required for MetaVelvet.
        """
        pass

    def install_step(self):
        """
        Install by copying the built executables into <installdir>/bin.

        Raises EasyBuildError if creating the target directory or copying
        a file fails.
        """
        srcdir = self.cfg['start_dir']
        destdir = os.path.join(self.installdir, 'bin')
        srcfile = None
        # Get executable files: for i in $(find . -maxdepth 1 -type f -perm +111 -print | sed -e 's/\.\///g' | awk '{print "\""$0"\""}' | grep -vE "\.sh|\.html"); do echo -ne "$i, "; done && echo
        # Fix: use the 'except ... as err' form -- valid on Python 2.6+ and
        # required on Python 3; the old 'except OSError, err' spelling is a
        # SyntaxError under Python 3.
        try:
            os.makedirs(destdir)
            for filename in ["meta-velvetg"]:
                srcfile = os.path.join(srcdir, filename)
                shutil.copy2(srcfile, destdir)
        except OSError as err:
            raise EasyBuildError("Copying %s to installation dir %s failed: %s", srcfile, destdir, err)

    def sanity_check_step(self):
        """Custom sanity check for MetaVelvet."""
        custom_paths = {
            'files': ['bin/meta-velvetg'],
            'dirs': []
        }
        super(EB_MetaVelvet, self).sanity_check_step(custom_paths=custom_paths)
|
#!/usr/bin/python3
# This script will compare the versions of ebuilds in the funtoo portage tree against
# the versions of ebuilds in the target portage tree. Any higher versions in the
# target Portage tree will be printed to stdout.
import portage.versions
import os,sys
import subprocess
import json
from merge_utils import *
dirpath = os.path.dirname(os.path.realpath(__file__))
print(" | List of differences between funtoo and gentoo")
print("=============================================")
def getKeywords(portdir, ebuild, warn):
    """Extract KEYWORDS for an ebuild by shelling out to keywords.sh.

    Returns (0, keyword_set) on success, or the raw (status, output) pair
    when the helper script fails.  `warn` is accepted for interface
    compatibility but is currently unused.
    """
    status, output = subprocess.getstatusoutput(
        dirpath + "/keywords.sh %s %s" % (portdir, ebuild))
    if status != 0:
        return (status, output)
    return (0, set(output.split()))
if len(sys.argv) != 3:
print("Please specify funtoo tree as first argument, gentoo tree as second argument.")
sys.exit(1)
gportdir=sys.argv[2]
portdir=sys.argv[1]
def filterOnKeywords(portdir, ebuilds, keywords, warn=False):
    """
    Skim masked/unkeyworded versions off the top of an ebuild list.

    Repeatedly takes the "best" (most recent) version of the list and
    drops it while its KEYWORDS (read via getKeywords()) share nothing
    with the requested `keywords`; the loop stops at the first version
    that matches.  This keeps comparisons fair -- e.g. gcc-6.0 added to
    the tree masked does not count as the tree's best version.

    Returns a filtered copy of `ebuilds`.
    """
    if len(ebuilds) == 0:
        return []
    remaining = ebuilds[:]
    cat, pkg = portage.versions.catpkgsplit(remaining[0])[:2]
    wanted = set(keywords)
    while True:
        best = portage.versions.best(remaining)
        if best == "":
            break
        retval, best_keywords = getKeywords(
            portdir, "%s/%s/%s.ebuild" % (cat, pkg, best.split("/")[1]), warn)
        if wanted & best_keywords:
            break
        remaining.remove(best)
    return remaining
def get_cpv_in_portdir(portdir, cat, pkg):
    """Return "cat/pkg-version" strings for every .ebuild under
    portdir/cat/pkg, or [] when that package directory does not exist."""
    pkgdir = "%s/%s/%s" % (portdir, cat, pkg)
    # isdir() is False for missing paths too, covering both original checks.
    if not os.path.isdir(pkgdir):
        return []
    return ["%s/%s" % (cat, name[:-7])
            for name in os.listdir(pkgdir)
            if name.endswith(".ebuild")]
def version_compare(portdir, gportdir, keywords, label):
    """Print packages where the gentoo tree (gportdir) carries a higher
    visible version than the funtoo tree (portdir), considering only
    ebuilds whose KEYWORDS match `keywords`; matches are also appended
    to the module-level json_out[label] list.
    """
    # Fix: a bare `print` on Python 3 just evaluates the function object
    # and outputs nothing -- print() is required for the intended blank line.
    print()
    print("Package comparison for %s" % keywords)
    print("============================================")
    print("(note that package.{un}mask(s) are ignored - looking at ebuilds only)")
    print()
    for cat in os.listdir(portdir):
        if cat == ".git":
            continue
        # Only compare categories present (as directories) in both trees.
        if not os.path.exists(gportdir+"/"+cat):
            continue
        if not os.path.isdir(gportdir+"/"+cat):
            continue
        for pkg in os.listdir(os.path.join(portdir,cat)):
            ebuilds = get_cpv_in_portdir(portdir, cat, pkg)
            gebuilds = get_cpv_in_portdir(gportdir, cat, pkg)
            ebuilds = filterOnKeywords(portdir, ebuilds, keywords, warn=True)
            if len(ebuilds) == 0:
                continue
            fbest = portage.versions.best(ebuilds)
            gebuilds = filterOnKeywords(gportdir, gebuilds, keywords, warn=False)
            if len(gebuilds) == 0:
                continue
            gbest = portage.versions.best(gebuilds)
            if fbest == gbest:
                continue
            # a little trickery to ignore rev differences:
            fps = list(portage.versions.catpkgsplit(fbest))[1:]
            gps = list(portage.versions.catpkgsplit(gbest))[1:]
            gps[-1] = "r0"
            fps[-1] = "r0"
            # Skip live ("9999"-style) ebuilds on the gentoo side.
            if gps[-2] in [ "9999", "99999", "999999", "9999999", "99999999"]:
                continue
            mycmp = portage.versions.pkgcmp(fps, gps)
            if mycmp == -1:
                # gentoo is ahead: record "cat/pkg gentoo-ver funtoo-ver".
                json_out[label].append("%s/%s %s %s" % (cat, pkg, gbest[len(cat)+len(pkg)+2:], fbest[len(cat)+len(pkg)+2:]))
                print("%s (vs. %s in funtoo)" % ( gbest, fbest ))
json_out={}
# Only ~amd64 is compared at present; the ~x86 branch below is kept for
# easy re-enabling.
for keyw in [ "~amd64" ]:
    if keyw == "~x86":
        label = "fcx8632"
    elif keyw == "~amd64":
        label = "fcx8664"
    # NOTE(review): `label` would be undefined for any other keyword value.
    json_out[label] = []
    if keyw[0] == "~":
        # for unstable, add stable arch and ~* and * keywords too
        keyw = [ keyw, keyw[1:], "~*", "*"]
    else:
        # for stable, also consider the * keyword
        keyw = [ keyw, "*"]
    version_compare(portdir,gportdir,keyw,label)
for key in json_out:
    json_out[key].sort()
    # Serialize each result list as one comma-separated string.
    json_out[key] = ",".join(json_out[key])
jsonfile = "/home/ports/public_html/my.json"
a = open(jsonfile, 'w')
json.dump(json_out, a, sort_keys=True, indent=4, separators=(',',": "))
a.close()
print("Wrote output to %s" % jsonfile)
|
all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import alabaster
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'alabaster',
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinxcontrib.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ribolands'
copyright = u'2016, Stefan Badelt'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.0'
# The full version, including alpha/beta/rc tags.
# NOTE(review): release ('0.2.0') is behind version ('0.3.0'); Sphinx
# convention is that release extends version -- confirm which is correct.
release = '0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory | . They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' ti | mestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ribolandsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'ribolands.tex', u'ribolands Documentation',
u'Stefan Badelt', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ribolands', u'ribolands Documentation',
[u'Stefan Badelt'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ribolands', u'ribolands Documentation',
u'Stefan Badelt', 'ribolands', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#te |
# adapt | ers | /tensorflow module initialization goes here...
|
# Django app-config convention: points at this plugin's AppConfig class.
default_app_config = 'wiki.plugins.links.apps.LinksConfig'
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
import Axon
class angleIncrement(Axon.Component.component):
    """Continuously emits a rotation angle on "outbox", stepping by 0.1
    per cycle and wrapping back once it exceeds 360."""
    def main(self):
        current = 0
        while 1:
            self.send(current, "outbox")
            current += 0.1
            if current > 360:
                current -= 360
            yield 1
class bounce3D(Axon.Component.component):
    """Emits a position whose z coordinate oscillates around -10.

    Note: the same list object is sent on every cycle, so receivers see
    the live, mutating position.
    """
    def main(self):
        pos = [0.0, 0.0, -5.0]
        step = 0.01
        while 1:
            # Reverse direction whenever z strays more than 5 from -10.
            if abs(pos[2] + 10) > 5:
                step = -step
            pos[2] += step
            self.send(pos, "outbox")
            yield 1
class rotatingCube(Axon.Component.component):
    """Pygame/OpenGL component drawing a coloured cube, re-rendered every
    cycle with the latest angle and position received on its inboxes."""
    Inboxes = {
        "inbox": "not used",
        "control": "ignored",
        "angle" : "We expect to recieve messages telling us the angle of rotation",
        "position" : "We expect to receive messages telling us the new position",
    }
    def main(self):
        pygame.init()
        screen = pygame.display.set_mode((300,300),OPENGL|DOUBLEBUF)
        pygame.display.set_caption('Simple cube')
        # background
        glClearColor(0.0,0.0,0.0,0.0)
        # enable depth tests
        glClearDepth(1.0)
        glEnable(GL_DEPTH_TEST)
        # projection matrix
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(45.0, float(300)/float(300), 0.1, 100.0)
        # model matrix
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        pygame.display.flip()
        angle=0
        position = (0.0,0.0,-15.0)
        while 1:
            yield 1
            for event in pygame.event.get():
                if event.type == QUIT:
                    return
            # clear screen
            glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
            while self.dataReady("angle"):
                # Use a while loop to ensure we clear the inbox to avoid messages piling up.
                angle = self.recv("angle")
            while self.dataReady("position"):
                # Drain to the most recent position as well.
                position = self.recv("position")
            # translation and rotation
            glPushMatrix()
            glTranslate(*position)
            glRotate(angle, 1.0,1.0,1.0)
            # draw faces
            glBegin(GL_QUADS)
            # +z face (red)
            glColor3f(1.0,0.0,0.0)
            glVertex3f(1.0,1.0,1.0)
            glVertex3f(1.0,-1.0,1.0)
            glVertex3f(-1.0,-1.0,1.0)
            glVertex3f(-1.0,1.0,1.0)
            # -z face (green)
            glColor3f(0.0,1.0,0.0)
            glVertex3f(1.0,1.0,-1.0)
            glVertex3f(1.0,-1.0,-1.0)
            glVertex3f(-1.0,-1.0,-1.0)
            glVertex3f(-1.0,1.0,-1.0)
            # +x face (blue)
            glColor3f(0.0,0.0,1.0)
            glVertex3f(1.0,1.0,1.0)
            glVertex3f(1.0,-1.0,1.0)
            glVertex3f(1.0,-1.0,-1.0)
            glVertex3f(1.0,1.0,-1.0)
            # -x face (magenta)
            glColor3f(1.0,0.0,1.0)
            glVertex3f(-1.0,1.0,1.0)
            glVertex3f(-1.0,-1.0,1.0)
            glVertex3f(-1.0,-1.0,-1.0)
            glVertex3f(-1.0,1.0,-1.0)
            # +y face (cyan)
            glColor3f(0.0,1.0,1.0)
            glVertex3f(1.0,1.0,1.0)
            glVertex3f(-1.0,1.0,1.0)
            glVertex3f(-1.0,1.0,-1.0)
            glVertex3f(1.0,1.0,-1.0)
            # -y face (yellow)
            glColor3f(1.0,1.0,0.0)
            glVertex3f(1.0,-1.0,1.0)
            glVertex3f(-1.0,-1.0,1.0)
            glVertex3f(-1.0,-1.0,-1.0)
            glVertex3f(1.0,-1.0,-1.0)
            glEnd()
            glPopMatrix()
            glFlush()
            pygame.display.flip()
if __name__=='__main__':
    from Kamaelia.Util.Graphline import Graphline
    # Wire the angle and position generators into the cube renderer.
    Graphline(
        TRANSLATION = bounce3D(),
        ROTATION = angleIncrement(),
        CUBE = rotatingCube(),
        linkages = {
            ("ROTATION", "outbox") : ("CUBE", "angle"),
            ("TRANSLATION", "outbox") : ("CUBE", "position"),
        }
    ).run()
|
import codecs
import os
from setuptools import setup, find_packages
def read(*parts):
    """Return the UTF-8 decoded contents of the file at *parts*,
    resolved relative to this file's directory."""
    target = os.path.join(os.path.dirname(__file__), *parts)
    with codecs.open(target, encoding='utf-8') as handle:
        return handle.read()
# Package version as a tuple, rendered to "0.3.9" for setuptools.
VERSION = (0, 3, 9)
version = '.'.join(map(str, VERSION))
setup(
    name='python-quickbooks',
    version=version,
    author='Edward Emanuel Jr.',
    author_email='edward@sidecarsinc.com',
    description='A Python library for accessing the Quickbooks API.',
    url='https://github.com/sidecars/python-quickbooks',
    license='MIT',
    keywords=['quickbooks', 'qbo', 'accounting'],
    # Long description is read from README.rst via the helper above.
    long_description=read('README.rst'),
    install_requires=[
        'setuptools',
        'rauth>=0.7.1',
        'requests>=2.7.0',
        'simplejson>=2.2.0',
        'six>=1.4.0',
        'python-dateutil',
    ],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    packages=find_packages(),
)
|
# -*- coding: utf-8 -*-
# Genera | ted by Django 1.11.8 on 2017-12-11 07:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds History.revision and gives History.json_args a '{}' default."""

    dependencies = [
        ('main', '0032_history_json_args'),
    ]
    operations = [
        migrations.AddField(
            model_name='history',
            name='revision',
            field=models.CharField(blank=True, max_length=256, null=True),
        ),
        migrations.AlterField(
            model_name='history',
            name='json_args',
            field=models.TextField(default='{}'),
        ),
    ]
|
tepanov/'
# Precip (bias corrected)
in_path_RCM_pr_nbc_50km=nobackup+"CLIPC/Model_data/pr/"+experiment+"/50km/daily/SMHI_DBS43_2006_2100/"
out_path_RCM_pr_nbc_50km=nobackup+"icclim_indices_v4.2.3_seapoint_fixed/EUR-44/"+experiment+"/pr/"
# output path still for test only
# =====================================================================================================
# Every RCM output file has predictable root name (specific to resolution!)
# ==> Construct data file names
#8/10 models. 2 more below in separate FOR loops.
models_list_50km = ['CCCma-CanESM2','CNRM-CERFACS-CNRM-CM5','NCC-NorESM1-M',
'MPI-M-MPI-ESM-LR','IPSL-IPSL-CM5A-MR','MIROC-MIROC5',
'NOAA-GFDL-GFDL-ESM2M','CSIRO-QCCCE-CSIRO-Mk3-6-0']
#models_list_50km = ['CCCma-CanESM2']
#models_list_50km = ['CNRM-CERFACS-CNRM-CM5']
for model in models_list_50km:
# CONSTRUCT RCM FILE NAMES
# New root for non-bias corrected (!nbc!) files:
pr_nbc_file_root_hist = "prAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
pr_nbc_file_root_proj = "prAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
# Explicit list
files_pr_nbc_50km_hist = in_path_RCM_pr_nbc_50km+pr_nbc_file_root_hist+"19660101-19701231.nc"
files_pr_nbc_50km_proj = in_path_RCM_pr_nbc_50km+pr_nbc_file_root_proj+"20060101-20101231.nc"
# Tell me which files you imported
print 'Historical input Model files:', files_pr_nbc_50km_hist # sep='\n'
print 'Projection input Model files:', files_pr_nbc_50km_proj # sep='\n'
# CONSTRUCT INDICES FILE NAMES
# Create datasets from netCDF files
nc_in_hist = Dataset(files_pr_nbc_50km_hist,'r')
nc_in_proj = Dataset(files_pr_nbc_50km_proj,'r')
# Print current GCM tracking id
# Historical
print
print
print "For historical model:", model
print "Historical tracking id", nc_in_hist.tracking_id
print
for file_hist in os.listdir(out_path_RCM_pr_nbc_50km):
# ----------------------------------------------------------------
# Pre-change of
# model name in output file for models:
# indice into r1m when writing output file:
#
# NCC-NorESM1-M --> NorESM1-M
# MIROC-MIROC5 --> MIROC5
model_fout=model
#print "input model_fout is: ",model
if model == 'NCC-NorESM1-M': model_fout='NorESM1-M'
elif model == 'MIROC-MIROC5': model_fout='MIROC5'
elif model == 'CNRM-CERFACS-CNRM-CM5': model_fout='CNRM-CM5'
elif model == 'MPI-M-MPI-ESM-LR': model_fout='MPI-ESM-LR'
elif model == 'IPSL-IPSL-CM5A-MR': model_fout='IPSL-CM5A-MR'
elif model == 'NOAA-GFDL-GFDL-ESM2M': model_fout='GFDL-ESM2M'
elif model == 'CSIRO-QCCCE-CSIRO-Mk3-6-0': model_fout='CSIRO-Mk3-6-0'
else: model_fout=model
#print "new model_fout is: ",model_fout
#if fnmatch.fnmatch(file_hist, '*CCCma-CanESM2_historical*'):
if fnmatch.fnmatch(file_hist, "*"+model_fout+"_historical*"):
#if fnmatch.fnmatch(file_hist, "*historical*"):
print "Indice where new historical invar_tracking_id goes is:", file_hist
#print
#print '%s' % (model)
# Create Dataset from these files
nc_indice_pr_hist = Dataset(out_path_RCM_pr_nbc_50km+file_hist,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_pr_hist.comment='fun'
nc_indice_pr_hist.invar_tracking_id=nc_in_hist.tracking_id
#nc_in_hist.comment = 'test'
#nc_in_hist.invar_tracking_id_test = 'test'
# Projections
print
print
print "For projections model:", model
print "Projection tracking id", nc_in_proj.tracking_id
print
print
for file_proj in os.listdir(out_path_RCM_pr_nbc_50km):
# ----------------------------------------------------------------
# Pre-change of
# model name in output file for models:
# indice into r1m when writing output file:
#
# NCC-NorESM1-M --> NorESM1-M
# MIROC-MIROC5 --> MIROC5
model_fout=model
#print "input model_fout is: ",model
if model == 'NCC-NorESM1-M': model_fout='NorESM1-M'
elif model == 'MIROC-MIROC5': model_fout='MIROC5'
elif model == 'CNRM-CERFACS-CNRM-CM5': model_fout='CNRM-CM5'
elif model == 'MPI-M-MPI-ESM-LR': model_fout='MPI-ESM-LR'
elif model == 'IPSL-IPSL-CM5A-MR': model_fout='IPSL-CM5A-MR'
elif model == 'NOAA-GFDL-GFDL-ESM2M': model_fout='GFDL-ESM2M'
elif model == 'CSIRO-QCCCE-CSIRO-Mk3-6-0': model_fout='CSIRO-Mk3-6-0'
else: model_fout=model
#print "new model_fout is: ",model_fout
if fnmatch.fnmatch(file_proj, "*"+model_fout+"_"+experiment+"*"):
print "Indice where new projection invar_tracking_id goes is:", file_proj
print
# Create Dataset from these files
nc_indice_pr_proj = Dataset(out_path_RCM_pr_nbc_50km+file_proj,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_pr_hist.comment='fun'
nc_indice_pr_proj.invar_tracking_id=nc_in_proj.tracking_id
# Had-GEM
models_list_50km_HadGEM = ['MOHC-HadGEM2-ES']
for model in models_list_50km_HadGEM:
# CONSTRUCT RCM FILE NAMES
# New root for non-bias corrected (!nbc!) files:
pr_nbc_file_root_hist = "prAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
pr_nbc_file_root_proj = "prAdjust_EUR-44_"+model+"_"+experiment+"_r1i1p1_SMHI-RCA4_v1-SMHI-DBS43-EOBS10-1981-2010_day_"
# Explicit list
files_pr_nbc_50km_hist = in_path_RCM_pr_nbc_50km+pr_nbc_file_root_hist+"19660101-19701230.nc"
files_pr_nbc_50km_proj = in_path_RCM_pr_nbc_50km+pr_nbc_file_root_proj+"20060101-20101230.nc"
# Tell me which files you imported
print 'Histori | cal input Model files:', files_pr_nbc_50km_hist # sep='\n'
print 'Projection input Model files:', files_pr_nbc_50km_proj # sep='\n'
# CONSTRUCT INDICES FILE NAMES
# Create datasets from netCDF files
nc_in_hist = Dataset(files_pr_nbc_50km_hist,'r')
nc_in_proj = Dataset(files_pr_nbc_50km_proj,'r')
# Print current GCM | tracking id
# Historical
print
print
print "For historical model:", model
print "Historical tracking id", nc_in_hist.tracking_id
print
for file_hist in os.listdir(out_path_RCM_pr_nbc_50km):
#if fnmatch.fnmatch(file_hist, '*CCCma-CanESM2_historical*'):
if fnmatch.fnmatch(file_hist, "*"+model[5:15]+"_historical*"):
#if fnmatch.fnmatch(file_hist, "*historical*"):
print "Indice where new historical invar_tracking_id goes is:", file_hist
#print
#print '%s' % (model)
# Create Dataset from these files
nc_indice_pr_hist = Dataset(out_path_RCM_pr_nbc_50km+file_hist,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_pr_hist.comment='fun'
nc_indice_pr_hist.invar_tracking_id=nc_in_hist.tracking_id
#nc_in_hist.comment = 'test'
#nc_in_hist.invar_tracking_id_test = 'test'
# Projections
print
print
print "For projections model:", model
print "Projection tracking id", nc_in_proj.tracking_id
print
print
for file_proj in os.listdir(out_path_RCM_pr_nbc_50km):
if fnmatch.fnmatch(file_proj, "*"+model[5:15]+"_"+experiment+"*"):
print "Indice where new projection invar_tracking_id goes is:", file_proj
# Create Dataset from these files
nc_indice_pr_proj = Dataset(out_path_RCM_pr_nbc_50km+file_proj,'a')
# Insert invar_tracking_id global attributed with value on the right
# (imported RCM tracking id from the single RCM file above)
#nc_indice_pr_hist.comment='fun'
nc_indice_pr_proj.invar_tracking_id=nc_in_proj.tracking_id
print
# EC-EARTH
models_list_50km_EC_EARTH = ['ICHEC-EC-EARTH']
for model in models_list_50km_EC_EARTH:
# CONSTRUCT RCM FILE NAMES
# New root for non-bias corrected (!nbc!) files:
pr_nbc_file_root_hist = "prAdjust_EUR-44_ |
# Helper class that wraps session (login/logout) operations for the tests.
class SessionHelper:
    def __init__(self, app):
        # app: application fixture that exposes the Selenium WebDriver as app.wd.
        self.app = app
    # Log in to the site with the given credentials.
    def login(self, username, password):
        wd = self.app.wd
        self.app.open_home_page()
        wd.find_element_by_name("user").click()
        wd.find_element_by_name("user").clear()
        wd.find_element_by_name("user").send_keys(username)
        wd.find_element_by_name("pass").click()
        wd.find_element_by_name("pass").clear()
        wd.find_element_by_name("pass").send_keys(password)
        wd.find_element_by_xpath("//form[@id='LoginForm']/input[3]").click()
    # Log out of the site.
    def logout(self):
        wd = self.app.wd
        wd.find_element_by_link_text("Logout").click()
    # Tear down the fixture after the test run completes.
    def destroy(self):
        self.app.wd.quit()
    # Ensure no user is logged in.
    def ensure_logout(self):
        wd = self.app.wd
        if self.is_logged_in():
            self.logout()
    # Check whether some user is currently logged in.
    def is_logged_in(self):
        wd = self.app.wd
        # A "Logout" link is only present while a user is logged in.
        return len(wd.find_elements_by_link_text("Logout")) > 0
    # Check which username the current session is logged in as.
    def is_logged_in_as(self, username):
        wd = self.app.wd
        # The page shows the logged-in username in parentheses, e.g. "(admin)".
        return wd.find_element_by_xpath("//div/div[1]/form/b").text == "("+username+")"
    # Guarantee the desired login state while tests are running.
    def ensure_login(self, username, password):
        wd = self.app.wd
        # If somebody is already logged in...
        if self.is_logged_in():
            # ...and it is the expected user, there is nothing to do.
            if self.is_logged_in_as(username):
                return
            else:
                # Otherwise log out so we can log in as the right user.
                self.logout()
        self.login(username, password)
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under th | e Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# | http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lift_to_graph."""
from tensorflow.python.eager import def_function
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops as framework_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.util import compat
class LiftToGraphTest(test.TestCase):
  """Tests for lift_to_graph.lift_to_graph."""

  def testCaptureOrdering(self):
    """Lifting must preserve the order of the function's captured variables."""
    v1 = resource_variable_ops.ResourceVariable(1.0)
    v2 = resource_variable_ops.ResourceVariable(2.0)
    v3 = resource_variable_ops.ResourceVariable(3.0)
    @def_function.function
    def fn():
      return v1 + v2 + v3
    concrete_fn = fn.get_concrete_function()
    original_captures = concrete_fn.graph.internal_captures
    outputs = concrete_fn.graph.outputs
    # Repeat many times so an ordering bug (e.g. one caused by nondeterministic
    # iteration) is very unlikely to slip through by chance.
    for _ in range(100):
      g = func_graph.FuncGraph('lifted')
      lift_to_graph.lift_to_graph(
          outputs, g, add_sources=True, handle_captures=True)
      lifted_captures = g.internal_captures
      self.assertLen(lifted_captures, 3)
      # Captures in the lifted graph must match the originals pairwise.
      for original, lifted in zip(original_captures, lifted_captures):
        self.assertEqual(original.name, lifted.name)
  def testClassAttrsRemoved(self):
    """Tests that _class attrs (from colocate_with()) are removed."""
    @def_function.function
    def fn():
      two = constant_op.constant(2.0, name='two')
      ten = constant_op.constant(10.0, name='ten')
      twenty = math_ops.multiply(two, ten, name='twenty')
      three = constant_op.constant(3.0, name='three')
      with framework_ops.colocate_with(twenty):
        thirty = math_ops.multiply(three, ten, name='thirty')
      return ten, twenty, thirty
    concrete_fn = fn.get_concrete_function()
    self.assertItemsEqual(  # Before lifting, 'fn' has colocation attrs.
        concrete_fn.graph.get_operation_by_name('thirty').colocation_groups(),
        [compat.as_bytes('loc:@twenty')])
    thirty_out = concrete_fn.graph.outputs[2]
    g = func_graph.FuncGraph('lifted')
    # Lift only the subgraph that produces 'thirty'.
    lift_to_graph.lift_to_graph([thirty_out], g)
    # After lifting, colocation attrs are gone.
    ops = g.get_operations()
    self.assertItemsEqual([op.name for op in ops],
                          ['three', 'ten', 'thirty',  # Lifted from `fn` body.
                           thirty_out.op.name])  # Wrapper for output.
    for op in ops:
      with self.assertRaises(ValueError):
        class_attr = op.get_attr('_class')  # Expected not to exist.
        print('Unexpected class_attr', class_attr, 'on', op.name)
      self.assertItemsEqual(op.colocation_groups(),  # Expect default self-ref.
                            [compat.as_bytes('loc:@%s' % op.name)])
# Standard TensorFlow test entry point: runs every test method in this module.
if __name__ == '__main__':
  test.main()
|
from django.conf.urls.defaults import *
from indivo.views import *
from indivo.lib.utils import MethodDispatcher
# Carenet URL routing. Each regex maps HTTP verbs to Indivo view functions via
# MethodDispatcher, so a single URL can serve several methods.
urlpatterns = patterns('',
  (r'^$', MethodDispatcher({
        'DELETE' : carenet_delete})),
  (r'^/rename$', MethodDispatcher({
        'POST' : carenet_rename})),
  (r'^/record$', MethodDispatcher({'GET':carenet_record})),
  # Manage documents
  (r'^/documents/', include('indivo.urls.carenet_documents')),
  # Manage accounts
  (r'^/accounts/$',
   MethodDispatcher({
        'GET' : carenet_account_list,
        'POST' : carenet_account_create
        })),
  (r'^/accounts/(?P<account_id>[^/]+)$',
   MethodDispatcher({ 'DELETE' : carenet_account_delete })),
  # Manage apps
  (r'^/apps/$',
   MethodDispatcher({ 'GET' : carenet_apps_list})),
  (r'^/apps/(?P<pha_email>[^/]+)$',
   MethodDispatcher({ 'PUT' : carenet_apps_create,
                      'DELETE': carenet_apps_delete})),
  # Permissions Calls
  (r'^/accounts/(?P<account_id>[^/]+)/permissions$',
   MethodDispatcher({ 'GET' : carenet_account_permissions })),
  (r'^/apps/(?P<pha_email>[^/]+)/permissions$',
   MethodDispatcher({ 'GET' : carenet_app_permissions })),
  # Reporting Calls
  (r'^/reports/minimal/procedures/$',
   MethodDispatcher({'GET':carenet_procedure_list})),
  (r'^/reports/minimal/simple-clinical-notes/$',
   MethodDispatcher({'GET':carenet_simple_clinical_notes_list})),
  (r'^/reports/minimal/equipment/$',
   MethodDispatcher({'GET':carenet_equipment_list})),
  (r'^/reports/minimal/measurements/(?P<lab_code>[^/]+)/$',
   MethodDispatcher({'GET':carenet_measurement_list})),
  # Generic fallback: report list for any named data model.
  (r'^/reports/(?P<data_model>[^/]+)/$',
   MethodDispatcher({'GET':carenet_generic_list})),
  # Demographics
  (r'^/demographics$', MethodDispatcher({'GET': read_demographics_carenet})),
)
|
# This is just a simple example of how to inspect ASTs visually.
#
# This can be useful for developing new operators, etc.
import ast
from cosmic_ray.mutating import MutatingCore
from cosmic_ray.operators.comparison_operator_replacement import MutateComparisonOperator
# A small expression mixing `is not` / `is` comparisons to mutate.
code = "((x is not y) ^ (x is y))"
node = ast.parse(code)
print()
print(ast.dump(node))
# Apply the comparison-operator replacement (activation record 0) and dump the
# mutated AST so it can be compared with the original above.
core = MutatingCore(0)
operator = MutateComparisonOperator(core)
new_node = operator.visit(node)
print()
print(ast.dump(new_node))
|
# Read N and K, then N values. Fold the K largest values (in ascending order)
# into a running result by repeatedly averaging: result = (result + v) / 2.
# NOTE(review): the first of the K values is averaged against an initial 0,
# which halves it — confirm this matches the intended problem statement.
N, K = (int(tok) for tok in input().split())
values = sorted(int(tok) for tok in input().split())
result = 0
for v in values[len(values) - K:]:
    result = (result + v) / 2
print(result)
| |
#!/usr/bin/env python3
import re
# Demonstrate the difference between re.match (anchored at position 0) and
# re.search (scans the whole string), then walk every match of a compiled
# pattern by restarting the search just past each previous hit.
text = 'This is some text -- with punctuation.'
pattern = 'is'
print('Text :', text)
print('Pattern:', pattern)
# match() only succeeds when the pattern matches at the very start: None here.
m = re.match(pattern, text)
print('Match :', m)
# search() finds the first occurrence anywhere in the string.
s = re.search(pattern, text)
print('Search :', s)
# Now find every whole word that contains "is".
pattern = re.compile(r'\b\w*is\w*\b')
print('Text:', text)
pos = 0
while (match := pattern.search(text, pos)) is not None:
    s, e = match.start(), match.end()
    print(' %2d : %2d = "%s"' % (s, e-1, text[s:e]))
    pos = e  # continue scanning after the end of this match
|
#!/usr/bin/python
import sys
import os
import re
# Make the sites package importable and point Django at the settings defined
# inline in this very module (__main__). This is a Python 2 / old-Django script.
sys.path.append('/home/al/sites')
os.environ['DJANGO_SETTINGS_MODULE'] = '__main__'
# Minimal inline Django settings.
DEFAULT_CHARSET = "utf-8"
TEMPLATE_DEBUG = False
LANGUAGE_CODE = "en"
INSTALLED_APPS = (
    'django.contrib.markup',
)
TEMPLATE_DIRS = (
    '/home/al/sites/liquidx/templates',
    '.'
)
from django.template import Template, Context, loader
# Render template `src` with an empty context and write the result to `dst`.
def make(src, dst):
    print '%s -> %s' % (src, dst)
    c = Context({})
    filled = loader.render_to_string(src, {})
    open(dst, 'w').write(filled)
if __name__ == "__main__":
    # Walk the current tree (skipping .svn directories) and render every
    # *.txt template into a sibling *.html file.
    for dirname, dirs, files in os.walk('.'):
        if re.search('/\.svn', dirname):
            continue
        for f in files:
            if f[-4:] == ".txt":
                newname = f.replace('.txt', '.html')
                make(os.path.join(dirname, f), os.path.join(dirname, newname))
|
import json
from collections import OrderedDict
from inspect import signature
from warnings import warn
import numpy as np
from sklearn.base import BaseEstimator
class Configuration(object):
    """A named, versioned bag of (JSON-serializable) parameters.

    `name` must be a string and `params` a dict; construction fails fast
    otherwise so malformed configurations are caught at creation time.
    """

    def __init__(self, name, version, params):
        if not isinstance(name, str):
            raise ValueError()
        if not isinstance(params, dict):
            raise ValueError()
        self.name = name
        self.version = version
        self.params = params

    def __str__(self):
        header = "%s-v%s" % (self.name, self.version)
        if not self.params:
            return header
        json_params = config_to_json(self.params)
        # Keep the string readable: elide large parameter sets.
        if len(json_params) < 200:
            return "%s: %s" % (header, json_params)
        return "%s {...}" % header

    def __eq__(self, other):
        if not isinstance(other, Configuration):
            return False
        return (self.name, self.version, self.params) == \
               (other.name, other.version, other.params)
class Configurable(object):
    """
    Configurable classes have names, versions, and a set of parameters that are either "simple" aka JSON serializable
    types or other Configurable objects. Configurable objects should also be serializable via pickle.
    Configurable classes are defined mainly to give us a human-readable way of reading of the `parameters`
    set for different objects and to attach version numbers to them.
    By default we follow the format sklearn uses for its `BaseEstimator` class, where parameters are automatically
    derived based on the constructor parameters.
    """
    @classmethod
    def _get_param_names(cls):
        """Return the sorted names of the constructor's parameters (excluding `self`)."""
        # Fetch the constructor; subclasses without their own __init__ fall
        # through to object.__init__, which has nothing to introspect.
        init = cls.__init__
        if init is object.__init__:
            # No explicit constructor to introspect
            return []
        init_signature = signature(init)
        parameters = [p for p in init_signature.parameters.values()
                      if p.name != 'self']
        if any(p.kind == p.VAR_POSITIONAL for p in parameters):
            # *args makes the parameter set ambiguous, so it is disallowed.
            raise RuntimeError()
        return sorted([p.name for p in parameters])
    @property
    def name(self):
        # Human-readable identifier; defaults to the class name.
        return self.__class__.__name__
    @property
    def version(self):
        # Subclasses bump this when their parameter semantics change.
        return 0
    def get_params(self):
        """Return a dict of constructor-parameter values, with nested objects described."""
        out = {}
        for key in self._get_param_names():
            # Parameters are assumed to be stored as same-named attributes
            # (sklearn convention); missing ones come back as None.
            v = getattr(self, key, None)
            if isinstance(v, Configurable):
                out[key] = v.get_config()
            elif hasattr(v, "get_config"):  # for keras objects
                out[key] = {"name": v.__class__.__name__, "config": v.get_config()}
            else:
                out[key] = v
        return out
    def get_config(self) -> Configuration:
        """Build a Configuration (name, version, described params) for this object."""
        params = {k: describe(v) for k,v in self.get_params().items()}
        return Configuration(self.name, self.version, params)
    def __getstate__(self):
        # Stamp the class version into the pickled state so that loading can
        # detect a mismatch between the stored and current class versions.
        state = dict(self.__dict__)
        if "version" in state:
            if state["version"] != self.version:
                raise RuntimeError()
        else:
            state["version"] = self.version
        return state
    def __setstate__(self, state):
        if "version" not in state:
            raise RuntimeError("Version should be in state (%s)" % self.__class__.__name__)
        if state["version"] != self.version:
            # Loading still proceeds after a mismatch; only a warning is issued.
            warn(("%s loaded with version %s, but class " +
                  "version is %s") % (self.__class__.__name__, state["version"], self.version))
        if "state" in state:
            # Legacy layout: the real attribute dict is nested under "state".
            self.__dict__ = state["state"]
        else:
            del state["version"]
            self.__dict__ = state
def describe(obj):
    """Recursively convert *obj* into a JSON-friendly description.

    Configurable objects become their Configuration; containers are rebuilt
    with described elements; anything else is returned unchanged. Configurable
    dict keys are rejected.
    """
    if isinstance(obj, Configurable):
        return obj.get_config()
    obj_type = type(obj)
    if obj_type in (list, set, frozenset, tuple):
        return obj_type([describe(e) for e in obj])
    if isinstance(obj, tuple):
        # A tuple subclass (e.g. a namedtuple): flatten it to a plain tuple.
        return tuple(describe(e) for e in obj)
    if obj_type in (dict, OrderedDict):
        output = OrderedDict()
        for key, value in obj.items():
            if isinstance(key, Configurable):
                raise ValueError()
            output[key] = describe(value)
        return output
    return obj
class EncodeDescription(json.JSONEncoder):
    """ Json encoder that encodes 'Configurable' objects as dictionaries and handles
    some numpy types. Note decoding this output will not reproduce the original input,
    for these types, this is only intended to be used to produce human readable output.
    """
    def default(self, obj):
        # Numpy scalars/arrays -> native Python equivalents.
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.dtype):
            return str(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.bool_):
            return bool(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, BaseEstimator):  # handle sklearn estimators
            return Configuration(obj.__class__.__name__, 0, obj.get_params())
        elif isinstance(obj, Configuration):
            # "name"/"version" are reserved keys in the flattened layout below.
            if "version" in obj.params or "name" in obj.params:
                raise ValueError()
            out = OrderedDict()
            out["name"] = obj.name
            if obj.version != 0:
                out["version"] = obj.version
            out.update(obj.params)
            return out
        elif isinstance(obj, Configurable):
            return obj.get_config()
        elif isinstance(obj, set):
            return sorted(obj)  # Ensure deterministic order
        else:
            # Fall back to the base encoder; stringify anything it rejects.
            try:
                return super().default(obj)
            except TypeError:
                return str(obj)
def config_to_json(data, indent=None):
    """Serialize *data* to JSON, handling Configurable/numpy values via EncodeDescription."""
    return json.dumps(data, cls=EncodeDescription, sort_keys=False, indent=indent)
|
"""
Send and receive pre-defined messages through the Bohrium component stack
=========================================================================
"""
from ._bh_api import message as msg
# Each helper below simply forwards a fixed, well-known message string through
# msg() to the Bohrium component stack; the string contents are part of the
# Bohrium message protocol and must not be altered.
def statistic_enable_and_reset():
    """Reset and enable the Bohrium statistic"""
    return msg("statistic_enable_and_reset")
def statistic():
    """Return a YAML string of Bohrium statistic"""
    return msg("statistic")
def gpu_disable():
    """Disable the GPU backend in the current runtime stack"""
    return msg("GPU: disable")
def gpu_enable():
    """Enable the GPU backend in the current runtime stack"""
    return msg("GPU: enable")
def runtime_info():
    """Return a YAML string describing the current Bohrium runtime"""
    return msg("info")
def cuda_use_current_context():
    """Tell the CUDA backend to use the current CUDA context (useful for PyCUDA interop)"""
    return msg("CUDA: use current context")
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# SoundConverter - GNOME application for converting between audio formats.
# Copyright 2004 Lars Wirzenius
# Copyright 2005-2020 Gautier Portet
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 of the License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free So | ftware
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import sys
try:
    import DistUtilsExtra.auto
except ImportError:
    sys.stderr.write('You need python-distutils-extra\n')
    sys.exit(1)
import os
import subprocess
import DistUtilsExtra.auto
# This will automatically, assuming that the prefix is /usr
# - Compile and install po files to /usr/share | /locale*.mo,
# - Install .desktop files to /usr/share/applications
# - Install all the py files to /usr/lib/python3.8/site-packages/soundconverter
# - Copy bin to /usr/bin
# - Copy the rest to /usr/share/soundconverter, like the .glade file
# Thanks to DistUtilsExtra (https://salsa.debian.org/python-team/modules/python-distutils-extra/-/tree/master/doc) # noqa
class Install(DistUtilsExtra.auto.install_auto):
    """Custom install command that compiles GSettings schemas after installing."""

    def run(self):
        """Run the normal auto-install, then compile the installed schemas.

        DistUtilsExtra automatically copies data/org.soundconverter.gschema.xml
        to share/glib-2.0/schemas/ but doesn't seem to compile it, so GSettings
        would not find the schema without this extra step.
        """
        DistUtilsExtra.auto.install_auto.run(self)
        glib_schema_path = os.path.join(self.install_data, 'share/glib-2.0/schemas/')
        # Invoke the compiler with an argument list instead of a shell string
        # (os.system) so install prefixes containing spaces or shell
        # metacharacters are handled correctly. Failure remains non-fatal,
        # matching the previous best-effort os.system() behaviour.
        cmd = ['glib-compile-schemas', glib_schema_path]
        print('running {}'.format(' '.join(cmd)))
        subprocess.run(cmd, check=False)
# Package metadata. DistUtilsExtra.auto discovers py/po/desktop files on its
# own; only the extra data files and the custom install step are listed here.
DistUtilsExtra.auto.setup(
    name='soundconverter',
    version='4.0.2',
    description=(
        'A simple sound converter application for the GNOME environment. '
        'It writes WAV, FLAC, MP3, and Ogg Vorbis files.'
    ),
    license='GPL-3.0',
    data_files=[
        ('share/metainfo/', ['data/soundconverter.appdata.xml']),
        ('share/pixmaps/', ['data/soundconverter.png']),
        ('share/icons/hicolor/scalable/apps/', ['data/soundconverter.svg'])
    ],
    cmdclass={
        'install': Install
    }
)
|
"""The tests for the GeoNet NZ Quakes Feed integration."""
import datetime
from unittest.mock import patch
from homeassistant.components import geonetnz_quakes
from homeassistant.components.geonetnz_quakes import DEFAULT_SCAN_INTERVAL
from homeassistant.components.geonetnz_quakes.sensor import (
ATTR_CREATED,
ATTR_LAST_UPDATE,
ATTR_LAST_UPDATE_SUCCESSFUL,
ATTR_REMOVED,
ATTR_STATUS,
ATTR_UPDATED,
)
from homeassistant.const import (
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONF_RADIUS,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.geonetnz_quakes import _generate_mock_feed_entry
CONFIG = {geonetnz_quakes.DOMAIN: {CONF_RADIUS: 200}}
async def test_setup(hass, legacy_patchable_time):
    """Test the general setup of the integration."""
    # Set up some mock feed entries for this test.
    mock_entry_1 = _generate_mock_feed_entry(
        "1234",
        "Title 1",
        15.5,
        (38.0, -3.0),
        locality="Locality 1",
        attribution="Attribution 1",
        time=datetime.datetime(2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc),
        magnitude=5.7,
        mmi=5,
        depth=10.5,
        quality="best",
    )
    mock_entry_2 = _generate_mock_feed_entry(
        "2345", "Title 2", 20.5, (38.1, -3.1), magnitude=4.6
    )
    mock_entry_3 = _generate_mock_feed_entry(
        "3456", "Title 3", 25.5, (38.2, -3.2), locality="Locality 3"
    )
    mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (38.3, -3.3))
    # Patching 'utcnow' to gain more control over the timed update.
    utcnow = dt_util.utcnow()
    with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
        "aio_geojson_client.feed.GeoJsonFeed.update"
    ) as mock_feed_update:
        mock_feed_update.return_value = "OK", [mock_entry_1, mock_entry_2, mock_entry_3]
        assert await async_setup_component(hass, geonetnz_quakes.DOMAIN, CONFIG)
        # Artificially trigger update and collect events.
        hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
        await hass.async_block_till_done()
        all_states = hass.states.async_all()
        # 3 geolocation and 1 sensor entities
        assert len(all_states) == 4
        state = hass.states.get("sensor.geonet_nz_quakes_32_87336_117_22743")
        assert state is not None
        assert int(state.state) == 3
        assert state.name == "GeoNet NZ Quakes (32.87336, -117.22743)"
        attributes = state.attributes
        assert attributes[ATTR_STATUS] == "OK"
        assert attributes[ATTR_CREATED] == 3
        # Timestamps exposed by the sensor must be timezone-aware (UTC).
        assert attributes[ATTR_LAST_UPDATE].tzinfo == dt_util.UTC
        assert attributes[ATTR_LAST_UPDATE_SUCCESSFUL].tzinfo == dt_util.UTC
        assert attributes[ATTR_LAST_UPDATE] == attributes[ATTR_LAST_UPDATE_SUCCESSFUL]
        assert attributes[ATTR_UNIT_OF_MEASUREMENT] == "quakes"
        assert attributes[ATTR_ICON] == "mdi:pulse"
        # Simulate an update - two existing, one new entry, one outdated entry
        mock_feed_update.return_value = "OK", [mock_entry_1, mock_entry_4, mock_entry_3]
        async_fire_time_changed(hass, utcnow + DEFAULT_SCAN_INTERVAL)
        await hass.async_block_till_done()
        all_states = hass.states.async_all()
        assert len(all_states) == 4
        state = hass.states.get("sensor.geonet_nz_quakes_32_87336_117_22743")
        attributes = state.attributes
        assert attributes[ATTR_CREATED] == 1
        assert attributes[ATTR_UPDATED] == 2
        assert attributes[ATTR_REMOVED] == 1
        # Simulate an update - empty data, but successful update,
        # so no changes to entities.
        mock_feed_update.return_value = "OK_NO_DATA", None
        async_fire_time_changed(hass, utcnow + 2 * DEFAULT_SCAN_INTERVAL)
        await hass.async_block_till_done()
        all_states = hass.states.async_all()
        assert len(all_states) == 4
        # Simulate an update - error response, which removes all geolocation
        # entities (only the status sensor itself remains).
        mock_feed_update.return_value = "ERROR", None
        async_fire_time_changed(hass, utcnow + 3 * DEFAULT_SCAN_INTERVAL)
        await hass.async_block_till_done()
        all_states = hass.states.async_all()
        assert len(all_states) == 1
        state = hass.states.get("sensor.geonet_nz_quakes_32_87336_117_22743")
        attributes = state.attributes
        assert attributes[ATTR_REMOVED] == 3
|
import pyfits
from numpy import *
if __name__ == '__main__':
    # Build a 10x10 test image: constant background of 50 plus a 2-D Gaussian
    # bump (amplitude 200, sigma 1 pixel) centred in the frame, then write it
    # out as a FITS file, overwriting any previous copy.
    width, height = 10, 10
    sigma = 1.
    xs, ys = meshgrid(range(width), range(height))
    r2 = (xs - width/2)**2 + (ys - height/2)**2
    img = 50 + 200 * exp(-0.5 * r2 / (sigma**2))
    pyfits.writeto('tstimg.fits', img, clobber=True)
|
Returns:
The results of executing the UDF converted to a dataframe if no variable
was specified. None otherwise.
"""
variable_name = args['module']
if not variable_name:
raise Exception('Declaration must be of the form %%bigquery udf --module <variable name>')
# Parse out the input and output specification
spec_pattern = r'\{\{([^}]+)\}\}'
spec_part_pattern = r'[a-z_][a-z0-9_]*'
specs = re.findall(spec_pattern, js)
if len(specs) < 2:
raise Exception('The JavaScript must declare the input row and output emitter parameters '
'using valid jsdoc format comments.\n'
'The input row param declaration must be typed as {{field:type, field2:type}} '
'and the output emitter param declaration must be typed as '
'function({{field:type, field2:type}}.')
inputs = []
input_spec_parts = re.findall(spec_part_pattern, specs[0], flags=re.IGNORECASE)
if len(input_spec_parts) % 2 != 0:
raise Exception('Invalid input row param declaration. The jsdoc type expression must '
'define an object with field and type pairs.')
for n, t in zip(input_spec_parts[0::2], input_spec_parts[1::2]):
inputs.append((n, t))
outputs = []
output_spec_parts = re.findall(spec_part_pattern, specs[1], flags=re.IGNORECASE)
if len(output_spec_parts) % 2 != 0:
raise Exception('Invalid output emitter param declaration. The jsdoc type expression must '
'define a function accepting an an object with field and type pairs.')
for n, t in zip(output_spec_parts[0::2], output_spec_parts[1::2]):
outputs.append((n, t))
# Look for imports. We use a non-standard @import keyword; we could alternatively use @requires.
# Object names can contain any characters except \r and \n.
import_pattern = r'@import[\s]+(gs://[a-z\d][a-z\d_\.\-]*[a-z\d]/[^\n\r]+)'
imports = re.findall(import_pattern, js)
# Split the cell if necessary. We look for a 'function(' with no name and a header comment
# block with @param and assume this is the primary function, up to a closing '}' at the start
# of the line. The remaining cell content is used as support code.
split_pattern = r'(.*)(/\*.*?@param.*?@param.*?\*/\w*\n\w*function\w*\(.*?^}\n?)(.*)'
parts = re.match(split_pattern, js, re.MULTILINE | re.DOTALL)
support_code = ''
if parts:
support_code = (parts.group(1) + parts.group(3)).strip()
if len(support_code):
js = parts.group(2)
# Finally build the UDF object
udf = datalab.bigquery.UDF(inputs, outputs, variable_name, js, support_code, imports)
datalab.utils.commands.notebook_environment()[variable_name] = udf
def _execute_cell(args, cell_body):
  """Implements the BigQuery cell magic used to execute BQ queries.
  The supported syntax is:
  %%bigquery execute [-q|--sql <query identifier>] <other args>
  [<YAML or JSON cell_body or inline SQL>]
  Args:
    args: the arguments following '%bigquery execute'.
    cell_body: optional contents of the cell interpreted as YAML or JSON.
  Returns:
    The QueryResultsTable
  """
  # The query may come from a named variable (--sql) or from the cell body.
  query = _get_query_argument(args, cell_body, datalab.utils.commands.notebook_environment())
  if args['verbose']:
    print(query.sql)
  # Execute with the caching/result-size/dialect/billing options supplied as flags.
  return query.execute(args['target'], table_mode=args['mode'], use_cache=not args['nocache'],
                       allow_large_results=args['large'], dialect=args['dialect'],
                       billing_tier=args['billing']).results
def _pipeline_cell(args, cell_body):
  """Implements the BigQuery cell magic used to validate, execute or deploy BQ pipelines.
  The supported syntax is:
  %%bigquery pipeline [-q|--sql <query identifier>] <other args> <action>
  [<YAML or JSON cell_body or inline SQL>]
  Args:
    args: the arguments following '%bigquery pipeline'.
    cell_body: optional contents of the cell interpreted as YAML or JSON.
  Returns:
    The QueryResultsTable
  """
  if args['action'] == 'deploy':
    raise Exception('Deploying a pipeline is not yet supported')
  # Expose only UDF objects from the notebook environment to the query.
  env = {}
  for key, value in datalab.utils.commands.notebook_environment().items():
    if isinstance(value, datalab.bigquery._udf.UDF):
      env[key] = value
  query = _get_query_argument(args, cell_body, env)
  if args['verbose']:
    print(query.sql)
  if args['action'] == 'dryrun':
    # Dry run: report the bytes that would be processed without running the query.
    print(query.sql)
    result = query.execute_dry_run()
    return datalab.bigquery._query_stats.QueryStats(total_bytes=result['totalBytesProcessed'],
                                                    is_cached=result['cacheHit'])
  if args['action'] == 'run':
    return query.execute(args['target'], table_mode=args['mode'], use_cache=not args['nocache'],
                         allow_large_results=args['large'], dialect=args['dialect'],
                         billing_tier=args['billing']).results
def _table_line(args):
  """Implements the BigQuery table magic used to display tables.
  The supported syntax is:
  %bigquery table -t|--table <name> <other args>
  Args:
    args: the arguments following '%bigquery table'.
  Returns:
    The HTML rendering for the table.
  Raises:
    Exception if the named table does not exist.
  """
  # TODO(gram): It would be good to turn _table_viewer into a class that has a registered
  # renderer. That would allow this to return a table viewer object which is easier to test.
  name = args['table']
  table = _get_table(name)
  if table and table.exists():
    # --cols optionally restricts the view to a comma-separated column subset.
    fields = args['cols'].split(',') if args['cols'] else None
    html = _table_viewer(table, rows_per_page=args['rows'], fields=fields)
    return IPython.core.display.HTML(html)
  else:
    raise Exception('Table %s does not exist; cannot display' % name)
def _get_schema(name):
  """ Given a variable or table name, get the Schema if it exists. """
  # Prefer a notebook variable of that name; fall back to a BQ table lookup.
  obj = datalab.utils.commands.get_notebook_item(name) or _get_table(name)
  if isinstance(obj, datalab.bigquery.Schema):
    return obj
  # Tables/views expose their schema through a `schema` member.
  schema = getattr(obj, 'schema', None)
  if isinstance(schema, datalab.bigquery._schema.Schema):
    return schema
  return None
# An LRU cache for Tables. This is mostly useful so that when we cross page boundaries
# when paging through a table we don't have to re-fetch the schema.
_table_cache = datalab.utils.LRUCache(10)
def _get_table(name):
  """ Given a variable or table name, get a Table if it exists.
  Args:
    name: the name of the Table or a variable referencing the Table.
  Returns:
    The Table, if found.
  """
  # If name is a variable referencing a table, use that.
  item = datalab.utils.commands.get_notebook_item(name)
  if isinstance(item, datalab.bigquery.Table):
    return item
  # Else treat this as a BQ table name and return the (cached) table if it exists.
  try:
    return _table_cache[name]
  except KeyError:
    # Cache miss: only tables that actually exist are cached, so missing
    # names get re-checked on every call.
    table = datalab.bigquery.Table(name)
    if table.exists():
      _table_cache[name] = table
      return table
  return None
def _schema_line(args):
  """Implements the BigQuery schema magic used to display table/view schemas.
  Args:
    args: the arguments following '%bigquery schema' (--table or --view).
  Returns:
    The HTML rendering for the schema.
  Raises:
    Exception if no name was given or no schema could be resolved for it.
  """
  # TODO(gram): surely we could just return the schema itself?
  name = args['table'] or args['view']
  if name is None:
    raise Exception('No table or view specified; cannot show schema')
  schema = _get_schema(name)
  if not schema:
    raise Exception('%s is not a schema and does not appear to have a schema member' % name)
  return IPython.core.display.HTML(_repr_html_table_schema(schema))
# Thin wrappers turning HtmlBuilder output into IPython display objects.
def _render_table(data, fields=None):
  """ Helper to render a list of dictionaries as an HTML display object. """
  return IPython.core.display.HTML(datalab.utils.commands.HtmlBuilder.render_table(data, fields))
def _render_list(data):
  """ Helper to render a list of objects as an HTML list object. """
  return IPython.core.display.HTML(datalab.utils.commands.HtmlBuilder.render_list(data))
def _datasets_line(args):
"""Implements the BigQuery datasets magic used to display datasets in a project.
The supported syntax is:
%bigquery datasets [-f <filter>] [-p|--project <project_id> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.