text stringlengths 0 1.05M | meta dict |
|---|---|
# Agent.py
from Tools import *
from agTools import *
class Agent(SuperAgent):
    """A basic 3-D agent: it eats, dances, moves at random and can
    schedule/cancel tasks via the shared `common` state holder
    (star-imported from Tools).

    Position (xPos, yPos, zPos) is kept inside the box
    [lX, rX] x [bY, tY] x [bZ, tZ].
    """

    def __init__(self, number, myWorldState,
                 xPos, yPos, zPos, lX=0, rX=0, bY=0, tY=0, bZ=0, tZ=0,
                 agType=""):
        """Create agent `number` of type `agType` at (xPos, yPos, zPos).

        myWorldState may be passed as 0 (placeholder) when no world state
        is used; in that case the attribute is simply not set.
        """
        # 0 definitions to be replaced (useful only if the
        # dimensions are omitted and we do not use space)
        # the environment
        self.agOperatingSets = []
        self.number = number
        self.lX = lX  # left / right bounds on X
        self.rX = rX
        self.bY = bY  # bottom / top bounds on Y
        self.tY = tY
        self.bZ = bZ  # bottom / top bounds on Z
        self.tZ = tZ
        if myWorldState != 0:
            self.myWorldState = myWorldState
        self.agType = agType
        # the agent
        self.xPos = xPos
        self.yPos = yPos
        self.zPos = zPos
        print("agent", self.agType, "#", self.number,
              "has been created at", self.xPos, ",", self.yPos, ",", self.zPos)

    # ",**d" in the parameter lists of the methods is a place holder
    # in case we use, calling the method, a dictionary as last par

    # eating
    def eat(self, **d):
        """Announce that there is nothing to eat here."""
        print("I'm %s agent # %d: " % (self.agType, self.number), end=' ')
        print("nothing to eat here!")

    # dancing
    def dance(self, **d):
        """Print a type-specific reaction to dancing (tasteA/B/C or other)."""
        print("I'm %s agent # %d: " % (self.agType, self.number), end=' ')
        if self.agType == "tasteA":
            print("I'm an A, nice to dance here!")
        elif self.agType == "tasteB":
            print("I'm a B, not so nice to dance here!")
        elif self.agType == "tasteC":
            print("I'm a C, why to dance here?")
        else:
            print("it's not time to dance!")

    # the action, also jumping
    def randomMovement(self, **k):
        """With probability taken from the world state, take one random step
        of size `jump` (default 1, overridable via k["jump"]) along each of
        x, y and z, then clamp the position to the bounding box.
        """
        if random.random() <= self.myWorldState.getGeneralMovingProb():
            print("agent %s # %d moving" % (self.agType, self.number))
            self.jump = 1
            if "jump" in k:
                self.jump = k["jump"]
            dx = randomMove(self.jump)  # module-level helper defined below
            self.xPos += dx
            dy = randomMove(self.jump)
            self.yPos += dy
            dz = randomMove(self.jump)
            self.zPos += dz
            #self.xPos = (self.xPos + self.worldXSize) % self.worldXSize
            #self.yPos = (self.yPos + self.worldYSize) % self.worldYSize
            # clamp each coordinate to its [low, high] bound
            if self.xPos < self.lX:
                self.xPos = self.lX
            if self.xPos > self.rX:
                self.xPos = self.rX
            if self.yPos < self.bY:
                self.yPos = self.bY
            if self.yPos > self.tY:
                self.yPos = self.tY
            if self.zPos < self.bZ:
                self.zPos = self.bZ
            if self.zPos > self.tZ:
                self.zPos = self.tZ

    # report
    def reportPosition(self, **d):
        """Print the agent's current coordinates."""
        print(self.agType, "agent # ", self.number, " is at X = ",
              self.xPos, " Y = ", self.yPos, " Z = ", self.zPos)

    def reportPos(self, **d):
        """Return the current position as an (x, y, z) tuple."""
        return (self.xPos, self.yPos, self.zPos)

    # adding a task (from v. 1.35 of SLAPP)
    # common is derived importing Tools
    def addTask(self):
        """Schedule an "all dance" task for the next cycle in common.addTasks."""
        newTask = "all dance"
        print(
            "agent",
            self.number,
            "adding a task for cycle",
            common.cycle + 1)
        if common.cycle + 1 not in common.addTasks:
            common.addTasks[common.cycle + 1] = []
        common.addTasks[common.cycle + 1].append(newTask)

    # eliminating a task (from v. 1.35 of SLAPP)
    # common is derived importing Tools
    def elimTask(self):
        """Schedule removal of the "tasteC eat" task two cycles ahead."""
        killTask = "tasteC eat"
        print("agent", self.number, "eliminating a task for cycle",
              common.cycle + 2)
        if common.cycle + 2 not in common.elimTasks:
            common.elimTasks[common.cycle + 2] = []
        common.elimTasks[common.cycle + 2].append(killTask)
# Uniform random step: -1, 0 or +1, scaled by the jump size.
def randomMove(jump):
    step = random.randint(-1, 1)
    return step * jump
| {
"repo_name": "terna/SLAPP3",
"path": "6 objectSwarmObserverAgents_AESOP_turtleLib_NetworkX/basic3D/Agent.py",
"copies": "1",
"size": "3936",
"license": "cc0-1.0",
"hash": -5326909922621836000,
"line_mean": 31.8,
"line_max": 79,
"alpha_frac": 0.525152439,
"autogenerated": false,
"ratio": 3.4166666666666665,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9435449717964777,
"avg_score": 0.0012738775403779133,
"num_lines": 120
} |
# agent.py
# --------
import util
def scoreEvaluationFn(gameState, player):
    '''
    Default evaluation: the player's score minus the enemy's score,
    as reported by the game state.
    '''
    enemy_score, own_score = gameState.getScore(player)
    return own_score - enemy_score
def betterEvaluationFn(gameState, player):
    '''
    Insane piece killing evaluation function
    DESCRIPTION:
    < Considering: Mobility, Number of Doubled, Isolated Pawns and Pinned Pieces,
    King checked, Queen Trapped >
    Pawn Structure:
    - Penalise doubled, backward and blocked pawns.
    - Encourage pawn advancement where adequately defended.
    - Encourage control of the centre of the board.
    Piece Placement:
    - Encourage knights to occupy the centre of the board.
    - Encourage bishops to occupy principal diagonals.
    - Encourage queens and rooks to defend each other and attack.
    - Encourage 7th rank attacks for rooks.
    Passed Pawns:
    - These deserve a special treatment as they are so important.
    - Check for safety from opposing king and enemy pieces.
    - Test pawn structure for weaknesses, such as hidden passed pawns.
    - Add enormous incentives for passed pawns near promotion.
    King Safety
    - Encourage the king to stay to the corner in the middlegame.
    - Try to retain an effective pawn shield.
    - Try to stop enemy pieces from getting near to the king.
    '''
    # Fetch legal actions once and reuse them for both mobility and
    # central control (the original called getLegalActions twice;
    # assumed side-effect free -- TODO confirm against the game engine).
    actions = player.getLegalActions(gameState)
    mobility = len(actions)
    enemyScore, playerScore = gameState.getScore(player)
    pawns = [piece for piece in player.pieces if piece.toString() == "P"]
    # Hoisted out of the loops: the pawn-position list is loop-invariant
    # (the original rebuilt it on every iteration, O(n^2)).
    pawnPositions = [p.pos for p in pawns]
    # White pawns advance in +y, Black in -y; a pawn is "doubled" when a
    # friendly pawn sits directly ahead of it on the same file.
    forward = 1 if player.color == "White" else -1
    numDoubledPawns = 0
    for pawn in pawns:
        if (pawn.pos[0], pawn.pos[1] + forward) in pawnPositions:
            numDoubledPawns += 1
    # A pawn far (distance > 4) from every other friendly piece is isolated.
    numIsolatedPawns = 0
    for pawn in pawns:
        if util.computeMinDistFromOtherPieces(pawn, player.pieces) > 4:
            numIsolatedPawns += 1
    numPinnedPieces = gameState.pinnedPieces
    # Control of the eight central squares, by occupation or by attack.
    centralCoords = [(2, 3), (3, 3), (4, 3), (5, 3), (2, 4), (3, 4), (4, 4), (5, 4)]
    piecePositions = [piece.pos for piece in player.pieces]
    actionPositions = [action.newPos for action in actions]
    centralControl = 0
    for coord in centralCoords:
        if coord in piecePositions or coord in actionPositions:
            centralControl += 1
    # Weighted linear combination (renamed from `sum`, which shadowed the builtin).
    score = (10 * mobility - 20 * numPinnedPieces
             - 5 * (numIsolatedPawns + numDoubledPawns)
             + 100 * (playerScore - enemyScore) + 50 * centralControl)
    return score
class Agent(object):
    """Abstract base class shared by the AlphaBeta and Expectimax agents.

    Stores both players, the evaluation function and the search depth;
    concrete subclasses must override getAction().
    """

    def __init__(self, player, enemy, evalFn=scoreEvaluationFn, depth="2"):
        super(Agent, self).__init__()
        # Depth arrives as a string (command-line style) and is stored as int.
        self.depth = int(depth)
        self.evaluationFunction = evalFn
        self.player = player
        self.enemy = enemy
        self.color = player.color

    def getAction(self, args):
        # Subclasses implement the actual search.
        util.raiseNotDefined()
class AlphaBetAgent(Agent):
    """Minimax search with alpha-beta pruning.

    Convention: max_value returns a (value, best-action-index) tuple,
    while min_value returns a bare value.
    """

    def getAction(self, gameState):
        """
        Returns the minimax action using self.depth and self.evaluationFunction
        """
        "*** YOUR CODE HERE ***"
        value, index = self.max_value(gameState, self.depth, float('-inf'), float('inf'))
        return self.player.getLegalActions(gameState)[index]

    def max_value(self, gameState, curDepth, alpha, beta):
        """Maximizing node: best value over the player's legal actions.

        Returns:
            tuple: (value, index of the best action); index is None at leaves.
        """
        v = float('-inf')
        # print "Max node"
        # print "Current Depth:", curDepth
        legalActions = self.player.getLegalActions(gameState)
        counter = 0
        # Check if this is an end state and whether the depth has been reached
        if len(legalActions) == 0 or curDepth == 0:
            # print "Returns: ", self.evaluationFunction(gameState, self.player)
            return self.evaluationFunction(gameState, self.player), None
        for i, action in enumerate(legalActions):
            successor = gameState.getSuccessor(action)
            # Recurse if depth has not been reached
            newv = max(v, self.min_value(successor, curDepth, alpha, beta))
            # keep track of the index of the best action
            if newv != v: counter = i
            v = newv
            if v > beta: return v, counter  # pruning
            alpha = max(alpha, v)
        return v, counter

    def min_value(self, gameState, curDepth, alpha, beta):
        """Minimizing node; depth is decremented when recursing back to MAX.

        NOTE(review): actions come from self.player.getLegalActions even
        though this models the opponent's reply -- confirm the engine's
        getLegalActions accounts for whose turn it is.
        """
        v = float('inf')
        # print "Min Node"
        # print "Current Depth:", curDepth
        legalActions = self.player.getLegalActions(gameState)
        # Check if this is an end state
        if len(legalActions) == 0:
            # print "Returns: ", self.evaluationFunction(gameState, self.enemy)
            return self.evaluationFunction(gameState, self.enemy)
        for action in legalActions:
            successor = gameState.getSuccessor(action)
            # Switch to MAX agent
            v = min(v, self.max_value(successor, curDepth-1, alpha, beta)[0])
            if v < alpha: return v  # pruning
            beta = min(beta, v)
        return v
class ExpectimaxAgent(Agent):
    """
    A simple Expectimax Agent
    """

    def getAction(self, gameState):
        """Return the expectimax-optimal action for the player."""
        value, index = self.max_value(gameState, self.depth)
        return self.player.getLegalActions(gameState)[index]

    def max_value(self, gameState, curDepth):
        """Maximizing node: returns (best value, index of best action)."""
        legalActions = self.player.getLegalActions(gameState)
        counter = 0
        v = float('-inf')
        # Terminal state or depth limit: fall back to the evaluation function.
        if len(legalActions) == 0 or curDepth == 0:
            return self.evaluationFunction(gameState, self.player), None
        for i, action in enumerate(legalActions):
            successor = gameState.getSuccessor(action)
            newv = max(v, self.expect_value(successor, curDepth))
            # keep track of the index of the best action
            if newv != v: counter = i
            v = newv
        return v, counter

    def expect_value(self, gameState, curDepth):
        """Chance node: average of max-values over the available actions.

        NOTE(review): uses self.player.getLegalActions although this models
        the opponent's turn -- confirm intended against the game engine.
        """
        legalActions = self.player.getLegalActions(gameState)
        total = 0
        if len(legalActions) == 0:
            return self.evaluationFunction(gameState, self.enemy)
        for action in legalActions:
            successor = gameState.getSuccessor(action)
            # Switch to MAX agent
            total = total + self.max_value(successor, curDepth-1)[0]
        return ( float(total) / len(legalActions) )
| {
"repo_name": "AhanM/ChessAI",
"path": "agent.py",
"copies": "1",
"size": "5883",
"license": "mit",
"hash": -6511213693691430000,
"line_mean": 27.2884615385,
"line_max": 144,
"alpha_frac": 0.7079721231,
"autogenerated": false,
"ratio": 3.051348547717842,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9000471388917792,
"avg_score": 0.05176985638001005,
"num_lines": 208
} |
"""agent.py -- personal assistant and modular chat bot
Copyright 2016 Rylan Santinon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
from .handlers.greetingstatementhandler import GreetingStatementHandler
from .handlers.arithmetichandler import ArithmeticHandler
from .handlers.declaredmemoryhandler import DeclaredMemoryHandler
from .handlers.declarationhandler import DeclarationHandler
from .handlers.convoterminationhandler import ConvoTerminationHandler
from .handlers.elizastatementhandler import ElizaStatementHandler
from .handlerbase import DefaultStatementHandler
class Figaro(object):
    """Figaro -- the personal assistant"""

    def __init__(self):
        """Set up conversation memory and the ordered handler chain.

        Order matters: the first handler whose can_handle() accepts a
        statement wins, and DefaultStatementHandler is last as catch-all.
        """
        self._conv_ended = False
        self._memory = {}
        self._handlers = []
        self._handlers.append(GreetingStatementHandler())
        self._handlers.append(ArithmeticHandler())
        self._handlers.append(DeclaredMemoryHandler())
        self._handlers.append(DeclarationHandler())
        self._handlers.append(ConvoTerminationHandler())
        self._handlers.append(ElizaStatementHandler())
        self._handlers.append(DefaultStatementHandler())

    @property
    def conversation_ended(self):
        """True once a handler has signalled the end of the conversation."""
        return self._conv_ended

    def _mem_store(self, key, val):
        """Persist one key/value memo in conversation memory."""
        self._memory[key] = val

    def _dispatch_to_handler(self, statement):
        """Return the response of the first handler accepting `statement`.

        Each handler sees a deep copy of memory, so it cannot mutate the
        assistant's state directly; updates flow back via response memos.
        """
        for handler in self._handlers:
            copied_mem = deepcopy(self._memory)
            if handler.can_handle(statement, copied_mem):
                return handler.handle(statement, copied_mem)
        raise RuntimeError('No handler registered for statement "%s"' % statement)

    def hears(self, statement):
        """Accept the given statement and respond to it

        >>> Figaro().hears("Hello there")
        'Hello!'
        >>> Figaro().hears("5 minus 13")
        '-8.0'
        >>> Figaro().hears("jibberjabber")
        "I'm not sure how to respond to that."
        >>> Figaro().hears("Why are you so rude?")
        'I was born that way.'
        >>> Figaro().hears("Are you deaf?")
        'Yes. How about you?'
        >>> Figaro().hears("you are really annoying")
        'In what way exactly?'
        >>> fg = Figaro()
        >>> fg.hears("who am i")
        "I don't know. You tell me."
        >>> fg.hears("alabama is in America.")
        'Thanks for letting me know.'
        >>> fg.hears("Where is alabama?")
        'in America.'
        >>> fg.hears("My name is Ishmael")
        'Nice to meet you.'
        >>> fg.hears("What is my name?")
        'Ishmael'
        >>> fg.hears("who am I?")
        'You told me your name is Ishmael.'
        """
        response = self._dispatch_to_handler(statement)
        if response.terminated:
            self._conv_ended = True
        answer, memos = response.answer, response.memo
        # Each memo is a (key, value) pair to remember for later turns.
        for memo in memos:
            key, val = memo
            self._mem_store(key, val)
        return answer
# Run the doctest examples embedded in Figaro.hears when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| {
"repo_name": "rylans/figaro",
"path": "figaro/agent.py",
"copies": "1",
"size": "3536",
"license": "apache-2.0",
"hash": -7985159492829491000,
"line_mean": 32.6761904762,
"line_max": 82,
"alpha_frac": 0.6425339367,
"autogenerated": false,
"ratio": 4.045766590389016,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0009003788120253984,
"num_lines": 105
} |
# Agent.py - Use a message queue interface to launch a program and monitor it
#
import os
import signal
import time
import subprocess
from subprocess import Popen as Popen
import yaml
import redis
import magic
import psutil
import json
import plotly
print plotly.__version__ # version >1.9.4 required
from plotly.graph_objs import Layout, Bar, Scatter
def main():
    """Block on the Redis 'queue' list, then process one launch request.

    NOTE(review): the payload is eval()'d into a dict -- unsafe if the
    queue is reachable by untrusted writers; consider json.loads instead.
    """
    print('Running agent')
    r = redis.Redis(host="redis")
    msg = r.blpop('queue')  # (key, value) tuple; value is the message body
    msgdict = eval(msg[1])
    ProcessMessage(msgdict)
def ComputePerformanceReport(snapshots, firstSnapshot, currentSnapshot, cpuCores):
    """Print a performance summary and dump plot data to ./temp-plot.html.

    Args:
        snapshots: ring buffer (list) of per-interval sample dicts.
        firstSnapshot: the very first sample (baseline for packet deltas).
        currentSnapshot: total samples taken; may exceed len(snapshots)
            once the ring buffer has wrapped.
        cpuCores: number of CPU cores available to the monitored process.
    """
    # Compute means. Sums are coerced to float *before* dividing so that
    # Python 2 integer division cannot truncate the result (the original
    # divided first and only then wrapped the quotient in float()).
    n = len(snapshots)
    meanMemory = float(sum(x['memory'] for x in snapshots)) / n
    meanCpu = float(sum(x['cpu'] for x in snapshots)) / n
    meanFiles = float(sum(x['files'] for x in snapshots)) / n
    # The ring buffer wraps only once currentSnapshot exceeds its length;
    # before that the newest sample is simply the last element. (The
    # original always indexed currentSnapshot % n, which picks the *first*
    # element when the buffer has not wrapped.)
    if currentSnapshot > n:
        lastSnapshot = snapshots[currentSnapshot % n]
    else:
        lastSnapshot = snapshots[-1]
    # Packet totals are deltas between the newest and the first sample.
    udpPacketSent = int(lastSnapshot['udp-packets-sent']) - int(firstSnapshot['udp-packets-sent'])
    udpPacketRcvd = int(lastSnapshot['udp-packets-rcvd']) - int(firstSnapshot['udp-packets-rcvd'])
    tcpPacketSent = int(lastSnapshot['tcp-packets-sent']) - int(firstSnapshot['tcp-packets-sent'])
    tcpPacketRcvd = int(lastSnapshot['tcp-packets-rcvd']) - int(firstSnapshot['tcp-packets-rcvd'])
    print('\n\n\n****************** APPLICATION PERFORMANCE REPORT ******************\n\n')
    print('Memory Usage: %f%% CPU Usage: %f%% Open Files: %d\n\n' % (meanMemory, meanCpu, meanFiles))
    print('Total UDP Packets Sent: %d Total TCP Packets Sent: %d' % (udpPacketSent, tcpPacketSent))
    print('Total UDP Packets Rcvd: %d Total TCP Packets Rcvd: %d\n\n' % (udpPacketRcvd, tcpPacketRcvd))
    # One shared x-axis (sample index) for every series.
    sampleIndices = list(range(n))
    jsonData = {
        'cpuData': {'x': sampleIndices,
                    'y': [s['cpu'] for s in snapshots]},
        'meanCpu': meanCpu,
        'cpuCores': cpuCores,
        'memoryData': {'x': sampleIndices,
                       'y': [s['memory'] for s in snapshots]},
        'meanMemory': meanMemory,
        'udpPacketsSent': {'x': sampleIndices,
                           'y': [s['udp-packets-sent'] for s in snapshots]},
        'udpPacketsRcvd': {'x': sampleIndices,
                           'y': [s['udp-packets-rcvd'] for s in snapshots]},
        'tcpPacketsSent': {'x': sampleIndices,
                           'y': [s['tcp-packets-sent'] for s in snapshots]},
        'tcpPacketsRcvd': {'x': sampleIndices,
                           'y': [s['tcp-packets-rcvd'] for s in snapshots]},
    }
    with open('./temp-plot.html', 'w') as htmlFile:
        json.dump(jsonData, htmlFile)
def ProcessMessage(msgdict):
    """Launch the application (and optional test) described by msgdict,
    sample its resource usage into a ring buffer, then kill it and report.

    msgdict keys used: 'Application' and 'Test' (command strings or None).
    NOTE: Python 2 code (uses the file() builtin).
    """
    # Determine file types of application and test code
    APPPREFIX = []
    TESTPREFIX = []
    # Resolve the python interpreter so scripts can be exec'd explicitly.
    PYTHONPATH = [subprocess.check_output(['which', 'python']).rstrip()]
    if msgdict['Application'] and magic.from_file(msgdict['Application'].split()[0]) == 'Python script, ASCII text executable':
        APPPREFIX = PYTHONPATH
    if msgdict['Test'] and magic.from_file(msgdict['Test'].split()[0]) == 'Python script, ASCII text executable':
        TESTPREFIX = PYTHONPATH
    if msgdict['Application'] is None:
        return
    appLogFile = open('app.log', 'w')
    testLogFile = open('test.log', 'w')
    Application = Popen(APPPREFIX + msgdict['Application'].split(), stdout=appLogFile, stderr=subprocess.STDOUT)
    print('Application Command: %s pid %d' % (msgdict['Application'], Application.pid))
    if msgdict['Test']:
        Test = Popen(TESTPREFIX + msgdict['Test'].split(), stdout=testLogFile, stderr=subprocess.STDOUT)
        print('Test Command: %s pid %d' % (msgdict['Test'], Test.pid))
    else:
        print('No test command specified!')
    # We keep track of process performance snapshots in the following dictionary:
    #
    # {
    #   'pid1' : [{'cpu': 37.4, 'memory': 102M, 'files': 5}, ...],
    #   'pid2' : [{'cpu': 37.4, 'memory': 102M, 'files': 5}, ...]
    # }
    snapshots = {}
    snapshots[Application.pid] = []
    currentSnapshot = 0
    stream = file('./agent.yaml', 'r')  # Python 2 only: file() builtin
    config = yaml.load(stream)
    snapshotSize = config['snapshotSize']
    maxSnapshots = config['maxSnapshots']
    # Shell pipelines reading system-wide packet counters from netstat.
    UdpPacketsSentCmd = "netstat --statistics | grep -A 4 Udp: | grep 'packets sent' | awk '{print $1}'"
    UdpPacketsRcvdCmd = "netstat --statistics | grep -A 4 Udp: | grep 'packets received' | awk '{print $1}'"
    TcpPacketsSentCmd = "netstat --statistics | grep -A 10 Tcp: | grep 'segments send' | awk '{print $1}'"
    TcpPacketsRcvdCmd = "netstat --statistics | grep -A 10 Tcp: | grep 'segments received' | grep -v 'bad' | awk '{print $1}'"
    firstSnapshot = None
    app_process = psutil.Process(Application.pid)
    for i in range(0, maxSnapshots):
        total_cpu_percent = 0.0
        # Sum CPU over all children; a child may exit mid-iteration, in
        # which case its sample is skipped.
        for child in app_process.children(recursive=True):
            try:
                total_cpu_percent = total_cpu_percent + child.cpu_percent(interval=0.05)
            except:
                continue
        sndict = {'cpu': total_cpu_percent / len(app_process.cpu_affinity()),
                  'system-cpu': psutil.cpu_percent(interval=0.05),
                  'memory': app_process.memory_info().rss,
                  'system-memory': psutil.virtual_memory().available,
                  'files': len(os.listdir('/proc/%d' % Application.pid)),
                  'udp-packets-sent': subprocess.check_output(UdpPacketsSentCmd, shell=True).rstrip(),
                  'udp-packets-rcvd': subprocess.check_output(UdpPacketsRcvdCmd, shell=True).rstrip(),
                  'tcp-packets-sent': subprocess.check_output(TcpPacketsSentCmd, shell=True).rstrip(),
                  'tcp-packets-rcvd': subprocess.check_output(TcpPacketsRcvdCmd, shell=True).rstrip()}
        currentSnapshot += 1
        # Ring buffer: grow until snapshotSize, then overwrite in place.
        if currentSnapshot <= snapshotSize:
            snapshots[Application.pid].append(sndict)
        else:
            snapshots[Application.pid][currentSnapshot % snapshotSize] = sndict
        if not firstSnapshot:
            firstSnapshot = sndict
        time.sleep( 50.0 / 1000.0 )
    os.kill(Application.pid, signal.SIGKILL)
    if msgdict['Test']:
        os.kill(Test.pid, signal.SIGKILL)
    ComputePerformanceReport(snapshots[Application.pid], firstSnapshot, currentSnapshot, len(app_process.cpu_affinity()))
    os.system('tail -10 ./app.log')
    os.system('tail -4 ./test.log')
    os.remove('./app.log')
    os.remove('./test.log')
if __name__ == "__main__":
main() | {
"repo_name": "appfit/AppFit",
"path": "rightsize/agent/agent.py",
"copies": "1",
"size": "6747",
"license": "apache-2.0",
"hash": -3557722781710637000,
"line_mean": 43.9866666667,
"line_max": 138,
"alpha_frac": 0.6106417667,
"autogenerated": false,
"ratio": 3.6352370689655173,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47458788356655174,
"avg_score": null,
"num_lines": null
} |
# Source tables, represented as plain lists of rows.
# agents: agent_id, agent_famname, agent_name
t1 = [[100, 'Brown', 'Jack'],
      [101, 'Red', 'James'],
      [102, 'Black', 'John'],
      [103, 'White', 'Jeff'],
      [104, 'White', 'Jasper']]

# clients: client_id, agent_id, client_name
t2 = [[100, 100, 'McDonalds'],
      [101, 100, 'KFC'],
      [102, 102, 'Burger King'],
      [103, 103, 'Chinese'],
      [104, 999, 'French']]

# more agents (agents1): agent_id, agent_famname, agent_name
t3 = [[200, 'Smith', 'Jack'],
      [101, 'Red', 'James'],
      [201, 'Carpenter', 'John'],
      [103, 'White', 'Jeff']]

# restriction: keep only the rows satisfying a predicate
# SQL: select * from agents where agent_famname='White'
res = [agent for agent in t1 if agent[1] == 'White']
assert res == [[103, 'White', 'Jeff'],
               [104, 'White', 'Jasper']], 'restriction failed'
# projection
# SQL: select agent_name,agent_famname from agents
# (note: the column order is swapped -- name first, family name second)
res = [[row[2], row[1]] for row in t1]
assert res == [['Jack', 'Brown'],
               ['James', 'Red'],
               ['John', 'Black'],
               ['Jeff', 'White'],
               ['Jasper', 'White']], \
    'projection failed'

# cross-product (cartesian product)
# SQL: select * from agents, clients
# every agents row concatenated with every clients row: 5 x 5 = 25 rows
res = [r1 + r2 for r1 in t1 for r2 in t2]
assert res == [[100, 'Brown', 'Jack', 100, 100, 'McDonalds'],
               [100, 'Brown', 'Jack', 101, 100, 'KFC'],
               [100, 'Brown', 'Jack', 102, 102, 'Burger King'],
               [100, 'Brown', 'Jack', 103, 103, 'Chinese'],
               [100, 'Brown', 'Jack', 104, 999, 'French'],
               [101, 'Red', 'James', 100, 100, 'McDonalds'],
               [101, 'Red', 'James', 101, 100, 'KFC'],
               [101, 'Red', 'James', 102, 102, 'Burger King'],
               [101, 'Red', 'James', 103, 103, 'Chinese'],
               [101, 'Red', 'James', 104, 999, 'French'],
               [102, 'Black', 'John', 100, 100, 'McDonalds'],
               [102, 'Black', 'John', 101, 100, 'KFC'],
               [102, 'Black', 'John', 102, 102, 'Burger King'],
               [102, 'Black', 'John', 103, 103, 'Chinese'],
               [102, 'Black', 'John', 104, 999, 'French'],
               [103, 'White', 'Jeff', 100, 100, 'McDonalds'],
               [103, 'White', 'Jeff', 101, 100, 'KFC'],
               [103, 'White', 'Jeff', 102, 102, 'Burger King'],
               [103, 'White', 'Jeff', 103, 103, 'Chinese'],
               [103, 'White', 'Jeff', 104, 999, 'French'],
               [104, 'White', 'Jasper', 100, 100, 'McDonalds'],
               [104, 'White', 'Jasper', 101, 100, 'KFC'],
               [104, 'White', 'Jasper', 102, 102, 'Burger King'],
               [104, 'White', 'Jasper', 103, 103, 'Chinese'],
               [104, 'White', 'Jasper', 104, 999, 'French']], \
    'cross product failed'
# equi join / inner join
# SQL: select agents.*, clients.* from agents,clients
#      where agents.agent_id=clients.agent_id
# cross product restricted to matching agent_id (r1[0] == r2[1])
res = [r1 + r2 for r1 in t1 for r2 in t2 if r1[0] == r2[1]]
assert res == [[100, 'Brown', 'Jack', 100, 100, 'McDonalds'],
               [100, 'Brown', 'Jack', 101, 100, 'KFC'],
               [102, 'Black', 'John', 102, 102, 'Burger King'],
               [103, 'White', 'Jeff', 103, 103, 'Chinese']], \
    'inner join failed'

# left outer join
# SQL: select agents.*, clients.* from agents left outer join clients
#      where agents.agent_id = clients.agent_id
# inner join plus unmatched agents rows padded with NULLs (None)
res = [r1 + r2 for r1 in t1 for r2 in t2 if r1[0] == r2[1]] + \
    [r1 + [None] * len(t2[0]) for r1 in t1 if r1[0] not in [r2[1] for r2 in t2]]
assert res == [[100, 'Brown', 'Jack', 100, 100, 'McDonalds'],
               [100, 'Brown', 'Jack', 101, 100, 'KFC'],
               [102, 'Black', 'John', 102, 102, 'Burger King'],
               [103, 'White', 'Jeff', 103, 103, 'Chinese'],
               [101, 'Red', 'James', None, None, None],
               [104, 'White', 'Jasper', None, None, None]], \
    'left outer join failed'

# right outer join
# SQL: select agents.*, clients.* from agents right outer join clients
#      where agents.agent_id = clients.agent_id
# inner join plus unmatched clients rows padded with NULLs (None)
res = [r1 + r2 for r1 in t1 for r2 in t2 if r1[0] == r2[1]] + \
    [[None] * len(t1[0]) + r2 for r2 in t2 if r2[1] not in [r1[0] for r1 in t1]]
assert res == [[100, 'Brown', 'Jack', 100, 100, 'McDonalds'],
               [100, 'Brown', 'Jack', 101, 100, 'KFC'],
               [102, 'Black', 'John', 102, 102, 'Burger King'],
               [103, 'White', 'Jeff', 103, 103, 'Chinese'],
               [None, None, None, 104, 999, 'French']], \
    'right outer join failed'

# full outer join
# SQL: select agents.*, clients.* from agents full outer join clients
#      where agents.agent_id = clients.agent_id
# inner join plus unmatched rows from both sides, padded with NULLs
res = [r1 + r2 for r1 in t1 for r2 in t2 if r1[0] == r2[1]] + \
    [r1 + [None] * len(t2[0]) for r1 in t1 if r1[0] not in [r2[1] for r2 in t2]] + \
    [[None] * len(t1[0]) + r2 for r2 in t2 if r2[1] not in [r1[0] for r1 in t1]]
assert res == [[100, 'Brown', 'Jack', 100, 100, 'McDonalds'],
               [100, 'Brown', 'Jack', 101, 100, 'KFC'],
               [102, 'Black', 'John', 102, 102, 'Burger King'],
               [103, 'White', 'Jeff', 103, 103, 'Chinese'],
               [101, 'Red', 'James', None, None, None],
               [104, 'White', 'Jasper', None, None, None],
               [None, None, None, 104, 999, 'French']], \
    'full join failed'
# union
# SQL: select * from agents union select * from agents1
# t1 plus the t3 rows not already present (duplicates removed)
res = t1 + [r2 for r2 in t3 if r2 not in t1]
assert res == [[100, 'Brown', 'Jack'],
               [101, 'Red', 'James'],
               [102, 'Black', 'John'],
               [103, 'White', 'Jeff'],
               [104, 'White', 'Jasper'],
               [200, 'Smith', 'Jack'],
               [201, 'Carpenter', 'John']], \
    'union failed'

# intersection
# SQL: select * from agents intersect select * from agents1
# rows present in both tables
res = [r2 for r2 in t3 if r2 in t1]
assert res == [[101, 'Red', 'James'],
               [103, 'White', 'Jeff']], \
    'intersection failed'

# difference
# SQL: select * from agents minus select * from agents1
# rows of t1 that do not appear in t3
res = [r1 for r1 in t1 if r1 not in t3]
assert res == [[100, 'Brown', 'Jack'],
               [102, 'Black', 'John'],
               [104, 'White', 'Jasper']], \
    'difference failed'
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/159974_SQLlike_set_operations_list/recipe-159974.py",
"copies": "1",
"size": "6281",
"license": "mit",
"hash": -4897228186061637000,
"line_mean": 34.6875,
"line_max": 79,
"alpha_frac": 0.4922782996,
"autogenerated": false,
"ratio": 2.9474425152510557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8881576519161795,
"avg_score": 0.011628859137852247,
"num_lines": 176
} |
"""Agent to process the COIL-100 dataset using nap."""
import os
import logging
import argparse
from datetime import datetime
import numpy as np
from lumos.context import Context
from lumos.input import run
from ..vision.visual_system import VisualSystem, FeatureManager
class COILManager(FeatureManager):
    """A visual system manager for processing a single COIL-100 image."""
    # Override some FeatureManager parameters
    min_duration_unstable = 3.0

    def initialize(self, imageIn, timeNow):
        """Initialize the base manager, then pin gaze and feature weights
        for a single fixed-fixation pass over one image."""
        FeatureManager.initialize(self, imageIn, timeNow)
        # Configure visual system to use equal feature weights and hold gaze at a fixed location (default center)
        self.visualSystem.setBuffer('weights', { 'rest': 1.0 })
        self.visualSystem.finst_inhibition_enabled = False  # to prevent FINST-based inhibition
        self.visualSystem.max_hold_duration = 24.0 * 3600.0  # effectively prevent automatic release from hold
        self.visualSystem.setBuffer('intent', 'reset')
        self.visualSystem.setBuffer('intent', 'hold')
        # TODO: Use a better feature encoding scheme allowing the visual system to scan different parts of the image

    def process(self, imageIn, timeNow):
        """Run one base processing step; once features are STABLE, log the
        final statistics and return False to stop the input loop."""
        keepRunning, imageOut = FeatureManager.process(self, imageIn, timeNow)
        if self.state == self.State.STABLE:
            self.logger.info("[Final] Mean: {}".format(self.featureVectorMean))
            self.logger.info("[Final] S.D.: {}".format(self.featureVectorSD))
            self.logger.info("[Final] Feature matrix:\n {}".format("\n ".join("{}: {}".format(label, self.featureMatrixMean[i]) for i, label in enumerate(self.visualSystem.featureLabels))))
            return False, imageOut  # Return False when done
        return keepRunning, imageOut
class COILAgent(object):
    """Batch driver: feeds COIL-100 images through the visual system and
    writes one feature row per (object, view) to a tab-separated file.

    NOTE: Python 2 code (xrange, print statement).
    """
    image_size = (256, 256)  # optimal size can be vary depending on foveal distribution, image size and whether eye movements are enabled or not
    input_file_prefix = "obj"
    input_file_sep = "__"
    input_file_ext = "png"
    output_file_prefix = "feat"
    output_file_sep = "_"
    output_file_ext = "dat"

    def __init__(self):
        """Parse CLI options, open the output feature file (if requested)
        and build the visual system plus its manager."""
        # * Create application context, passing in custom arguments, and get a logger
        argParser = argparse.ArgumentParser(add_help=False)
        #argParser.add_argument('--in', type=str, default="coil-100", help="path to directory containing input images")  # use input_source as directory; default to current directory
        argParser.add_argument('--out', type=str, default=None, help="path to output directory")  # should this be a common parameter in Context?
        argParser.add_argument('--obj', type=str, default="1,101,1", required=False, help="object ID range, right-open interval <start>,<stop>,<step> (no spaces); default: full range")
        argParser.add_argument('--view', type=str, default="0,360,5", required=False, help="view angle range in degrees, right-open interval <start>,<stop>,<step> (no spaces); default: full range")
        self.context = Context.createInstance(description="COIL-100 image dataset processor", parent_argparsers=[argParser])  # TODO how to gather arg parsers from other interested parties?
        self.logger = logging.getLogger(self.__class__.__name__)
        # * Parse arguments
        self.inDir = self.context.options.input_source  # should be an absolute path to a dir with COIL images; if it is a file/camera instead, it will be used as sole input
        # TODO also accept wildcards using glob.glob()?
        self.outDir = self.context.options.out  # just for convenience
        self.outFile = None
        if self.outDir is not None:  # TODO otherwise default to some directory?
            if os.path.isdir(self.outDir):
                now = datetime.now()
                outFilepath = os.path.join(self.outDir, "{}{}{}{}{}.{}".format(self.output_file_prefix, self.output_file_sep, now.strftime('%Y-%m-%d'), self.output_file_sep, now.strftime('%H-%M-%S'), self.output_file_ext))
                self.logger.info("Output file: {}".format(outFilepath))
                self.outFile = open(outFilepath, 'w')  # open output file for storing features (TODO use with.. block instead in start()?)
            else:
                self.logger.warn("Invalid output directory \"{}\"; no output will be saved".format(self.outDir))
                self.outDir = None  # TODO create output directory if it doesn't exist
        # Right-open integer ranges parsed from "<start>,<stop>,<step>".
        self.objRange = xrange(*(int(x) for x in self.context.options.obj.split(',')))
        self.viewRange = xrange(*(int(x) for x in self.context.options.view.split(',')))
        # * Create visual system and manager
        self.context.update()  # get fresh time
        self.visSys = VisualSystem(imageSize=self.image_size, timeNow=self.context.timeNow)
        self.visMan = COILManager(self.visSys)

    def run(self):
        """Process each (object, view) image -- or the sole input source --
        appending one feature row per image to the output file."""
        if self.outFile is not None:
            # Header row: obj, view, then one column per feature neuron.
            #self.outFile.write("{}\t{}\t{}\t{}\n".format('obj', 'view', '\t'.join(["{}_mean".format(label) for label in self.visSys.featureLabels]), '\t'.join(["{}_sd".format(label) for label in self.visSys.featureLabels])))  # vector mean and SD
            self.outFile.write("{}\t{}\t{}\n".format('obj', 'view', '\t'.join(["{}_{}".format(label, i) for label in self.visSys.featureLabels for i in xrange(self.visSys.num_feature_neurons)])))  # matrix mean
        if self.context.isDir:  # input source is a directory
            # * Run visual input using manager, looping over all specified object images
            for obj in self.objRange:
                for view in self.viewRange:
                    # ** Build image file path from object ID and view angle
                    input_file = os.path.join(self.inDir, "{}{}{}{}.{}".format(self.input_file_prefix, obj, self.input_file_sep, view, self.input_file_ext))
                    #assert os.path.exists(input_file), "Input file \"{}\" doesn't exist".format(input_file)
                    if not os.path.exists(input_file):
                        self.logger.warn("Input file \"{}\" doesn't exist".format(input_file))
                        continue
                    self.logger.info("Input file: {}".format(input_file))
                    # ** Modify context to set image file as input source, and run it through the visual system
                    self.context.options.input_source = input_file
                    self.context.isImage = True
                    print "Running..."
                    run(self.visMan, resetContextTime=False)  # use the same manager so that visual system is only created once
                    if self.outFile is not None:
                        #self.outFile.write("{}\t{}\t{}\t{}\n".format(obj, view, '\t'.join(str(feat_mean) for feat_mean in self.visMan.featureVectorMean), '\t'.join(str(feat_sd) for feat_sd in self.visMan.featureVectorSD)))  # vector mean and SD
                        self.outFile.write("{}\t{}\t{}\n".format(obj, view, '\t'.join(str(feat) for feat in self.visMan.featureMatrixMean.flat)))  # matrix mean
        else:
            run(self.visMan, resetContextTime=False)  # run on the sole input source (image or video)
        if self.outFile is not None:
            self.outFile.close()
            self.logger.info("Output file closed.")
# Entry point: construct the agent and process the configured COIL images.
if __name__ == "__main__":
    COILAgent().run()
| {
"repo_name": "napratin/nap",
"path": "nap/agent/coil.py",
"copies": "1",
"size": "6940",
"license": "mit",
"hash": 8889049779163754000,
"line_mean": 56.8333333333,
"line_max": 241,
"alpha_frac": 0.6804034582,
"autogenerated": false,
"ratio": 3.670015864621893,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4850419322821893,
"avg_score": null,
"num_lines": null
} |
"""AgentUpgrade Class"""
from .http_helper import HttpHelper
from .halo_endpoint import HaloEndpoint
class AgentUpgrade(HaloEndpoint):
"""Initializing the AgentUpgrade class:
Args:
session (:class:`cloudpassage.HaloSession`): This will define how you
interact with the Halo API, including proxy settings and API keys
used for authentication.
"""
object_name = "agent_upgrade"
objects_name = "agent_upgrades"
default_endpoint_version = 1
def endpoint(self):
    """Build the versioned collection endpoint for agent-upgrade requests."""
    return "/v" + str(self.endpoint_version) + "/" + self.objects_name
def list_all(self):
"""Returns a list of scheduled and started upgrade requests.
Returns:
list: List of dictionary object describing upgrade requests.
"""
endpoint = self.endpoint()
request = HttpHelper(self.session)
response = request.get(endpoint)
return response["upgrades"]
def status(self, upgrade_id):
"""View the progress of each agent upgrade request.
You can make this call within 24 hours after an
upgrade completes to view the completed status.
Args:
upgrade_id (str): The ID of the agent upgrade request job.
Returns:
dict: Dictionary object describing the status of a
specific scheduled and started upgrade request.
"""
endpoint = "/v1/agent_upgrades/%s" % upgrade_id
request = HttpHelper(self.session)
response = request.get(endpoint)
return response
def create(self, **kwargs):
"""Create a request to upgrade agents
Keyword Args:
id (str): Server ID
group_id (str): Server group ID
descendants (boolean): Combined child server group or not
os_type (str): Linux or Windows
agent_version (str): The version of the installed Halo agent
agent_version_gte (str): An agent version that is greater than,
or equal to, the agent version specified
agent_version_gt (str): An agent version that is greater than
the agent version specified
agent_version_lte (str): An agent version that is less than,
or equal to, the agent version specified
agent_version_lt (str): An agent version that is less than
the agent version specified
Returns:
string: agent upgrade request ID.
"""
endpoint = "/v1/agent_upgrades"
request = HttpHelper(self.session)
body = {"upgrade": kwargs}
response = request.post(endpoint, body)
return response["upgrade"]["id"]
def delete(self, upgrade_id):
"""Deletes a scheduled upgrade job that you specify by ID.
If the call is successful, the scheduled upgrade request is
canceled and no action is taken on any of the agents within that job.
Args:
upgrade_id (str):The ID of the agent upgrade request job.
Returns:
None if successful, exceptions otherwise.
"""
endpoint = "/v1/agent_upgrades/%s" % upgrade_id
request = HttpHelper(self.session)
request.delete(endpoint)
return None
| {
"repo_name": "cloudpassage/cloudpassage-halo-python-sdk",
"path": "cloudpassage/agent_upgrade.py",
"copies": "1",
"size": "3326",
"license": "bsd-3-clause",
"hash": 8020971304069133000,
"line_mean": 32.595959596,
"line_max": 77,
"alpha_frac": 0.617558629,
"autogenerated": false,
"ratio": 4.771879483500717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 99
} |
"""agent URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
    # Authentication. NOTE(review): both login patterns use name='login',
    # so reverse('login') resolves to only one of them — confirm intended.
    url(r'^login/$', auth_views.login, name='login'),
    url(r'^accounts/login/$', auth_views.login, name='login'),
    url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
    # Sub-application URL configurations.
    url(r'^daily/', include('daily.urls')),
    url(r'^admin/', admin.site.urls),
    url(r'^blog$', views.blog, name='blog'),
    url(r'^blog1$', views.blog1, name='blog1'),
    url(r'^servers/', include('servers.urls')),
    url(r'^books_fbv_user/', include('books_fbv_user.urls')),
    url(r'^userprofile/', include('userprofile.urls')),
    url(r'^facebook/', include('thirdauth.urls')),
    url(r'^stats/', include('stats.urls')),
    # Third-party auth URL sets mounted at the site root.
    url('', include('social.apps.django_app.urls', namespace='social')),
    url('', include('django.contrib.auth.urls', namespace='auth')),
    url(r'^index1', views.index1, name='index1'),
    # Home page. The final catch-all r'^' routes any unmatched URL to
    # views.home. NOTE(review): three patterns share name='home'.
    url(r'^home/', views.home, name='home'),
    url(r'^home', views.home, name='home'),
    url(r'^', views.home, name='home'),
]
| {
"repo_name": "kinnevo/kic_alone",
"path": "agent/urls.py",
"copies": "1",
"size": "1763",
"license": "mit",
"hash": -2655886076923446300,
"line_mean": 35.7291666667,
"line_max": 79,
"alpha_frac": 0.6505955757,
"autogenerated": false,
"ratio": 3.3903846153846153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4540980191084615,
"avg_score": null,
"num_lines": null
} |
"""Agent with uses A3C trained network"""
import random
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from loveletter.env import LoveLetterEnv
from loveletter.agents.random import AgentRandom
from loveletter.agents.agent import Agent
from loveletter.trainers.a3c_model import ActorCritic
class AgentA3C(Agent):
    '''Agent which leverages Actor Critic Learning'''
    def __init__(self,
                 model_path,
                 dtype,
                 seed=451):
        """Load a trained actor-critic model.

        Args:
            model_path: path to a saved state_dict for ActorCritic.
            dtype: torch tensor type used for weights and inputs.
            seed: RNG seed for the internal env and random fallbacks.
        """
        self._seed = seed
        self._idx = 0  # move counter; perturbs the fallback RNG seed per move
        self._dtype = dtype
        # Internal environment is used to build state vectors from games.
        self.env = LoveLetterEnv(AgentRandom(seed), seed)
        state = self.env.reset()
        # Model input size is taken from the env's state vector length.
        self._model = ActorCritic(
            state.shape[0], self.env.action_space).type(dtype)
        self._model.load_state_dict(torch.load(model_path))
    def _move(self, game):
        '''Choose an action for the current game state with the trained
        network, falling back to a seeded random valid action when the
        network's choice is not playable.
        (NOTE(review): original docstring said "ends in score hole",
        apparently copied from a mancala agent.)
        '''
        assert game.active()
        self._idx += 1
        state = self.env.force(game)
        state = torch.from_numpy(state).type(self._dtype)
        # NOTE(review): volatile=True is the pre-0.4 PyTorch inference
        # API (torch.no_grad() on modern torch) — confirm the torch
        # version this project targets before modernizing.
        cx = Variable(torch.zeros(1, 256).type(self._dtype), volatile=True)
        hx = Variable(torch.zeros(1, 256).type(self._dtype), volatile=True)
        _, logit, (hx, cx) = self._model(
            (Variable(state.unsqueeze(0), volatile=True), (hx, cx)))
        prob = F.softmax(logit)
        # Greedy choice: index of the highest-probability action.
        action_idx = prob.max(1)[1].data.cpu().numpy()[0, 0]
        player_action = self.env.action_from_index(action_idx, game)
        if player_action is None:
            # Network selected an unplayable action; pick a seeded
            # random action among the valid ones instead.
            # print("ouch")
            options = Agent.valid_actions(game, self._seed + self._idx)
            if len(options) < 1:
                raise Exception("Unable to play without actions")
            random.seed(self._seed + self._idx)
            return random.choice(options)
        # print("playing ", self._idx, player_action)
        return player_action
| {
"repo_name": "user01/love-letter",
"path": "loveletter/agents/a3c.py",
"copies": "1",
"size": "1900",
"license": "mit",
"hash": -6599662120337115000,
"line_mean": 30.6666666667,
"line_max": 75,
"alpha_frac": 0.6052631579,
"autogenerated": false,
"ratio": 3.6538461538461537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47591093117461536,
"avg_score": null,
"num_lines": null
} |
"""ageo - active geolocation library: core.
"""
__all__ = ('Location', 'Map', 'Observation')
import bisect
import functools
import itertools
import numpy as np
import pyproj
from scipy import sparse
from shapely.geometry import Point, MultiPoint, Polygon, box as Box
from shapely.ops import transform as sh_transform
import tables
import math
import sys
# scipy.sparse.find() materializes vectors which, in several cases
# below, can be enormous. This is slower, but more memory-efficient.
# Code from https://stackoverflow.com/a/31244368/388520 with minor
# modifications.
def iter_csr_nonzero(matrix):
    """Iterate over the stored entries of a scipy CSR matrix, yielding
    (row, column, value) triples.  Memory-efficient replacement for
    scipy.sparse.find, which materializes full index vectors."""
    # Row i owns indptr[i+1] - indptr[i] stored entries, so repeat each
    # row index that many times to pair with indices/data positionally.
    row_lengths = matrix.indptr[1:] - matrix.indptr[:-1]
    row_indices = itertools.chain.from_iterable(
        itertools.repeat(row, count)
        for row, count in enumerate(row_lengths))
    return zip(row_indices, matrix.indices, matrix.data)
def Disk(x, y, radius):
    """Return a shapely polygon approximating the disk of the given
    radius centered on (x, y)."""
    center = Point(x, y)
    return center.buffer(radius)
# Important note: pyproj consistently takes coordinates in lon/lat
# order and distances in meters. lon/lat order makes sense for
# probability matrices, because longitudes are horizontal = columns,
# latitudes are vertical = rows, and scipy matrices are column-major
# (blech). Therefore, this library also consistently uses lon/lat
# order and meters.
# Coordinate transformations used by Location.centroid()
# WGS84 geodetic lat/long coordinates.
wgs_proj = pyproj.Proj("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
# Geocentric Cartesian coordinates (x, y, z in meters from Earth's center).
gcen_proj = pyproj.Proj("+proj=geocent +datum=WGS84 +units=m +no_defs")
wgs_to_gcen = functools.partial(pyproj.transform, wgs_proj, gcen_proj)
gcen_to_wgs = functools.partial(pyproj.transform, gcen_proj, wgs_proj)
# ... and Location.area()
# Cylindrical equal-area projection: planar areas after projection are
# true areas, which is what Location.area() needs.
cea_proj = pyproj.Proj(proj='cea', ellps='WGS84', lon_0=0, lat_ts=0)
wgs_to_cea = functools.partial(pyproj.transform, wgs_proj, cea_proj)
# Smooth over warts in pyproj.Geod.inv(), which is vectorized
# internally, but does not support numpy-style broadcasting, and
# returns things we don't need. The prebound _Inv and _Bcast are
# strictly performance hacks.
_WGS84geod = pyproj.Geod(ellps='WGS84')
def WGS84dist(lon1, lat1, lon2, lat2, *,
              _Inv = _WGS84geod.inv, _Bcast = np.broadcast_arrays):
    """Geodesic distance in meters between (lon1, lat1) and (lon2, lat2)
    on the WGS84 ellipsoid.  Arguments may be scalars or arrays and are
    broadcast against each other (_Inv/_Bcast are prebound perf hacks)."""
    broadcast_args = _Bcast(lon1, lat1, lon2, lat2)
    _fwd_az, _back_az, dist = _Inv(*broadcast_args)
    return dist
def cartesian2(a, b):
    """Cartesian product of two 1D vectors A and B."""
    # meshgrid with default 'xy' indexing lays A out along each row and
    # B constant per row; flattening row-major reproduces the
    # tile(a, len(b)) / repeat(b, len(a)) ordering.
    grid_a, grid_b = np.meshgrid(a, b)
    return grid_a.ravel(), grid_b.ravel()
def mask_ij(bounds, longitudes, latitudes):
    """Given a rectangle-tuple BOUNDS (west, south, east, north; as
    returned by shapely .bounds properties), and sorted grid index
    vectors LONGITUDES, LATITUDES, return vectors I, J which give the
    x- and y-indices of every grid point within the rectangle.
    LATITUDES and LONGITUDES must be sorted.

    Raises ValueError if BOUNDS does not unpack to four values.
    """
    try:
        (west, south, east, north) = bounds
    except ValueError as e:
        raise ValueError("invalid bounds argument {!r}".format(bounds)) from e
    # Binary-search the sorted grid vectors; left/right bisection makes
    # both edges of the rectangle inclusive.
    min_i = bisect.bisect_left(longitudes, west)
    max_i = bisect.bisect_right(longitudes, east)
    min_j = bisect.bisect_left(latitudes, south)
    max_j = bisect.bisect_right(latitudes, north)
    # np.arange is the idiomatic (and faster) form of np.array(range(...)).
    I = np.arange(min_i, max_i)
    J = np.arange(min_j, max_j)
    return cartesian2(I, J)
def mask_matrix(bounds, longitudes, latitudes):
    """Construct a sparse matrix which is 1 at all latitude+longitude
    grid points inside the rectangle BOUNDS, 0 outside.
    LATITUDES and LONGITUDES must be sorted.
    """
    I, J = mask_ij(bounds, longitudes, latitudes)
    ones = np.ones_like(I)
    shape = (len(longitudes), len(latitudes))
    return sparse.csr_matrix((ones, (I, J)), shape=shape)
class LocationRowOnDisk(tables.IsDescription):
    """The row format of the pytables table used to save Location objects
    on disk. See Location.save and Location.load."""
    grid_x = tables.UInt32Col()      # index into Location.longitudes
    grid_y = tables.UInt32Col()      # index into Location.latitudes
    longitude = tables.Float64Col()  # longitude of this grid point
    latitude = tables.Float64Col()   # latitude of this grid point
    prob_mass = tables.Float32Col()  # probability mass at this grid point
class Location:
"""An estimated location for a host. This is represented by a
probability mass function over the surface of the Earth, quantized
to a cell grid, and stored as a sparse matrix.
Properties:
resolution - Grid resolution, in meters at the equator
lon_spacing - East-west (longitude) grid resolution, in decimal degrees
lat_spacing - North-south (latitude) grid resolution, in decimal degrees
fuzz - Coastline uncertainty factor, in meters at the equator
north - Northernmost latitude covered by the grid
south - Southernmost latitude ditto
east - Easternmost longitude ditto
west - Westernmost longitude ditto
latitudes - Vector of latitude values corresponding to grid points
longitudes - Vector of longitude values ditto
probability - Probability mass matrix (may be lazily computed)
bounds - Bounding region of the nonzero portion of the
probability mass matrix (may be lazily computed)
centroid - Centroid of the nonzero &c
area - Weighted area of the nonzero &c
covariance - Covariance matrix of the nonzero &c
(relative to the centroid)
rep_pt - "Representative point" of the nonzero &c; see docstring
for exactly what this means
annotations - Dictionary of arbitrary additional metadata; saved and
loaded but not otherwise inspected by this code
You will normally not construct bare Location objects directly, only
Map and Observation objects (these are subclasses). However, any two
Locations can be _intersected_ to produce a new one.
A Location is _vacuous_ if it has no nonzero entries in its
probability matrix.
"""
    def __init__(self, *,
                 resolution, fuzz, lon_spacing, lat_spacing,
                 north, south, east, west,
                 longitudes, latitudes,
                 probability=None, vacuity=None, bounds=None,
                 centroid=None, covariance=None, rep_pt=None,
                 loaded_from=None, annotations=None
    ):
        """Initialize a Location from grid metadata (all keyword-only;
        see the class docstring for the meaning of each argument).
        Arguments defaulting to None are computed lazily on first
        access, or loaded on demand when `loaded_from` names an HDF
        file produced by save()."""
        # Grid geometry.
        self.resolution = resolution
        self.fuzz = fuzz
        self.north = north
        self.south = south
        self.east = east
        self.west = west
        self.lon_spacing = lon_spacing
        self.lat_spacing = lat_spacing
        self.longitudes = longitudes
        self.latitudes = latitudes
        # Lazily-computed state; None means "not computed yet".
        self._probability = probability
        self._vacuous = vacuity
        self._bounds = bounds
        self._centroid = centroid
        self._covariance = covariance
        self._rep_pt = rep_pt
        # HDF file this object was loaded from, if any (enables lazy
        # loading of the probability matrix; see _lazy_load_pmatrix).
        self._loaded_from = loaded_from
        self._area = None
        self.annotations = annotations if annotations is not None else {}
    @property
    def probability(self):
        # Lazy accessor: compute (or load) the matrix on first use.
        if self._probability is None:
            self.compute_probability_matrix_now()
        return self._probability
    @property
    def vacuous(self):
        # True when the probability matrix has no nonzero entries.
        if self._vacuous is None:
            self.compute_probability_matrix_now()
        return self._vacuous
    @property
    def centroid(self):
        # Weighted centroid (lon, lat); computed with the covariance.
        if self._centroid is None:
            self.compute_centroid_now()
        return self._centroid
    @property
    def covariance(self):
        # Covariance in geocentric coordinates, relative to the centroid.
        if self._covariance is None:
            self.compute_centroid_now()
        return self._covariance
    @property
    def area(self):
        """Weighted area of the nonzero region of the probability matrix."""
        if self._area is None:
            # Notionally, each grid point should be treated as a
            # rectangle of parallels and meridians _centered_ on the
            # point. The area of such a rectangle, however, only
            # depends on its latitude and its breadth; the actual
            # longitude values don't matter. Since the grid is
            # equally spaced, we can use [0],[1] always, and then we
            # do not have to worry about crossing the discontinuity at ±180.
            west = self.longitudes[0]
            east = self.longitudes[1]
            # For latitude, the actual values do matter, but the map
            # never goes all the way to the poles, and the grid is
            # equally spaced, so we can precompute the north-south
            # delta from any pair of latitudes and not have to worry
            # about running off the ends of the array.
            d_lat = (self.latitudes[1] - self.latitudes[0]) / 2
            # We don't need X, so throw it away immediately. (We
            # don't use iter_csr_nonzero here because we need to
            # modify V.)
            X, Y, V = sparse.find(self.probability); X = None
            # The value vector is supposed to be normalized, but make
            # sure it is, and then adjust from 1-overall to 1-per-cell
            # normalization.
            assert len(V.shape) == 1
            S = V.sum()
            if S == 0:
                # Vacuous location: area 0.  (Not cached in self._area,
                # so this is recomputed on each access.)
                return 0
            if S != 1:
                V /= S
            V *= V.shape[0]
            area = 0
            for y, v in zip(Y, V):
                north = self.latitudes[y] + d_lat
                south = self.latitudes[y] - d_lat
                if not (-90 <= south < north <= 90):
                    raise AssertionError("expected -90 <= {} < {} <= 90"
                                         .format(south, north))
                # Equal-area projection makes tile.area a true area.
                tile = sh_transform(wgs_to_cea, Box(west, south, east, north))
                area += v * tile.area
            self._area = area
        return self._area
@property
def rep_pt(self, epsilon=1e-8):
"""Representative point of the nonzero region of the probability
matrix. This is, of all points with the greatest probability,
the one closest to the centroid.
"""
if self._rep_pt is None:
lons = self.longitudes
lats = self.latitudes
cen = self.centroid
aeqd_cen = pyproj.Proj(proj='aeqd', ellps='WGS84', datum='WGS84',
lon_0=cen[0], lat_0=cen[1])
wgs_to_aeqd = functools.partial(pyproj.transform,
wgs_proj, aeqd_cen)
# mathematically, wgs_to_aeqd(Point(lon, lat)) == Point(0, 0);
# the latter is faster and more precise
cen_pt = Point(0,0)
# It is unacceptably costly to construct a shapely MultiPoint
# out of some locations with large regions (can require more than
# 32GB of scratch memory). Instead, iterate over the points
# one at a time.
max_prob = 0
min_dist = math.inf
rep_pt = None
for x, y, v in iter_csr_nonzero(self.probability):
lon = lons[x]
lat = lats[y]
if rep_pt is None or v > max_prob - epsilon:
dist = WGS84dist(cen[0], cen[1], lon, lat)
# v < max_prob has already been excluded
if (rep_pt is None or v > max_prob or
(v > max_prob - epsilon and dist < min_dist)):
rep_pt = [lon, lat]
max_prob = max(max_prob, v)
min_dist = dist
if rep_pt is None:
rep_pt = cen
else:
rep_pt = np.array(rep_pt)
self._rep_pt = rep_pt
return self._rep_pt
    def distance_to_point(self, lon, lat):
        """Find the shortest geodesic distance from (lon, lat) to a nonzero
        cell of the probability matrix.  Returns 0 when (lon, lat) lies
        within 1.5 grid cells of a nonzero cell."""
        # Azimuthal-equidistant projection centered on the query point:
        # planar distances from the origin are true geodesic distances.
        aeqd_pt = pyproj.Proj(proj='aeqd', ellps='WGS84', datum='WGS84',
                              lon_0=lon, lat_0=lat)
        wgs_to_aeqd = functools.partial(pyproj.transform, wgs_proj, aeqd_pt)
        # mathematically, wgs_to_aeqd(Point(lon, lat)) == Point(0, 0);
        # the latter is faster and more precise
        pt = Point(0, 0)
        # It is unacceptably costly to construct a shapely MultiPoint
        # out of some locations with large regions (requires more than
        # 32GB of scratch memory). Instead, iterate over the points
        # one at a time.
        min_distance = math.inf
        for x, y, v in iter_csr_nonzero(self.probability):
            cell = sh_transform(wgs_to_aeqd, Point(self.longitudes[x],
                                                   self.latitudes[y]))
            # Only buffer cells that could improve the minimum; the
            # buffer accounts for the cell's own extent.
            if pt.distance(cell) - self.resolution*2 < min_distance:
                cell = cell.buffer(self.resolution * 3/2)
                min_distance = min(min_distance, pt.distance(cell))
        # Within 1.5 cells counts as containment.
        if min_distance < self.resolution * 3/2:
            return 0
        return min_distance
    def contains_point(self, lon, lat):
        """True if a grid cell with a nonzero probability contains
        or adjoins (lon, lat)."""
        # Check the 3x3 neighborhood of the grid cell nearest (lon, lat).
        # NOTE(review): when (lon, lat) sits at the far edge of the grid,
        # bisect can return len(longitudes)/len(latitudes) and the i+1 /
        # j+1 lookups would index past the matrix — confirm callers only
        # pass in-grid coordinates.
        i = bisect.bisect_left(self.longitudes, lon)
        j = bisect.bisect_left(self.latitudes, lat)
        return (self.probability[i-1, j-1] > 0 or
                self.probability[i, j-1] > 0 or
                self.probability[i+1, j-1] > 0 or
                self.probability[i-1, j ] > 0 or
                self.probability[i, j ] > 0 or
                self.probability[i+1, j ] > 0 or
                self.probability[i-1, j+1] > 0 or
                self.probability[i, j+1] > 0 or
                self.probability[i+1, j+1] > 0)
def compute_probability_matrix_now(self):
"""Compute and set self._probability and self._vacuous.
"""
if self._probability is not None:
return
if self._loaded_from:
self._lazy_load_pmatrix()
else:
M, vac = self.compute_probability_matrix_within(self.bounds)
self._probability = M
self._vacuous = vac
    def compute_probability_matrix_within(self, bounds):
        """Subclasses must override if _probability is lazily computed.
        Returns a tuple (matrix, vacuous).
        """
        assert self._probability is not None
        assert self._vacuous is not None
        if self._vacuous:
            return self._probability, True # 0 everywhere, so 0 within bounds
        if bounds.is_empty or bounds.bounds == ():
            # Empty bounding region: all-zero matrix, vacuous result.
            return (
                sparse.csr_matrix((len(self.longitudes),
                                   len(self.latitudes))),
                True
            )
        # Mask the stored matrix down to the bounding rectangle, then
        # renormalize so the result is again a probability distribution.
        M = (mask_matrix(bounds.bounds, self.longitudes, self.latitudes)
             .multiply(self._probability))
        s = M.sum()
        if s:
            M /= s
            return M, False
        else:
            # Nothing nonzero inside the bounds: vacuous.
            return M, True
    @property
    def bounds(self):
        # Lazy accessor for the bounding region of the nonzero area.
        if self._bounds is None:
            self.compute_bounding_region_now()
        return self._bounds
    def compute_bounding_region_now(self):
        """Subclasses must implement if necessary:
        compute and set self._bounds.
        """
        # Base-class behavior: bounds only become available by loading
        # the probability matrix from disk.
        if self._bounds is None and self._loaded_from:
            self._lazy_load_pmatrix()
        assert self._bounds is not None
    def intersection(self, other, bounds=None):
        """Compute the intersection of this object's probability matrix with
        OTHER's. If BOUNDS is specified, we don't care about
        anything outside that area, and it will become the bounding
        region of the result; otherwise this object and OTHER's
        bounding regions are intersected first and the computation
        is restricted to that region.

        Returns a new Location.  Raises ValueError if the two
        locations' grids are not identical.
        """
        if (self.resolution != other.resolution or
            self.fuzz != other.fuzz or
            self.north != other.north or
            self.south != other.south or
            self.east != other.east or
            self.west != other.west or
            self.lon_spacing != other.lon_spacing or
            self.lat_spacing != other.lat_spacing):
            raise ValueError("can't intersect locations with "
                             "inconsistent grids")
        if bounds is None:
            bounds = self.bounds.intersection(other.bounds)
        # Compute P(self AND other), but only consider points inside
        # BOUNDS. For simplicity we actually look at the quantized
        # bounding rectangle of BOUNDS.
        M1, V1 = self.compute_probability_matrix_within(bounds)
        M2, V2 = other.compute_probability_matrix_within(bounds)
        # If either input is vacuous within BOUNDS the result is too;
        # reuse the vacuous (all-zero) matrix directly.
        if V1:
            M = M1
            V = True
        elif V2:
            M = M2
            V = True
        else:
            M = None
            V = False
            # Optimization: if M1 and M2 have the same set of nonzero
            # entries, and all the nonzero values in one matrix are equal
            # or nearly so, then just use the other matrix as the result,
            # because the multiply-and-then-normalize operation will be a
            # nop.
            if (np.array_equal(M1.indptr, M2.indptr) and
                np.array_equal(M1.indices, M2.indices)):
                if np.allclose(M1.data, M1.data[0]):
                    M = M2
                elif np.allclose(M2.data, M2.data[0]):
                    M = M1
            if M is None:
                # General case: elementwise product, then renormalize.
                M = M1.multiply(M2)
                s = M.sum()
                if s:
                    M /= s
                else:
                    V = True
                M.eliminate_zeros()
        return Location(
            resolution  = self.resolution,
            fuzz        = self.fuzz,
            north       = self.north,
            south       = self.south,
            east        = self.east,
            west        = self.west,
            lon_spacing = self.lon_spacing,
            lat_spacing = self.lat_spacing,
            longitudes  = self.longitudes,
            latitudes   = self.latitudes,
            probability = M,
            vacuity     = V,
            bounds      = bounds
        )
    def compute_centroid_now(self):
        """Compute the weighted centroid and covariance matrix
        of the probability mass function.
        Sets self._centroid (lon, lat) and self._covariance.
        """
        if self._centroid is not None: return
        # The centroid of a cloud of points is just the average of
        # their coordinates, but this only works correctly in
        # geocentric Cartesian space, not in lat/long space.
        X = []
        Y = []
        Z = []
        for i, j, v in iter_csr_nonzero(self.probability):
            lon = self.longitudes[i]
            lat = self.latitudes[j]
            # PROJ.4 requires a dummy third argument when converting
            # to geocentric (this appears to be interpreted as meters
            # above/below the datum).
            x, y, z = wgs_to_gcen(lon, lat, 0)
            if math.isinf(x) or math.isinf(y) or math.isinf(z):
                # Skip unprojectable points, but leave a trace on stderr.
                sys.stderr.write("wgs_to_gcen({}, {}, 0) = {}, {}, {}\n"
                                 .format(lon, lat, x, y, z))
            else:
                # Weight each coordinate by its probability mass.
                X.append(x*v)
                Y.append(y*v)
                Z.append(z*v)
        # We leave the covariance matrix in geocentric terms, since
        # I'm not sure how to transform it back to lat/long space, or
        # if that even makes sense.
        M = np.vstack((X, Y, Z))
        self._covariance = np.cov(M)
        # Since the probability matrix is normalized, it is not
        # necessary to divide the weighted sums by anything to get
        # the means.
        lon, lat, _ = gcen_to_wgs(*np.sum(M, 1))
        if math.isinf(lat) or math.isinf(lon):
            raise ValueError("bogus centroid {}/{} - X={} Y={} Z={}"
                             .format(lat, lon, X, Y, Z))
        self._centroid = np.array((lon, lat))
    def save(self, fname):
        """Write out this location to an HDF file.
        For compactness, we write only the nonzero entries in a
        pytables record form, and we _don't_ write out the full
        longitude/latitude grid (it can be reconstructed from
        the other metadata).
        """
        # Force centroid/covariance computation so they can be saved.
        self.compute_centroid_now()
        with tables.open_file(fname, mode="w", title="location") as f:
            t = f.create_table(f.root, "location",
                               LocationRowOnDisk, "location",
                               expectedrows=self.probability.getnnz())
            # Grid metadata goes in table attributes; load() rebuilds
            # the longitude/latitude vectors from these.
            t.attrs.resolution  = self.resolution
            t.attrs.fuzz        = self.fuzz
            t.attrs.north       = self.north
            t.attrs.south       = self.south
            t.attrs.east        = self.east
            t.attrs.west        = self.west
            t.attrs.lon_spacing = self.lon_spacing
            t.attrs.lat_spacing = self.lat_spacing
            t.attrs.lon_count   = len(self.longitudes)
            t.attrs.lat_count   = len(self.latitudes)
            t.attrs.centroid    = self.centroid
            t.attrs.covariance  = self.covariance
            if self.annotations:
                t.attrs.annotations = self.annotations
            # One row per nonzero probability cell.
            cur = t.row
            for i, j, pmass in iter_csr_nonzero(self.probability):
                lon = self.longitudes[i]
                lat = self.latitudes[j]
                cur['grid_x']    = i
                cur['grid_y']    = j
                cur['longitude'] = lon
                cur['latitude']  = lat
                cur['prob_mass'] = pmass
                cur.append()
            t.flush()
def _lazy_load_pmatrix(self):
assert self._loaded_from is not None
with tables.open_file(self._loaded_from, "r") as f:
t = f.root.location
M = sparse.dok_matrix((t.attrs.lon_count, t.attrs.lat_count),
dtype=np.float32)
vacuous = True
negative_warning = False
for row in t.iterrows():
pmass = row['prob_mass']
# The occasional zero is normal, but negative numbers
# should never occur.
if pmass > 0:
M[row['grid_x'], row['grid_y']] = pmass
vacuous = False
elif pmass < 0:
if not negative_warning:
sys.stderr.write(fname + ": warning: negative pmass\n")
negative_warning = True
M = M.tocsr()
if vacuous:
wb = 0
eb = 0
sb = 0
nb = 0
else:
i, j = M.nonzero()
wb = self.longitudes[i.min()]
eb = self.longitudes[i.max()]
sb = self.latitudes[j.min()]
nb = self.latitudes[j.max()]
self._probability = M
self._vacuous = vacuous
self._bounds = Box(wb, sb, eb, nb)
    @classmethod
    def load(cls, fname):
        """Read an HDF file containing a location (the result of save())
        and instantiate a Location object from it. The probability
        matrix is lazily loaded.
        """
        with tables.open_file(fname, "r") as f:
            t = f.root.location
            # Reconstruct the grid vectors from the saved metadata;
            # save() deliberately does not store them.
            longs = np.linspace(t.attrs.west, t.attrs.east,
                                t.attrs.lon_count)
            lats = np.linspace(t.attrs.south, t.attrs.north,
                               t.attrs.lat_count)
            # loaded_from=fname arms _lazy_load_pmatrix for the
            # probability matrix and bounds.
            return cls(
                resolution  = t.attrs.resolution,
                fuzz        = t.attrs.fuzz,
                north       = t.attrs.north,
                south       = t.attrs.south,
                east        = t.attrs.east,
                west        = t.attrs.west,
                lon_spacing = t.attrs.lon_spacing,
                lat_spacing = t.attrs.lat_spacing,
                longitudes  = longs,
                latitudes   = lats,
                centroid    = getattr(t.attrs, 'centroid', None),
                covariance  = getattr(t.attrs, 'covariance', None),
                annotations = getattr(t.attrs, 'annotations', None),
                loaded_from = fname
            )
class Map(Location):
    """The map on which to locate a host.
    Maps are defined by HDF5 files (see maps/ for the program that
    generates these from shapefiles) that define a grid over the
    surface of the Earth and a "baseline matrix" which specifies
    the Bayesian prior probability of locating a host at any point
    on that grid. (For instance, nobody puts servers in the middle
    of the ocean.)
    """
    def __init__(self, mapfile):
        """Load the baseline prior from MAPFILE (an HDF5 file)."""
        with tables.open_file(mapfile, 'r') as f:
            M = f.root.baseline
            # Accept the baseline either as (lon, lat) or transposed
            # (lat, lon); normalize to longitudes-as-rows.
            if M.shape[0] == len(M.attrs.longitudes):
                baseline = sparse.csr_matrix(M)
            elif M.shape[1] == len(M.attrs.longitudes):
                baseline = sparse.csr_matrix(M).T
            else:
                raise RuntimeError(
                    "mapfile matrix shape {!r} is inconsistent with "
                    "lon/lat vectors ({},{})"
                    .format(M.shape,
                            len(M.attrs.longitudes),
                            len(M.attrs.latitudes)))
            # The probabilities stored in the file are not normalized.
            s = baseline.sum()
            assert s > 0
            baseline /= s
            # Note: this bound may not be tight, but it should be
            # good enough. It's not obvious to me how to extract
            # a tight bounding rectangle from a scipy sparse matrix.
            bounds = Box(M.attrs.west, M.attrs.south,
                         M.attrs.east, M.attrs.north)
            if not bounds.is_valid:
                # buffer(0) is the standard shapely trick for repairing
                # an invalid polygon.
                bounds = bounds.buffer(0)
                assert bounds.is_valid
            Location.__init__(
                self,
                resolution  = M.attrs.resolution,
                fuzz        = M.attrs.fuzz,
                north       = M.attrs.north,
                south       = M.attrs.south,
                east        = M.attrs.east,
                west        = M.attrs.west,
                lon_spacing = M.attrs.lon_spacing,
                lat_spacing = M.attrs.lat_spacing,
                longitudes  = M.attrs.longitudes,
                latitudes   = M.attrs.latitudes,
                probability = baseline,
                vacuity     = False,
                bounds      = bounds
            )
class Observation(Location):
    """A single observation of the distance to a host.
    An observation is defined by a map (used only for its grid;
    if you want to intersect the observation with the map, do that
    explicitly), the longitude and latitude of a reference point, a
    _ranging function_ (see ageo.ranging) that computes probability
    as a function of distance, calibration data for the ranging
    function (see ageo.calibration) and finally a set of observed
    round-trip times.
    Both the bounds and the probability matrix are computed lazily.
    """
    def __init__(self, *,
                 basemap, ref_lon, ref_lat,
                 range_fn, calibration, rtts):
        # Inherit the grid geometry from BASEMAP; the probability
        # matrix and bounds are left unset and computed lazily.
        Location.__init__(
            self,
            resolution  = basemap.resolution,
            fuzz        = basemap.fuzz,
            north       = basemap.north,
            south       = basemap.south,
            east        = basemap.east,
            west        = basemap.west,
            lon_spacing = basemap.lon_spacing,
            lat_spacing = basemap.lat_spacing,
            longitudes  = basemap.longitudes,
            latitudes   = basemap.latitudes
        )
        self.ref_lon = ref_lon
        self.ref_lat = ref_lat
        self.calibration = calibration
        self.rtts = rtts
        # range_fn is a factory: instantiate it with this observation's
        # calibration data, RTTs, and the map's coastline fuzz.
        self.range_fn = range_fn(calibration, rtts, basemap.fuzz)
    def compute_bounding_region_now(self):
        """Compute self._bounds: the set of points on the Earth within
        the ranging function's distance bound of the reference point."""
        if self._bounds is not None: return
        distance_bound = self.range_fn.distance_bound()
        # If the distance bound is too close to half the circumference
        # of the Earth, the projection operation below will produce an
        # invalid polygon. We don't get much use out of a bounding
        # region that includes the whole planet but for a tiny disk
        # (which will probably be somewhere in the ocean anyway) so
        # just give up and say that the bound is the entire planet.
        # Similarly, if the distance bound is zero, give up.
        if distance_bound > 19975000 or distance_bound == 0:
            self._bounds = Box(self.west, self.south, self.east, self.north)
            return
        # To find all points on the Earth within a certain distance of
        # a reference latitude and longitude, back-project onto the
        # Earth from an azimuthal-equidistant map with its zero point
        # at the reference latitude and longitude.
        aeqd = pyproj.Proj(proj='aeqd', ellps='WGS84', datum='WGS84',
                           lat_0=self.ref_lat, lon_0=self.ref_lon)
        try:
            disk = sh_transform(
                functools.partial(pyproj.transform, aeqd, wgs_proj),
                Disk(0, 0, distance_bound))
            # Two special cases must be manually dealt with. First, if
            # any side of the "circle" (really a many-sided polygon)
            # crosses the coordinate singularity at longitude ±180, we
            # must replace it with a diversion to either the north or
            # south pole (whichever is closer) to ensure that it still
            # encloses all of the area it should.
            boundary = np.array(disk.boundary)
            i = 0
            while i < boundary.shape[0] - 1:
                if abs(boundary[i+1,0] - boundary[i,0]) > 180:
                    pole = self.south if boundary[i,1] < 0 else self.north
                    west = self.west if boundary[i,0] < 0 else self.east
                    east = self.east if boundary[i,0] < 0 else self.west
                    boundary = np.insert(boundary, i+1, [
                        [west, boundary[i,1]],
                        [west, pole],
                        [east, pole],
                        [east, boundary[i+1,1]]
                    ], axis=0)
                    # Skip over the four points just inserted.
                    i += 5
                else:
                    i += 1
            # If there were two edges that crossed the singularity and they
            # were both on the same side of the equator, the excursions will
            # coincide and shapely will be unhappy. buffer(0) corrects this.
            disk = Polygon(boundary).buffer(0)
            # Second, if the disk is very large, the projected disk might
            # enclose the complement of the region that it ought to enclose.
            # If it doesn't contain the reference point, we must subtract it
            # from the entire map.
            origin = Point(self.ref_lon, self.ref_lat)
            if not disk.contains(origin):
                disk = (Box(self.west, self.south, self.east, self.north)
                        .difference(disk))
            assert disk.is_valid
            assert disk.contains(origin)
            self._bounds = disk
        except Exception as e:
            # Attach debugging context before propagating.
            setattr(e, 'offending_disk', disk)
            setattr(e, 'offending_obs', self)
            raise
    def compute_probability_matrix_within(self, bounds):
        """Evaluate the ranging function at every grid point inside
        BOUNDS (quantized to its bounding rectangle, intersected with
        this observation's own bounds).  Returns (matrix, vacuous)."""
        if not bounds.is_empty and bounds.bounds != ():
            I, J = mask_ij(bounds.intersection(self.bounds).bounds,
                           self.longitudes,
                           self.latitudes)
            # Probability at each point is a function of its geodesic
            # distance from the reference point.
            pvals = self.range_fn.unnormalized_pvals(
                WGS84dist(self.ref_lon,
                          self.ref_lat,
                          self.longitudes[I],
                          self.latitudes[J]))
            s = pvals.sum()
            if s:
                pvals /= s
                M = sparse.csr_matrix((pvals, (I, J)),
                                      shape=(len(self.longitudes),
                                             len(self.latitudes)))
                M.eliminate_zeros()
                return (M, False)
        # Empty bounds, or no probability mass inside them: vacuous.
        return (
            sparse.csr_matrix((len(self.longitudes),
                               len(self.latitudes))),
            True
        )
| {
"repo_name": "zackw/active-geolocator",
"path": "lib/ageo/ageo.py",
"copies": "1",
"size": "32510",
"license": "mit",
"hash": -2970625627687039500,
"line_mean": 39.1333333333,
"line_max": 79,
"alpha_frac": 0.5461117263,
"autogenerated": false,
"ratio": 4.096270161290323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5142381887590323,
"avg_score": null,
"num_lines": null
} |
# A geology museum in California has six different rocks
# sitting in a row on a shelf, with labels on the shelf
# telling what type of rock each is. An earthquake hits
# and the rocks all fall off the shelf. A janitor comes in
# and, wanting to clean the floor, puts the rocks back on
# the shelf in random order. The probability that the janitor
# put all six rocks behind their correct labels is 1/6!,
# or 1/720. But what are the chances that exactly five rocks
# are in the correct place, exactly four rocks are in the
# correct place, exactly three rocks are in the correct place,
# exactly two rocks are in the correct place, exactly one
# rock is in the correct place, and none of the rocks
# are in the correct place?
from collections import Counter
import itertools
ORDER = 'ABCDEF'
def count_same(input, correct):
    """Return the number of positions at which *input* matches *correct*.

    Extra elements of the longer argument are ignored (zip truncates).

    Note: the parameter name ``input`` shadows the builtin of the same
    name; it is kept unchanged for backward compatibility with callers.
    """
    return sum(1 for a, b in zip(input, correct) if a == b)
# Enumerate every way the janitor can re-shelve the rocks, then tally how
# many rocks land behind their own labels in each arrangement and report
# the fraction of arrangements producing each match count.
possibles = list(map(''.join, itertools.permutations(ORDER)))
total = len(possibles)
counts = Counter(count_same(arrangement, ORDER) for arrangement in possibles)
for num, pos in counts.items():
    print("{} : {:2.2%}".format(num, pos/total))
| {
"repo_name": "andrewzwicky/puzzles",
"path": "FiveThirtyEightRiddler/2016-12-16/express.py",
"copies": "1",
"size": "1188",
"license": "mit",
"hash": -152849651564609660,
"line_mean": 37.3225806452,
"line_max": 79,
"alpha_frac": 0.718013468,
"autogenerated": false,
"ratio": 3.394285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4612299182285714,
"avg_score": null,
"num_lines": null
} |
"""ageo.ranging - active geolocation library: ranging functions.
A ranging function computes (non-normalized) probability as a function
of geographic location, given a reference location, calibration data
(see ageo.calibration), and a set of timing observations. This module
provides several different algorithms for this calculation.
"""
import numpy as np
import pyproj
from shapely.geometry import Point
from shapely.ops import transform as sh_transform
from functools import partial
from sys import stderr
from scipy import interpolate, stats
from .calibration import PhysicalLimitsOnly
# Geodetic (lat/long) coordinate reference system on the WGS84 ellipsoid.
WGS84_globe = pyproj.Proj(proj='latlong', ellps='WGS84')

def Disk(x, y, radius):
    """Return a shapely polygon approximating the disk of RADIUS centered at (x, y)."""
    return Point(x, y).buffer(radius)
# Convenience wrappers for forward and inverse geodetic computations
# on the WGS84 ellipsoid, smoothing over some warts in pyproj.Geod.
# inv() and fwd() take coordinates in lon/lat order and distances in
# meters, whereas the rest of this program uses lat/lon order and
# distances in kilometers. They are vectorized internally, but do not
# support numpy-style broadcasting. The prebound _Fwd, _Inv, and
# _Bcast are strictly performance hacks.
# Shared geodesic calculator backing the wrapper functions below.
_WGS84geod = pyproj.Geod(ellps='WGS84')

def WGS84dist(lat1, lon1, lat2, lon2, *,
              _Inv = _WGS84geod.inv, _Bcast = np.broadcast_arrays):
    """Geodesic distance, in kilometers, between (lat1, lon1) and (lat2, lon2).

    _Inv and _Bcast are prebound keyword-only defaults purely as a
    lookup-speed hack; callers must not pass them.
    """
    # pyproj wants lon/lat order and returns meters; convert to km.
    _, _, dist = _Inv(*_Bcast(lon1, lat1, lon2, lat2))
    return dist/1000
def WGS84loc(lat, lon, az, dist, *,
             _Fwd = _WGS84geod.fwd, _Bcast = np.broadcast_arrays):
    """Return the (lat, lon) reached from (lat, lon) at azimuth AZ, distance DIST km.

    _Fwd and _Bcast are prebound keyword-only defaults purely as a
    lookup-speed hack; callers must not pass them.
    """
    # pyproj wants lon/lat order and distance in meters.
    tlon, tlat, _ = _Fwd(*_Bcast(lon, lat, az, dist*1000))
    return tlat, tlon
# half of the equatorial circumference of the Earth, in meters
# it is impossible for the target to be farther away than this
# NOTE(review): WGS84dist above returns kilometers; confirm that values
# compared against this limit are in meters as the comment states.
DISTANCE_LIMIT = 20037508

# PhysicalLimitsOnly instances are data-independent, so we only need two
PHYSICAL_BOUNDS = PhysicalLimitsOnly('physical')
EMPIRICAL_BOUNDS = PhysicalLimitsOnly('empirical')
class RangingFunction:
    """Abstract base class for ranging functions.

    A concrete subclass combines a calibration with a set of RTT
    observations (plus a fuzz factor) and maps distances to
    unnormalized probabilities.
    """

    def __init__(self, calibration, rtts, fuzz):
        # Record the inputs verbatim; interpretation is left to subclasses.
        self.fuzz = fuzz
        self.rtts = rtts
        self.calibration = calibration

    def unnormalized_pvals(self, distances):
        """Relative probability at each of DISTANCES; subclass hook."""
        raise NotImplementedError

    def distance_bound(self):
        """Largest feasible target distance; subclass hook."""
        raise NotImplementedError
class MinMax(RangingFunction):
    """Flat-topped min-max ranging function.

    Ideally this would be a constant nonzero value between the minimum
    and maximum feasible distances reported by the calibration, and zero
    elsewhere.  Because empirical calibrations occasionally emit
    observations inconsistent with ground truth, probability does not
    drop to zero abruptly at the calibration limits: it decays linearly
    out to the PHYSICAL_BOUNDS limits, with a knee at EMPIRICAL_BOUNDS.
    """

    def __init__(self, *args, **kwargs):
        RangingFunction.__init__(self, *args, **kwargs)
        cal_lo, cal_hi = \
            self.calibration.distance_range(self.rtts)
        emp_lo, emp_hi = EMPIRICAL_BOUNDS.distance_range(self.rtts)
        phy_lo, phy_hi = PHYSICAL_BOUNDS.distance_range(self.rtts)

        # Clamp every limit into [0, DISTANCE_LIMIT] and order them; the
        # sorted six-point sequence forms the interpolation knots.
        def clamp(val):
            return min(DISTANCE_LIMIT, max(0, val))

        self.bounds = sorted(
            clamp(val)
            for val in (cal_lo, cal_hi, emp_lo, emp_hi, phy_lo, phy_hi))

        # Trapezoid-shaped profile: ramps 0 -> .75 -> 1, plateau, then
        # back down; zero outside the outermost knots.
        self.interpolant = interpolate.interp1d(
            self.bounds,
            [0, .75, 1, 1, .75, 0],
            kind = 'linear',
            fill_value = 0,
            bounds_error = False
        )

    def distance_bound(self):
        """Outermost knot -- beyond it probability is exactly zero."""
        return self.bounds[-1]

    def unnormalized_pvals(self, dist):
        """Evaluate the trapezoid profile at DIST (scalar or array)."""
        return self.interpolant(dist)
class Gaussian(RangingFunction):
    """A Gaussian ranging function is simply the pdf of a normal
    distribution with mean and standard deviation given by the
    calibration.

    Right now it only makes sense to use ranging.Gaussian with
    calibration.Spotter, and this class has intimate knowledge
    of calibration.Spotter's internals.

    For the reasons discussed above, the outer distance bound for
    this function is the PHYSICAL_BOUNDS distance bound, and this
    is also used as a "clip" on the pdf (which is nonzero
    everywhere).
    """

    def __init__(self, *args, **kwargs):
        RangingFunction.__init__(self, *args, **kwargs)
        # Spotter internals: _mu/_sigma map an RTT to the parameters of
        # the normal distribution.  Fail fast on other calibrations.
        if not hasattr(self.calibration, '_mu') \
           or not hasattr(self.calibration, '_sigma'):
            raise TypeError("Gaussian ranging function requires a "
                            "calibration with _mu and _sigma")
        min_cal, max_cal = \
            self.calibration.distance_range(self.rtts)
        min_phy, max_phy = PHYSICAL_BOUNDS.distance_range(self.rtts)
        # Take the loosest of the calibration and physical limits.
        self._distance_bound = max(min_cal, max_cal, min_phy, max_phy)
        # FIXME: this logic belongs in calibration.Spotter.
        # NOTE(review): np.percentile's q is on a 0-100 scale, so .25 is
        # the 0.25th percentile (essentially the minimum RTT), not the
        # median that the variable name suggests.  Possibly deliberate
        # (minimum RTT best approximates propagation delay) -- confirm.
        med_rtt = np.percentile(self.rtts, .25)
        mu = self.calibration._mu(med_rtt)
        sigma = self.calibration._sigma(med_rtt)
        # mu and sigma can go negative when two nodes are very close together,
        # which causes stats.norm.pdf() to spit out nothing but NaN.
        mu = max(mu, 1000)  # lower bound at 1km
        sigma = max(sigma, 1000/3)  # lower bound at 3sigma=1km
        self._distribution = stats.norm(
            loc = mu,
            scale = sigma
        )

    def distance_bound(self):
        return self._distance_bound

    def unnormalized_pvals(self, dist):
        rv = self._distribution.pdf(dist)
        if not np.isfinite(rv).all():
            # Attach diagnostics to the exception so the caller can
            # record which inputs and which ranging function misbehaved.
            ve = ValueError("pdf returned non-finite values")
            ve.req_domain = dist
            ve.req_range = rv
            ve.range_fn = self
            raise ve
        # The normal pdf is nonzero everywhere; clip it past the bound.
        rv[dist > self._distance_bound] = 0
        return rv
| {
"repo_name": "zackw/active-geolocator",
"path": "lib/ageo/ranging.py",
"copies": "1",
"size": "5898",
"license": "mit",
"hash": 4996189226042431000,
"line_mean": 37.0516129032,
"line_max": 78,
"alpha_frac": 0.6583587657,
"autogenerated": false,
"ratio": 3.768690095846645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4927048861546645,
"avg_score": null,
"num_lines": null
} |
# --- list basics: indexing, append, count/index, insert/remove, sort ---
ages = [16, 91, 29, 38, 14, 22]
print(ages)
print(ages[1])           # second element (indexing is zero-based)
ages.append(100)
print(ages)
ages.append(20)
ages.append(20)
ages.append(20)
print(ages)
print(ages.count(20))    # how many 20s are in the list
print(ages.index(20))    # position of the first 20
ages.insert(1, 50)
print(ages)
ages.insert(1, 60)
print(ages)
ages.remove(20)          # removes only the first matching 20
print(ages)
ages.reverse()
print(ages)
ages.reverse()
ages.sort()              # in-place ascending sort
print(ages)
ages.reverse()           # ...then flip to descending order
print(ages)
# --- a list used as a stack (LIFO): append and pop at the tail ---
stack = []
for i in range(10):
    stack.append(i)
print(stack)
stack.append(10)
print(stack)
n = stack.pop()          # pops 10
m = stack.pop()          # pops 9
print(stack)

# --- a list used as a queue (FIFO): append at tail, remove from head ---
# NOTE(review): queue[0] + remove() is O(n) per dequeue; collections.deque
# is the idiomatic queue, but would change the printed repr in this demo.
queue = []
for l in range(10):
    queue.append(l)
print(queue)
queue.append(50)
queue.append(60)
queue.append(70)
print(queue)
n = queue[0]
queue.remove(n)          # dequeue the front element
print(queue)
# --- 2-D grids as nested lists (list of row lists) ---
grid = [
    [1,2,3],
    [4,5,6],
    [7,8,9]]
print(grid)
print(grid[0])           # first row
print(len(grid[0]))      # number of columns in the first row
for n in grid[0]: print(n)
grid[0][0] = 100         # element access is [row][column]
grid[0][1] = 200
grid[0][2] = 300
print(grid[0])
# Build a 10x10 grid of 10s with a nested comprehension.
grid = [
    [10 for col in range(10)]
    for row in range(10)]
for row in grid: print(row)
# A 10-row x 12-column tile map stored as a single flat list.
level = [
    1,1,1,1,1,1,1,1,1,1,1,1,
    2,2,2,2,2,2,2,2,2,2,2,2,
    3,3,3,3,3,3,3,3,3,3,3,3,
    1,1,1,1,1,1,1,1,1,1,1,1,
    1,1,1,1,1,0,0,1,1,1,1,1,
    1,1,1,1,1,0,0,1,1,1,1,1,
    1,1,1,1,1,1,1,1,1,1,1,1,
    3,3,3,3,3,3,3,3,3,3,3,3,
    2,2,2,2,2,2,2,2,2,2,2,2,
    1,1,1,1,1,1,1,1,1,1,1,1]
# NOTE(review): this prints the earlier 10x10 `grid`, not `level`;
# preserved as-is to keep output unchanged -- confirm it was intended.
print(grid)
# Print the flat list as a 10-row x 12-column grid.
# BUG FIX: the row stride must equal the column count (12), not 10;
# the previous `row*10+col` made consecutive rows overlap by 2 tiles.
for row in range(10):
    s = ""
    for col in range(12):
        s += str(level[row*12+col]) + " "
    print(s)
# The same tile map as a nested list: one inner list per row, so no
# index arithmetic is needed to print it row by row.
level = [
    [1,1,1,1,1,1,1,1,1,1,1,1],
    [2,2,2,2,2,2,2,2,2,2,2,2],
    [3,3,3,3,3,3,3,3,3,3,3,3],
    [1,1,1,1,1,1,1,1,1,1,1,1],
    [1,1,1,1,1,0,0,1,1,1,1,1],
    [1,1,1,1,1,0,0,1,1,1,1,1],
    [1,1,1,1,1,1,1,1,1,1,1,1],
    [3,3,3,3,3,3,3,3,3,3,3,3],
    [2,2,2,2,2,2,2,2,2,2,2,2],
    [1,1,1,1,1,1,1,1,1,1,1,1]]
for row in level: print(row)
# --- tuples: immutable sequences ---
tuple1 = (1,2,3,4,5)
print(tuple1)
a,b,c,d,e = tuple1       # unpack the five elements into five names
print(a,b,c,d,e)
# NOTE: parentheses around a `for` make a generator expression, not a
# tuple -- `data` yields its values once, lazily.
data = (100 for n in range(10))
for n in data: print(n)
# A read-only tile map: a tuple of tuples.
level = (
    (1,1,1,1,1,1,1,1,1,1,1,1),
    (2,2,2,2,2,2,2,2,2,2,2,2),
    (3,3,3,3,3,3,3,3,3,3,3,3),
    (1,1,1,1,1,1,1,1,1,1,1,1),
    (1,1,1,1,1,0,0,1,1,1,1,1),
    (1,1,1,1,1,0,0,1,1,1,1,1),
    (1,1,1,1,1,1,1,1,1,1,1,1),
    (3,3,3,3,3,3,3,3,3,3,3,3),
    (2,2,2,2,2,2,2,2,2,2,2,2),
    (1,1,1,1,1,1,1,1,1,1,1,1))
for row in level: print(row)
# Tuples support search/membership operations like lists do.
names = ("john","jane","dave","robert","andrea","susan")
print(names)
print(names.index("dave"))
print("jane" in names)       # membership test
print("bob" in names)
print(names.count("susan"))
print(len(names))
| {
"repo_name": "smartdong/PythonPractise",
"path": "Chapter 09/example.py",
"copies": "1",
"size": "2603",
"license": "mit",
"hash": -974050457698880400,
"line_mean": 16.4609929078,
"line_max": 56,
"alpha_frac": 0.5059546677,
"autogenerated": false,
"ratio": 1.7707482993197279,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.2776702967019728,
"avg_score": null,
"num_lines": null
} |
"""A gevent based handler."""
from __future__ import absolute_import
import atexit
import logging
import gevent
import gevent.event
import gevent.queue
import gevent.select
import gevent.thread
from gevent.queue import Empty
from gevent.queue import Queue
from gevent import socket
try:
from gevent.lock import Semaphore, RLock
except ImportError:
from gevent.coros import Semaphore, RLock
from kazoo.handlers.utils import create_tcp_socket, create_tcp_connection
# True when running under the legacy libevent-based gevent 0.x line.
_using_libevent = gevent.__version__.startswith('0.')

log = logging.getLogger(__name__)

# Sentinel pushed onto a worker queue to tell its greenlet to exit.
_STOP = object()

AsyncResult = gevent.event.AsyncResult
class SequentialGeventHandler(object):
    """Gevent handler for sequentially executing callbacks.

    This handler executes callbacks in a sequential manner. A queue is
    created for each of the callback events, so that each type of event
    has its callback type run sequentially.

    Each queue type has a greenlet worker that pulls the callback event
    off the queue and runs it in the order the client sees it.

    This split helps ensure that watch callbacks won't block session
    re-establishment should the connection be lost during a Zookeeper
    client call.

    Watch callbacks should avoid blocking behavior as the next callback
    of that type won't be run until it completes. If you need to block,
    spawn a new greenlet and return immediately so callbacks can
    proceed.
    """
    name = "sequential_gevent_handler"
    sleep_func = staticmethod(gevent.sleep)

    def __init__(self):
        """Create a :class:`SequentialGeventHandler` instance"""
        self.callback_queue = Queue()
        self._running = False
        self._async = None
        # Serializes start()/stop() state transitions.
        self._state_change = Semaphore()
        self._workers = []

    class timeout_exception(gevent.event.Timeout):
        # Timeout subclass so callers can raise/catch handler-specific
        # timeouts with a message.
        def __init__(self, msg):
            gevent.event.Timeout.__init__(self, exception=msg)

    def _create_greenlet_worker(self, queue):
        # Spawn a greenlet that drains `queue`, running each callable in
        # FIFO order until it sees the _STOP sentinel.
        def greenlet_worker():
            while True:
                try:
                    func = queue.get()
                    if func is _STOP:
                        break
                    func()
                except Empty:
                    continue
                except Exception as exc:
                    # Log and keep going: one failing callback must not
                    # kill the worker greenlet.
                    log.warning("Exception in worker greenlet")
                    log.exception(exc)
        return gevent.spawn(greenlet_worker)

    def start(self):
        """Start the greenlet workers."""
        with self._state_change:
            if self._running:
                return

            self._running = True

            # Spawn our worker greenlets, we have
            # - A callback worker for watch events to be called
            for queue in (self.callback_queue,):
                w = self._create_greenlet_worker(queue)
                self._workers.append(w)
            # Make sure workers are shut down at interpreter exit.
            atexit.register(self.stop)

    def stop(self):
        """Stop the greenlet workers and empty all queues."""
        with self._state_change:
            if not self._running:
                return

            self._running = False

            for queue in (self.callback_queue,):
                queue.put(_STOP)

            while self._workers:
                worker = self._workers.pop()
                worker.join()

            # Clear the queues
            self.callback_queue = Queue()  # pragma: nocover

            # atexit.unregister only exists on Python 3.
            if hasattr(atexit, "unregister"):
                atexit.unregister(self.stop)

    def select(self, *args, **kwargs):
        # gevent-aware (cooperative) select().
        return gevent.select.select(*args, **kwargs)

    def socket(self, *args, **kwargs):
        # TCP socket built on gevent's cooperative socket module.
        return create_tcp_socket(socket)

    def create_connection(self, *args, **kwargs):
        # Cooperative equivalent of socket.create_connection.
        return create_tcp_connection(socket, *args, **kwargs)

    def event_object(self):
        """Create an appropriate Event object"""
        return gevent.event.Event()

    def lock_object(self):
        """Create an appropriate Lock object"""
        return gevent.thread.allocate_lock()

    def rlock_object(self):
        """Create an appropriate RLock object"""
        return RLock()

    def async_result(self):
        """Create a :class:`AsyncResult` instance

        The :class:`AsyncResult` instance will have its completion
        callbacks executed in the thread the
        :class:`SequentialGeventHandler` is created in (which should be
        the gevent/main thread).
        """
        return AsyncResult()

    def spawn(self, func, *args, **kwargs):
        """Spawn a function to run asynchronously"""
        return gevent.spawn(func, *args, **kwargs)

    def dispatch_callback(self, callback):
        """Dispatch to the callback object

        The callback is put on separate queues to run depending on the
        type as documented for the :class:`SequentialGeventHandler`.
        """
        self.callback_queue.put(lambda: callback.func(*callback.args))
| {
"repo_name": "azureplus/hue",
"path": "desktop/core/ext-py/kazoo-2.0/kazoo/handlers/gevent.py",
"copies": "36",
"size": "4918",
"license": "apache-2.0",
"hash": 9137988590901690000,
"line_mean": 29.5465838509,
"line_max": 73,
"alpha_frac": 0.6185441236,
"autogenerated": false,
"ratio": 4.553703703703704,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 161
} |
"""A gevent based handler."""
from __future__ import absolute_import
import logging
import gevent
from gevent import socket
import gevent.event
import gevent.queue
import gevent.select
import gevent.thread
try:
from gevent.lock import Semaphore, RLock
except ImportError:
from gevent.coros import Semaphore, RLock
from kazoo.handlers import utils
from kazoo import python2atexit
# True when running under the legacy libevent-based gevent 0.x line.
_using_libevent = gevent.__version__.startswith('0.')

log = logging.getLogger(__name__)

# Sentinel pushed onto a worker queue to tell its greenlet to exit.
_STOP = object()

AsyncResult = gevent.event.AsyncResult
class SequentialGeventHandler(object):
    """Gevent handler for sequentially executing callbacks.

    This handler executes callbacks in a sequential manner. A queue is
    created for each of the callback events, so that each type of event
    has its callback type run sequentially.

    Each queue type has a greenlet worker that pulls the callback event
    off the queue and runs it in the order the client sees it.

    This split helps ensure that watch callbacks won't block session
    re-establishment should the connection be lost during a Zookeeper
    client call.

    Watch callbacks should avoid blocking behavior as the next callback
    of that type won't be run until it completes. If you need to block,
    spawn a new greenlet and return immediately so callbacks can
    proceed.
    """
    name = "sequential_gevent_handler"
    # Queue classes are class attributes so subclasses/tests can swap them.
    queue_impl = gevent.queue.Queue
    queue_empty = gevent.queue.Empty
    sleep_func = staticmethod(gevent.sleep)

    def __init__(self):
        """Create a :class:`SequentialGeventHandler` instance"""
        self.callback_queue = self.queue_impl()
        self._running = False
        self._async = None
        # Serializes start()/stop() state transitions.
        self._state_change = Semaphore()
        self._workers = []

    @property
    def running(self):
        # Read-only view of the handler's started/stopped state.
        return self._running

    class timeout_exception(gevent.Timeout):
        # Timeout subclass so callers can raise/catch handler-specific
        # timeouts with a message.
        def __init__(self, msg):
            gevent.Timeout.__init__(self, exception=msg)

    def _create_greenlet_worker(self, queue):
        # Spawn a greenlet that drains `queue`, running each callable in
        # FIFO order until it sees the _STOP sentinel.
        def greenlet_worker():
            while True:
                try:
                    func = queue.get()
                    try:
                        if func is _STOP:
                            break
                        func()
                    except Exception as exc:
                        # Log and keep going: one failing callback must
                        # not kill the worker greenlet.
                        log.warning("Exception in worker greenlet")
                        log.exception(exc)
                    finally:
                        del func  # release before possible idle
                except self.queue_empty:
                    continue
        return gevent.spawn(greenlet_worker)

    def start(self):
        """Start the greenlet workers."""
        with self._state_change:
            if self._running:
                return

            self._running = True

            # Spawn our worker greenlets, we have
            # - A callback worker for watch events to be called
            for queue in (self.callback_queue,):
                w = self._create_greenlet_worker(queue)
                self._workers.append(w)
            # Make sure workers are shut down at interpreter exit.
            python2atexit.register(self.stop)

    def stop(self):
        """Stop the greenlet workers and empty all queues."""
        with self._state_change:
            if not self._running:
                return

            self._running = False

            for queue in (self.callback_queue,):
                queue.put(_STOP)

            while self._workers:
                worker = self._workers.pop()
                worker.join()

            # Clear the queues
            self.callback_queue = self.queue_impl()  # pragma: nocover

            python2atexit.unregister(self.stop)

    def select(self, *args, **kwargs):
        # gevent-aware (cooperative) select().
        return gevent.select.select(*args, **kwargs)

    def socket(self, *args, **kwargs):
        # TCP socket built on gevent's cooperative socket module.
        return utils.create_tcp_socket(socket)

    def create_connection(self, *args, **kwargs):
        # Cooperative equivalent of socket.create_connection.
        return utils.create_tcp_connection(socket, *args, **kwargs)

    def create_socket_pair(self):
        # Connected socket pair built on gevent's socket module.
        return utils.create_socket_pair(socket)

    def event_object(self):
        """Create an appropriate Event object"""
        return gevent.event.Event()

    def lock_object(self):
        """Create an appropriate Lock object"""
        return gevent.thread.allocate_lock()

    def rlock_object(self):
        """Create an appropriate RLock object"""
        return RLock()

    def async_result(self):
        """Create a :class:`AsyncResult` instance

        The :class:`AsyncResult` instance will have its completion
        callbacks executed in the thread the
        :class:`SequentialGeventHandler` is created in (which should be
        the gevent/main thread).
        """
        return AsyncResult()

    def spawn(self, func, *args, **kwargs):
        """Spawn a function to run asynchronously"""
        return gevent.spawn(func, *args, **kwargs)

    def dispatch_callback(self, callback):
        """Dispatch to the callback object

        The callback is put on separate queues to run depending on the
        type as documented for the :class:`SequentialGeventHandler`.
        """
        self.callback_queue.put(lambda: callback.func(*callback.args))
| {
"repo_name": "python-zk/kazoo",
"path": "kazoo/handlers/gevent.py",
"copies": "3",
"size": "5194",
"license": "apache-2.0",
"hash": 8759011419898342000,
"line_mean": 29.5529411765,
"line_max": 71,
"alpha_frac": 0.6110897189,
"autogenerated": false,
"ratio": 4.560140474100088,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 170
} |
"""A gevent based handler."""
from __future__ import absolute_import
import logging
import gevent
import gevent.event
import gevent.queue
import gevent.select
import gevent.thread
from gevent.queue import Empty
from gevent.queue import Queue
from gevent import socket
try:
from gevent.lock import Semaphore, RLock
except ImportError:
from gevent.coros import Semaphore, RLock
from kazoo.handlers import utils
from kazoo import python2atexit
# True when running under the legacy libevent-based gevent 0.x line.
_using_libevent = gevent.__version__.startswith('0.')

log = logging.getLogger(__name__)

# Sentinel pushed onto a worker queue to tell its greenlet to exit.
_STOP = object()

AsyncResult = gevent.event.AsyncResult
class SequentialGeventHandler(object):
    """Gevent handler for sequentially executing callbacks.

    This handler executes callbacks in a sequential manner. A queue is
    created for each of the callback events, so that each type of event
    has its callback type run sequentially.

    Each queue type has a greenlet worker that pulls the callback event
    off the queue and runs it in the order the client sees it.

    This split helps ensure that watch callbacks won't block session
    re-establishment should the connection be lost during a Zookeeper
    client call.

    Watch callbacks should avoid blocking behavior as the next callback
    of that type won't be run until it completes. If you need to block,
    spawn a new greenlet and return immediately so callbacks can
    proceed.
    """
    name = "sequential_gevent_handler"
    sleep_func = staticmethod(gevent.sleep)

    def __init__(self):
        """Create a :class:`SequentialGeventHandler` instance"""
        self.callback_queue = Queue()
        self._running = False
        self._async = None
        # Serializes start()/stop() state transitions.
        self._state_change = Semaphore()
        self._workers = []

    class timeout_exception(gevent.event.Timeout):
        # Timeout subclass so callers can raise/catch handler-specific
        # timeouts with a message.
        def __init__(self, msg):
            gevent.event.Timeout.__init__(self, exception=msg)

    def _create_greenlet_worker(self, queue):
        # Spawn a greenlet that drains `queue`, running each callable in
        # FIFO order until it sees the _STOP sentinel.
        def greenlet_worker():
            while True:
                try:
                    func = queue.get()
                    if func is _STOP:
                        break
                    func()
                except Empty:
                    continue
                except Exception as exc:
                    # Log and keep going: one failing callback must not
                    # kill the worker greenlet.
                    log.warning("Exception in worker greenlet")
                    log.exception(exc)
        return gevent.spawn(greenlet_worker)

    def start(self):
        """Start the greenlet workers."""
        with self._state_change:
            if self._running:
                return

            self._running = True

            # Spawn our worker greenlets, we have
            # - A callback worker for watch events to be called
            for queue in (self.callback_queue,):
                w = self._create_greenlet_worker(queue)
                self._workers.append(w)
            # Make sure workers are shut down at interpreter exit.
            python2atexit.register(self.stop)

    def stop(self):
        """Stop the greenlet workers and empty all queues."""
        with self._state_change:
            if not self._running:
                return

            self._running = False

            for queue in (self.callback_queue,):
                queue.put(_STOP)

            while self._workers:
                worker = self._workers.pop()
                worker.join()

            # Clear the queues
            self.callback_queue = Queue()  # pragma: nocover

            python2atexit.unregister(self.stop)

    def select(self, *args, **kwargs):
        # gevent-aware (cooperative) select().
        return gevent.select.select(*args, **kwargs)

    def socket(self, *args, **kwargs):
        # TCP socket built on gevent's cooperative socket module.
        return utils.create_tcp_socket(socket)

    def create_connection(self, *args, **kwargs):
        # Cooperative equivalent of socket.create_connection.
        return utils.create_tcp_connection(socket, *args, **kwargs)

    def create_socket_pair(self):
        # Connected socket pair built on gevent's socket module.
        return utils.create_socket_pair(socket)

    def event_object(self):
        """Create an appropriate Event object"""
        return gevent.event.Event()

    def lock_object(self):
        """Create an appropriate Lock object"""
        return gevent.thread.allocate_lock()

    def rlock_object(self):
        """Create an appropriate RLock object"""
        return RLock()

    def async_result(self):
        """Create a :class:`AsyncResult` instance

        The :class:`AsyncResult` instance will have its completion
        callbacks executed in the thread the
        :class:`SequentialGeventHandler` is created in (which should be
        the gevent/main thread).
        """
        return AsyncResult()

    def spawn(self, func, *args, **kwargs):
        """Spawn a function to run asynchronously"""
        return gevent.spawn(func, *args, **kwargs)

    def dispatch_callback(self, callback):
        """Dispatch to the callback object

        The callback is put on separate queues to run depending on the
        type as documented for the :class:`SequentialGeventHandler`.
        """
        self.callback_queue.put(lambda: callback.func(*callback.args))
| {
"repo_name": "johankaito/fufuka",
"path": "microblog/venv/lib/python2.7/site-packages/kazoo/handlers/gevent.py",
"copies": "13",
"size": "4954",
"license": "apache-2.0",
"hash": 6892760098569892000,
"line_mean": 29.3926380368,
"line_max": 71,
"alpha_frac": 0.6215179653,
"autogenerated": false,
"ratio": 4.515952597994531,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 163
} |
"""A gevent based handler."""
from __future__ import absolute_import
import logging
import gevent
import gevent.event
import gevent.queue
import gevent.select
import gevent.thread
import kazoo.python2atexit as python2atexit
from gevent.queue import Empty
from gevent.queue import Queue
from gevent import socket
try:
from gevent.lock import Semaphore, RLock
except ImportError:
from gevent.coros import Semaphore, RLock
from kazoo.handlers.utils import create_tcp_socket, create_tcp_connection, create_socket_pair
# True when running under the legacy libevent-based gevent 0.x line.
_using_libevent = gevent.__version__.startswith('0.')

log = logging.getLogger(__name__)

# Sentinel pushed onto a worker queue to tell its greenlet to exit.
_STOP = object()

AsyncResult = gevent.event.AsyncResult
class SequentialGeventHandler(object):
    """Gevent handler for sequentially executing callbacks.

    This handler executes callbacks in a sequential manner. A queue is
    created for each of the callback events, so that each type of event
    has its callback type run sequentially.

    Each queue type has a greenlet worker that pulls the callback event
    off the queue and runs it in the order the client sees it.

    This split helps ensure that watch callbacks won't block session
    re-establishment should the connection be lost during a Zookeeper
    client call.

    Watch callbacks should avoid blocking behavior as the next callback
    of that type won't be run until it completes. If you need to block,
    spawn a new greenlet and return immediately so callbacks can
    proceed.
    """
    name = "sequential_gevent_handler"
    sleep_func = staticmethod(gevent.sleep)

    def __init__(self):
        """Create a :class:`SequentialGeventHandler` instance"""
        self.callback_queue = Queue()
        self._running = False
        self._async = None
        # Serializes start()/stop() state transitions.
        self._state_change = Semaphore()
        self._workers = []

    class timeout_exception(gevent.event.Timeout):
        # Timeout subclass so callers can raise/catch handler-specific
        # timeouts with a message.
        def __init__(self, msg):
            gevent.event.Timeout.__init__(self, exception=msg)

    def _create_greenlet_worker(self, queue):
        # Spawn a greenlet that drains `queue`, running each callable in
        # FIFO order until it sees the _STOP sentinel.
        def greenlet_worker():
            while True:
                try:
                    func = queue.get()
                    if func is _STOP:
                        break
                    func()
                except Empty:
                    continue
                except Exception as exc:
                    # Log and keep going: one failing callback must not
                    # kill the worker greenlet.
                    log.warning("Exception in worker greenlet")
                    log.exception(exc)
        return gevent.spawn(greenlet_worker)

    def start(self):
        """Start the greenlet workers."""
        with self._state_change:
            if self._running:
                return

            self._running = True

            # Spawn our worker greenlets, we have
            # - A callback worker for watch events to be called
            for queue in (self.callback_queue,):
                w = self._create_greenlet_worker(queue)
                self._workers.append(w)
            # Make sure workers are shut down at interpreter exit.
            python2atexit.register(self.stop)

    def stop(self):
        """Stop the greenlet workers and empty all queues."""
        with self._state_change:
            if not self._running:
                return

            self._running = False

            for queue in (self.callback_queue,):
                queue.put(_STOP)

            while self._workers:
                worker = self._workers.pop()
                worker.join()

            # Clear the queues
            self.callback_queue = Queue()  # pragma: nocover

            python2atexit.unregister(self.stop)

    def select(self, *args, **kwargs):
        # gevent-aware (cooperative) select().
        return gevent.select.select(*args, **kwargs)

    def socket(self, *args, **kwargs):
        # TCP socket built on gevent's cooperative socket module.
        return create_tcp_socket(socket)

    def create_connection(self, *args, **kwargs):
        # Cooperative equivalent of socket.create_connection.
        return create_tcp_connection(socket, *args, **kwargs)

    def socketpair(self, *args, **kwargs):
        # Connected socket pair built on gevent's socket module.
        return create_socket_pair(socket)

    def event_object(self):
        """Create an appropriate Event object"""
        return gevent.event.Event()

    def lock_object(self):
        """Create an appropriate Lock object"""
        return gevent.thread.allocate_lock()

    def rlock_object(self):
        """Create an appropriate RLock object"""
        return RLock()

    def async_result(self):
        """Create a :class:`AsyncResult` instance

        The :class:`AsyncResult` instance will have its completion
        callbacks executed in the thread the
        :class:`SequentialGeventHandler` is created in (which should be
        the gevent/main thread).
        """
        return AsyncResult()

    def spawn(self, func, *args, **kwargs):
        """Spawn a function to run asynchronously"""
        return gevent.spawn(func, *args, **kwargs)

    def dispatch_callback(self, callback):
        """Dispatch to the callback object

        The callback is put on separate queues to run depending on the
        type as documented for the :class:`SequentialGeventHandler`.
        """
        self.callback_queue.put(lambda: callback.func(*callback.args))
| {
"repo_name": "max0d41/kazoo",
"path": "kazoo/handlers/gevent.py",
"copies": "1",
"size": "5019",
"license": "apache-2.0",
"hash": 3758709374374409000,
"line_mean": 29.6036585366,
"line_max": 93,
"alpha_frac": 0.6234309623,
"autogenerated": false,
"ratio": 4.489266547406082,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5612697509706082,
"avg_score": null,
"num_lines": null
} |
'''A Gevent-based worker'''
import os
import gevent
import gevent.pool
from six import next
from . import Worker
from qless import logger
class GeventWorker(Worker):
    '''A Gevent-based worker'''

    def __init__(self, *args, **kwargs):
        # NOTE(review): kwargs still contains 'greenlets' and 'sandbox'
        # when forwarded to Worker.__init__ (they are popped only
        # afterwards); presumably Worker tolerates extra keyword
        # arguments -- confirm.
        Worker.__init__(self, *args, **kwargs)
        # Should we shut down after this?
        self.shutdown = False
        # A mapping of jids to the greenlets handling them
        self.greenlets = {}
        count = kwargs.pop('greenlets', 10)
        self.pool = gevent.pool.Pool(count)
        # A list of the sandboxes that we'll use
        self.sandbox = kwargs.pop(
            'sandbox', os.path.join(os.getcwd(), 'qless-py-workers'))
        self.sandboxes = [
            os.path.join(self.sandbox, 'greenlet-%i' % i) for i in range(count)]

    def process(self, job):
        '''Process a job'''
        # Borrow a sandbox directory for the duration of the job; it is
        # always returned to the free list in the finally block.
        sandbox = self.sandboxes.pop(0)
        try:
            with Worker.sandbox(sandbox):
                job.sandbox = sandbox
                job.process()
        finally:
            # Delete its entry from our greenlets mapping
            self.greenlets.pop(job.jid, None)
            self.sandboxes.append(sandbox)

    def kill(self, jid):
        '''Stop the greenlet processing the provided jid'''
        greenlet = self.greenlets.get(jid)
        if greenlet is not None:
            # NOTE(review): logger.warn is a deprecated alias for
            # logger.warning.
            logger.warn('Lost ownership of %s' % jid)
            greenlet.kill()

    def run(self):
        '''Work on jobs'''
        # Register signal handlers
        self.signals()
        # Start listening
        with self.listener():
            try:
                generator = self.jobs()
                while not self.shutdown:
                    # Block until the pool has a free greenlet slot.
                    self.pool.wait_available()
                    job = next(generator)
                    if job:
                        # For whatever reason, doing imports within a greenlet
                        # (there's one implicitly invoked in job.process), was
                        # throwing exceptions. The hacky way to get around this
                        # is to force the import to happen before the greenlet
                        # is spawned.
                        job.klass
                        greenlet = gevent.Greenlet(self.process, job)
                        self.greenlets[job.jid] = greenlet
                        self.pool.start(greenlet)
                    else:
                        logger.debug('Sleeping for %fs' % self.interval)
                        gevent.sleep(self.interval)
            except StopIteration:
                logger.info('Exhausted jobs')
            finally:
                logger.info('Waiting for greenlets to finish')
                self.pool.join()
| {
"repo_name": "seomoz/qless-py",
"path": "qless/workers/greenlet.py",
"copies": "1",
"size": "2720",
"license": "mit",
"hash": -4550340432083189000,
"line_mean": 34.7894736842,
"line_max": 80,
"alpha_frac": 0.5261029412,
"autogenerated": false,
"ratio": 4.556113902847571,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00016244314489928524,
"num_lines": 76
} |
"""Agglomerates storm-centered radar images by SPC date.
In other words, this script converts the file structure from one file per
field/height pair per time step to one file per field/height pair per SPC date.
"""
import argparse
import numpy
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.deep_learning import storm_images
# Visual separator printed between major processing stages.
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'

# Names of the command-line arguments defined below.
STORM_IMAGE_DIR_ARG_NAME = 'storm_image_dir_name'
RADAR_SOURCE_ARG_NAME = 'radar_source'
SPC_DATE_ARG_NAME = 'spc_date_string'
RADAR_FIELD_NAMES_ARG_NAME = 'radar_field_names'
RADAR_HEIGHTS_ARG_NAME = 'radar_heights_m_agl'
REFL_HEIGHTS_ARG_NAME = 'refl_heights_m_agl'

# Help strings shown by argparse for each argument.
# NOTE(review): "found by find_storm_image_file; ... read therefrom by
# write_storm_images; ... written thereto by read_storm_images" looks
# like the read/write function names are swapped -- confirm before
# editing the user-facing text.
STORM_IMAGE_DIR_HELP_STRING = (
    'Name of top-level with storm-centered radar images. One-time files '
    'therein will be found by `storm_images.find_storm_image_file`; one-time '
    'files will be read therefrom by `storm_images.write_storm_images`; and '
    'one-day files will be written thereto by `storm_images.read_storm_images`.'
)
RADAR_SOURCE_HELP_STRING = (
    'Data source. Must belong to the following list.\n{0:s}'
).format(str(radar_utils.DATA_SOURCE_IDS))
SPC_DATE_HELP_STRING = (
    'SPC (Storm Prediction Center) date in format "yyyymmdd". Files will be '
    'agglomerated for each field, height, and storm object on this date.'
)
RADAR_FIELD_NAMES_HELP_STRING = (
    'List with names of radar fields. Each must belong to the following list.'
    '\n{0:s}'
).format(str(radar_utils.RADAR_FIELD_NAMES))
RADAR_HEIGHTS_HELP_STRING = (
    '[used only if {0:s} = "{1:s}"] List of radar heights (metres above ground '
    'level).'
).format(RADAR_SOURCE_ARG_NAME, radar_utils.GRIDRAD_SOURCE_ID)
REFL_HEIGHTS_HELP_STRING = (
    '[used only if {0:s} != "{1:s}"] List of reflectivity heights (metres above'
    ' ground level).'
).format(RADAR_SOURCE_ARG_NAME, radar_utils.GRIDRAD_SOURCE_ID)

# Build the shared argument parser used by the script entry point.
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
    '--' + STORM_IMAGE_DIR_ARG_NAME, type=str, required=True,
    help=STORM_IMAGE_DIR_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + RADAR_SOURCE_ARG_NAME, type=str, required=True,
    help=RADAR_SOURCE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + SPC_DATE_ARG_NAME, type=str, required=True,
    help=SPC_DATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + RADAR_FIELD_NAMES_ARG_NAME, type=str, nargs='+', required=True,
    help=RADAR_FIELD_NAMES_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + RADAR_HEIGHTS_ARG_NAME, type=int, nargs='+', required=False,
    default=storm_images.DEFAULT_RADAR_HEIGHTS_M_AGL,
    help=RADAR_HEIGHTS_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
    '--' + REFL_HEIGHTS_ARG_NAME, type=int, nargs='+', required=False,
    default=storm_images.DEFAULT_RADAR_HEIGHTS_M_AGL,
    help=REFL_HEIGHTS_HELP_STRING)
def _run(
        top_storm_image_dir_name, radar_source, spc_date_string,
        radar_field_names, radar_heights_m_agl, refl_heights_m_agl):
    """Agglomerates storm-centered radar images by SPC date.

    This is effectively the main method.  For each radar field/height pair,
    every one-time image file on the given SPC date is read and concatenated,
    and the result is written to a single one-day file.

    :param top_storm_image_dir_name: See documentation at top of file.
    :param radar_source: Same.
    :param spc_date_string: Same.
    :param radar_field_names: Same.
    :param radar_heights_m_agl: Same.
    :param refl_heights_m_agl: Same.
    """

    # Bounding Unix times of the SPC date.
    start_time_unix_sec = time_conversion.get_start_of_spc_date(spc_date_string)
    end_time_unix_sec = time_conversion.get_end_of_spc_date(spc_date_string)

    if radar_source == radar_utils.GRIDRAD_SOURCE_ID:
        # GridRad file names are indexed by (time, field, height); flatten the
        # field/height axes so that both branches produce a 2-D matrix of
        # [time x field-height pair] file names.
        file_dict = storm_images.find_many_files_gridrad(
            top_directory_name=top_storm_image_dir_name,
            radar_field_names=radar_field_names,
            radar_heights_m_agl=radar_heights_m_agl,
            start_time_unix_sec=start_time_unix_sec,
            end_time_unix_sec=end_time_unix_sec,
            one_file_per_time_step=True)

        image_file_name_matrix = file_dict[storm_images.IMAGE_FILE_NAMES_KEY]
        num_times = image_file_name_matrix.shape[0]
        num_field_height_pairs = (
            image_file_name_matrix.shape[1] * image_file_name_matrix.shape[2]
        )

        image_file_name_matrix = numpy.reshape(
            image_file_name_matrix, (num_times, num_field_height_pairs))
    else:
        # MYRORSS/MRMS file names are already indexed by
        # (time, field-height pair).
        file_dict = storm_images.find_many_files_myrorss_or_mrms(
            top_directory_name=top_storm_image_dir_name,
            radar_source=radar_source, radar_field_names=radar_field_names,
            start_time_unix_sec=start_time_unix_sec,
            end_time_unix_sec=end_time_unix_sec, one_file_per_time_step=True,
            reflectivity_heights_m_agl=refl_heights_m_agl,
            raise_error_if_all_missing=True,
            raise_error_if_any_missing=True)

        image_file_name_matrix = file_dict[storm_images.IMAGE_FILE_NAMES_KEY]
        num_times = image_file_name_matrix.shape[0]
        num_field_height_pairs = image_file_name_matrix.shape[1]

    print(SEPARATOR_STRING)

    # For each field/height pair, concatenate data across all time steps of
    # the SPC date, then write one daily file.
    for j in range(num_field_height_pairs):
        storm_image_dict = None

        for i in range(num_times):
            if image_file_name_matrix[i, j] == '':
                continue  # No file for this time step.

            print('Reading data from: "{0:s}"...'.format(
                image_file_name_matrix[i, j]))

            if storm_image_dict is None:
                # First file for this field/height pair becomes the base dict.
                storm_image_dict = storm_images.read_storm_images(
                    netcdf_file_name=image_file_name_matrix[i, j]
                )
            else:
                # Append storm IDs, valid times, and image matrices from
                # subsequent files onto the base dict.
                this_storm_image_dict = storm_images.read_storm_images(
                    netcdf_file_name=image_file_name_matrix[i, j]
                )

                storm_image_dict[storm_images.FULL_IDS_KEY] += (
                    this_storm_image_dict[storm_images.FULL_IDS_KEY]
                )
                storm_image_dict[storm_images.VALID_TIMES_KEY] = (
                    numpy.concatenate((
                        storm_image_dict[storm_images.VALID_TIMES_KEY],
                        this_storm_image_dict[storm_images.VALID_TIMES_KEY]
                    ))
                )
                storm_image_dict[storm_images.STORM_IMAGE_MATRIX_KEY] = (
                    numpy.concatenate((
                        storm_image_dict[storm_images.STORM_IMAGE_MATRIX_KEY],
                        this_storm_image_dict[
                            storm_images.STORM_IMAGE_MATRIX_KEY]
                    ), axis=0)
                )

        # NOTE(review): if no file existed for this field/height pair,
        # storm_image_dict is still None here and the lookups below would
        # raise — presumably at least one file per pair is guaranteed by the
        # find_many_files_* calls above; confirm.
        this_file_name = storm_images.find_storm_image_file(
            top_directory_name=top_storm_image_dir_name,
            spc_date_string=spc_date_string, radar_source=radar_source,
            radar_field_name=storm_image_dict[
                storm_images.RADAR_FIELD_NAME_KEY],
            radar_height_m_agl=storm_image_dict[storm_images.RADAR_HEIGHT_KEY],
            raise_error_if_missing=False)

        print('Writing data to: "{0:s}"...'.format(this_file_name))
        storm_images.write_storm_images(
            netcdf_file_name=this_file_name,
            storm_image_matrix=storm_image_dict[
                storm_images.STORM_IMAGE_MATRIX_KEY],
            full_id_strings=storm_image_dict[storm_images.FULL_IDS_KEY],
            valid_times_unix_sec=storm_image_dict[storm_images.VALID_TIMES_KEY],
            radar_field_name=storm_image_dict[
                storm_images.RADAR_FIELD_NAME_KEY],
            radar_height_m_agl=storm_image_dict[storm_images.RADAR_HEIGHT_KEY],
            rotated_grids=storm_image_dict[storm_images.ROTATED_GRIDS_KEY],
            rotated_grid_spacing_metres=storm_image_dict[
                storm_images.ROTATED_GRID_SPACING_KEY]
        )

        print(SEPARATOR_STRING)
# Script entry point: parse CLI args, convert height lists to integer numpy
# arrays, and hand everything to _run.
if __name__ == '__main__':
    INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()

    _run(
        top_storm_image_dir_name=getattr(
            INPUT_ARG_OBJECT, STORM_IMAGE_DIR_ARG_NAME),
        radar_source=getattr(INPUT_ARG_OBJECT, RADAR_SOURCE_ARG_NAME),
        spc_date_string=getattr(INPUT_ARG_OBJECT, SPC_DATE_ARG_NAME),
        radar_field_names=getattr(INPUT_ARG_OBJECT, RADAR_FIELD_NAMES_ARG_NAME),
        radar_heights_m_agl=numpy.array(
            getattr(INPUT_ARG_OBJECT, RADAR_HEIGHTS_ARG_NAME), dtype=int),
        refl_heights_m_agl=numpy.array(
            getattr(INPUT_ARG_OBJECT, REFL_HEIGHTS_ARG_NAME), dtype=int)
    )
| {
"repo_name": "thunderhoser/GewitterGefahr",
"path": "gewittergefahr/scripts/agglom_storm_images_by_date.py",
"copies": "1",
"size": "8415",
"license": "mit",
"hash": 1168786316500274000,
"line_mean": 38.8815165877,
"line_max": 80,
"alpha_frac": 0.6331550802,
"autogenerated": false,
"ratio": 3.1017323995576853,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4234887479757685,
"avg_score": null,
"num_lines": null
} |
"""Aggregate all of the GO reference information into a single blob for downstream use."""
####
#### Aggregate all of the GO reference information into a single blob
#### for downstream use.
####
#### This script assumes access (via CLI option) of the directory
#### containing the GO reference data file.
####
#### Example usage to aggregate "whatever":
#### python3 aggregate-references.py --help
#### python3 ./scripts/aggregate-references.py -v --directory ./metadata/gorefs --json /tmp/go-refs.json --stanza /tmp/GO.references
####
## Standard imports.
import sys
import argparse
import logging
import glob
import yamldown
import json
import pypandoc
## Logger basic setup.
# basicConfig installs the root handler; this module's own logger defaults to
# WARNING and is raised back to INFO by the -v/--verbose flag in main().
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger('aggregate-references')
LOG.setLevel(logging.WARNING)
def die_screaming(instr):
    """Log *instr* as an error, then abort with exit status 1 so that a CI
    runner (e.g. Jenkins) registers the failure."""
    LOG.error(instr)
    raise SystemExit(1)
def wtflist2str(wtflist):
    """Flatten a list of pandoc inline AST nodes into a plain-text string.

    "Str" nodes contribute their text; "Space", "SoftBreak", and "RawInline"
    nodes each contribute a single space.  Any other node type raises.
    """
    parts = []
    for node in wtflist:
        node_type = node['t']
        if node_type == "Str":
            parts.append(node['c'])
        elif node_type in ("Space", "RawInline", "SoftBreak"):
            # RawInline once mapped to a newline; it is now treated as a space.
            parts.append(" ")
        else:
            raise Exception("Unknown type in paragraph: " + node_type)
    return "".join(parts)
## The header that we want to use for the GO.references header.
## Prepended verbatim to the stanza-format output in main(); the stanza body
## itself is generated from the per-ref YAML/markdown files.
header = """!
! DEPRECATED!!!
!
! This file is DEPRECATED. Please see go-refs.json relative to this location.
!
! Gene Ontology Reference Collection
!
! The GO reference collection is a set of abstracts that can be cited
! in the GO ontologies (e.g. as dbxrefs for term definitions) and
! annotation files (in the Reference column).
!
! The collection houses two main kinds of references; one type are
! descriptions of methods that groups use for ISS, IEA, and ND
! evidence codes; the other type are abstract-style descriptions of
! "GO content" meetings at which substantial changes in the ontologies
! are discussed and made.
!
! Data fields for this file:
!
! go_ref_id: [mandatory; cardinality 1; GO_REF:nnnnnnn]
! alt_id: [not mandatory; cardinality 0,1,>1; GO_REF:nnnnnnn]
! title: [mandatory; cardinality 1; free text]
! authors: [mandatory; cardinality 1; free text??
! or cardinality 1,>1 and one entry per author?]
! year: [mandatory, cardinality 1]
! external_accession: [not mandatory; cardinality 0,1,>1; DB:id]
! citation: [not mandatory; cardinality 0,1; use for published refs]
! abstract: [mandatory; cardinality 1; free text]
! comment: [not mandatory; cardinality 1; free text]
! is_obsolete: [not mandatory; cardinality 0,1; 'true';
! if tag is not present, assume that the ref is not obsolete
! denotes a reference no longer used by the contributing database]
!
! If a database maintains its own internal reference collection, and
! has a record that is equivalent to a GO_REF entry, the database's
! internal ID should be included as an external_accession for the
! corresponding GO_REF.
!
! This data is available as a web page at
! https://github.com/geneontology/go-site/blob/master/metadata/gorefs/README.md
!
"""
def main():
    """The main runner for our script.

    Parses CLI arguments, converts every GO-ref markdown file in the given
    directory into a dict (YAML frontmatter plus title/abstract/comments
    recovered from the markdown body via pandoc), then writes the collection
    as JSON and/or as a GO.references-style stanza file.
    """
    ## Deal with incoming.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-d', '--directory',
                        help='The directory of the GO refs')
    parser.add_argument('-j', '--json',
                        help='JSON output file')
    parser.add_argument('-s', '--stanza',
                        help='Stanza-based output file')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='More verbose output')
    args = parser.parse_args()

    if args.verbose:
        LOG.setLevel(logging.INFO)
        LOG.info('Verbose: on')

    ## Ensure directories and outputs.
    if not args.directory:
        die_screaming('need a directory argument')
    LOG.info('Will operate in: ' + args.directory)

    ## Ensure at least one output file; either (or both) may be given.
    if not args.json and not args.stanza:
        die_screaming('need an output file argument, --json or --stanza')
    # BUG FIX: only log the outputs actually requested.  The old code
    # concatenated args.json/args.stanza unconditionally, which raised
    # TypeError ('str' + None) whenever only one of the two flags was given.
    if args.json:
        LOG.info('Will output JSON to: ' + args.json)
    if args.stanza:
        LOG.info('Will output stanza to: ' + args.stanza)

    ## Main data hold.
    reference_data = []

    ## Get files out of target directory, flipping the frontmatter
    ## into JSON.
    # NOTE(review): the glob pattern 'go*-*.md' is wider than the
    # 'goref-*.md' mentioned in the log message — confirm which is intended.
    LOG.info('Globbing GO ref YAMLs in data directory: ' + args.directory + '/goref-*.md')
    src_filenames = glob.glob(args.directory + '/go*-*.md')
    for src_filename in src_filenames:
        LOG.info('GO ref filename: ' + src_filename)

        ## Split frontmatter (yml) from markdown body (md).
        with open(src_filename, "r") as f:
            yml, md = yamldown.load(f)

        ## Break the md into the title, abstract, and comments by walking
        ## the pandoc JSON AST.
        mdj_text = pypandoc.convert_text(md, 'json', format='markdown')
        mdj = json.loads(mdj_text)
        title = 'n/a'
        abstract = 'n/a'
        comments = 'n/a'
        next_block_type = None

        ## A workaround for the change in JSON format in pandoc in
        ## 1.18; Ubuntu 16.04 uses 1.16.0.2 and 18.04 uses
        ## 1.19.2.4.
        blocks = None
        if type(mdj) == list:
            blocks = mdj[1]
        else:
            blocks = mdj['blocks']

        for block in blocks:
            ## A header either introduces the comments section or (by
            ## assumption) carries the title and starts the abstract.
            if block.get('t', False) == "Header":
                if block.get('c', False) and len(block['c']) >= 2:
                    header_text = wtflist2str(block['c'][2])
                    if header_text.casefold() == "comments" or header_text.casefold() == "comment":
                        next_block_type = "comments"
                    else:
                        ## Otherwise, we're going to assume this
                        ## is an abstract.
                        title = header_text
                        next_block_type = "abstract"
                else:
                    raise Exception("Unknown HEADER")
            elif block['t'] == "Para":
                ## A paragraph is filed under whichever section the last
                ## header opened.
                if block.get('c', False) and len(block['c']) > 0:
                    para_text = wtflist2str(block['c'])
                    if next_block_type == "comments":
                        comments = para_text
                    elif next_block_type == "abstract":
                        abstract = para_text
                    else:
                        raise Exception("Unknown PARA")
            else:
                raise Exception("Unknown ENTITY")

        yml['abstract'] = abstract
        yml['comments'] = comments
        yml['title'] = title
        reference_data.append(yml)

    ## Sort by id.
    reference_data = sorted(reference_data, key=lambda k: k['id'])

    ## Final JSON writeout.
    if args.json:
        with open(args.json, 'w+') as fhandle:
            fhandle.write(json.dumps(reference_data, sort_keys=True, indent=4))

    ## Final stanza writeout: one blank-line-separated stanza per ref,
    ## prefixed with the deprecation header defined above.
    if args.stanza:
        with open(args.stanza, 'w+') as fhandle:
            file_cache = []
            for ref in reference_data:
                stanza_cache = []
                if ref.get('id', False):
                    stanza_cache.append('go_ref_id: ' + ref.get('id'))
                alt_ids = ref.get('alt_id', [])
                for alt_id in alt_ids:
                    stanza_cache.append('alt_id: ' + alt_id)
                if ref.get('title', False):
                    stanza_cache.append('title: ' + ref.get('title'))
                if ref.get('authors', False):
                    stanza_cache.append('authors: ' + ref.get('authors'))
                if ref.get('year', False):
                    stanza_cache.append('year: ' + str(ref.get('year')))
                external_accessions = ref.get('external_accession', [])
                for external_accession in external_accessions:
                    stanza_cache.append('external_accession: ' + external_accession)
                if ref.get('abstract', False):
                    stanza_cache.append('abstract: ' + ref.get('abstract'))
                if ref.get('comments', False):
                    stanza_cache.append('comment: ' + ref.get('comments'))
                file_cache.append("\n".join(stanza_cache))
            fhandle.write(header + "\n\n".join(file_cache))
## You saw it coming...
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "geneontology/go-site",
"path": "scripts/aggregate-references.py",
"copies": "1",
"size": "9360",
"license": "bsd-3-clause",
"hash": 7161390482938908000,
"line_mean": 36.44,
"line_max": 133,
"alpha_frac": 0.5612179487,
"autogenerated": false,
"ratio": 3.9543726235741445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5015590572274145,
"avg_score": null,
"num_lines": null
} |
"""Aggregate all of the report JSON and other metadata into a single blob for downstream use, such as creating webpages."""
####
#### Aggregate all of the report JSON and other metadata into a single blob
#### for downstream use, such as creating webpages.
####
#### This script assumes access to skyhook or a flat directory of
#### pipeline association products and reports. We used to have those
#### in the same directory, now they are different; they'll need to be
#### recombined for this script to work right now.
#### NOTE: Skip uniprot if identified.
####
#### Example usage to aggregate "whatever":
#### python3 aggregate-json-reports.py --help
#### mkdir -p /tmp/mnt || true
#### mkdir -p /tmp/foo || true
#### sshfs -oStrictHostKeyChecking=no -o IdentitiesOnly=true -o IdentityFile=/home/sjcarbon/local/share/secrets/bbop/ssh-keys/foo.skyhook -o idmap=user skyhook@skyhook.berkeleybop.org:/home/skyhook /tmp/mnt/
#### cp /tmp/mnt/master/annotations/whatever* /tmp/foo
#### cp /tmp/mnt/master/reports/whatever* /tmp/foo
#### fusermount -u /tmp/mnt
#### python3 aggregate-json-reports.py -v --directory /tmp/foo --metadata ~/local/src/git/go-site/metadata/datasets --output /tmp/all_combined.report.json
####
## Standard imports.
import sys
import argparse
import logging
import glob
import os
import json
#from contextlib import closing
import yaml
import requests
## Logger basic setup.
# basicConfig installs the root handler; this module's own logger defaults to
# WARNING and is raised back to INFO by the -v/--verbose flag in main().
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger('aggregate')
LOG.setLevel(logging.WARNING)
def die_screaming(instr):
    """Log *instr* as an error, then abort with exit status 1 so that a CI
    runner (e.g. Jenkins) registers the failure."""
    LOG.error(instr)
    raise SystemExit(1)
def wikidata_taxon_name(tid):
    """Get the taxon name for a taxon ID (if available) via Wikidata.

    :param tid: bare NCBI taxon number as a string (no "NCBITaxon:" prefix).
    :return: the taxon name from Wikidata, or the fallback
        "NCBITaxon:<tid>" when the lookup fails or is ambiguous.
    """
    ## Default return is the ID itself.
    ret = 'NCBITaxon:' + tid
    # SPARQL: find the item whose NCBI taxon ID (P685) equals *tid*, then
    # read its taxon name (P225).
    query = 'PREFIX wdt: <http://www.wikidata.org/prop/direct/> ' + \
        'SELECT * WHERE { ?tid wdt:P685 "'+tid+'" . ?tid wdt:P225 ?name }'
    headers = {'accept': 'application/sparql-results+json'}
    # NOTE(review): no timeout is set, so a stalled endpoint can hang the
    # whole aggregation run; network errors propagate to the caller.
    resp = requests.post('https://query.wikidata.org/sparql', \
        data={'query':query}, headers=headers, stream=False)
    if resp.status_code == 200:
        jret = resp.json()
        ## Make sure we got what we wanted: accept only an unambiguous
        ## result — exactly one binding that carries a name.
        if jret and jret.get('results', False) and \
           jret['results'].get('bindings', False) and \
           len(jret['results']['bindings']) == 1 and \
           jret['results']['bindings'][0].get('name', False):
            ret = jret['results']['bindings'][0]['name'].get('value', tid)

    # with closing(requests.get(url, stream=False)) as resp:
    #     if resp.status_code == 200:
    #         ret = resp.json()

    return ret
# def remote_json(url):
# """Get a remote JSON resource"""
# ret = {}
# with closing(requests.get(url, stream=False)) as resp:
# if resp.status_code == 200:
# ret = resp.json()
# return ret
def main():
    """The main runner for our script.

    Combines per-dataset pipeline report JSON with dataset metadata (YAML)
    into a single aggregated JSON blob, enriching taxa with names looked up
    from Wikidata.  Anything that smells of UniProt is skipped.
    """
    ## Deal with incoming.
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-d', '--directory',
                        help='The directory or combined anntations/ and reports/ to act on')
    parser.add_argument('-m', '--metadata',
                        help='The metadata directory')
    parser.add_argument('-o', '--output',
                        help='Output file')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='More verbose output')
    args = parser.parse_args()

    if args.verbose:
        LOG.setLevel(logging.INFO)
        LOG.info('Verbose: on')

    ## Ensure directory.
    if not args.directory:
        die_screaming('need a directory argument')
    LOG.info('Will operate in: ' + args.directory)
    ## Ensure metadata.
    if not args.metadata:
        die_screaming('need a metadata argument')
    LOG.info('Will get metadata from: ' + args.metadata)
    ## Ensure output file.
    if not args.output:
        die_screaming('need an output file argument')
    LOG.info('Will output to: ' + args.output)

    ## dataset id -> dataset metadata dict, with resource-level fields
    ## copied down onto each dataset entry.
    resource_metadata = {}

    ## Read in all of the useful data from the metadata data sources.
    LOG.info('Globbing metadata for sources: ' + args.metadata + '/*.yaml')
    metadata_filenames = glob.glob(args.metadata + '/*.yaml')
    for metadata_filename in metadata_filenames:
        LOG.info('metadata_filename: ' + metadata_filename)
        metadata_data = None
        with open(metadata_filename) as mhandle:
            # BUG FIX: yaml.load() without an explicit Loader is deprecated
            # (PyYAML >= 5.1) and can execute arbitrary Python tags;
            # safe_load() is behavior-identical for these plain metadata
            # files.
            metadata_data = yaml.safe_load(mhandle.read())
        for subset in metadata_data['datasets']:
            ## Add what we want.
            LOG.info('subset id' + subset['id'])
            resource_metadata[subset['id']] = subset
            ## Copy resource-level fields down onto each dataset entry.
            resource_metadata[subset['id']]['resource-id'] = \
                metadata_data.get('id', None)
            resource_metadata[subset['id']]['resource-label'] = \
                metadata_data.get('label', None)
            resource_metadata[subset['id']]['resource-description'] = \
                metadata_data.get('description', None)
            resource_metadata[subset['id']]['resource-project_name'] = \
                metadata_data.get('project_name', None)
            resource_metadata[subset['id']]['resource-contact_email'] = \
                metadata_data.get('contact_email', None)
            resource_metadata[subset['id']]['resource-project_url'] = \
                metadata_data.get('project_url', None)
            resource_metadata[subset['id']]['resource-funding_source'] = \
                metadata_data.get('funding_source', None)
            resource_metadata[subset['id']]['resource-email_report'] = \
                metadata_data.get('email_report', None)

    LOG.info('resource_metadata')
    LOG.info(resource_metadata)

    ids = []
    ## Get files out of target directory, searching for the IDs
    ## independent of the metadata (as we'll need to check that too).
    LOG.info('Globbing GAFs in data directory: ' + args.directory + '/*.gaf.gz')
    src_filenames = glob.glob(args.directory + '/*.gaf.gz')
    for src_filename in src_filenames:
        LOG.info('src_filename: ' + src_filename)
        ## We are only interested in product GAF files, not source.
        if "-src.gaf.gz" in src_filename:
            pass
        ## As well, at this time, we are only interested in non-IEA
        ## files.
        elif "_noiea.gaf.gz" in src_filename:
            pass
        elif "_valid.gaf.gz" in src_filename:
            pass
        else:
            ## Generate a usable "id".
            ## First, chop off ALL extensions — whatever they turn out
            ## to be — then trim off the path.
            potential_id = src_filename
            while os.path.splitext(potential_id)[1] != '':
                potential_id = os.path.splitext(potential_id)[0]
            potential_id = os.path.basename(potential_id)
            ids.append(potential_id)
            LOG.info(src_filename)

    ## Get the report file and assemble a data structure for tests.
    ## NOTE: Skipping anything that smells of uniprot at this point.
    lookup = []
    for fid in ids:
        LOG.info("fids: " + fid)
        if fid.lower().find('uniprot') != -1:
            LOG.info("Smells like uniprot; skipping: " + fid)
            continue

        ## Read the per-dataset report.  Shape (abridged):
        ##   {"group": ..., "dataset": ..., "lines": N, "skipped_lines": N,
        ##    "associations": N,
        ##    "messages": {"other": [{level, line, type, message, obj,
        ##                            taxon}, ...],
        ##                 "gorule-0000001": [...], ...}}
        read_data = None
        with open(args.directory + '/' + fid + '.report.json') as fhandle:
            read_data = json.loads(fhandle.read())

        ## Better be something in there.
        if not read_data:
            die_screaming('No report found for: ' + fid)

        ## Look up to see if we have a hit in the metadata; reports with
        ## no matching metadata entry are silently dropped.
        if fid + '.gaf' in resource_metadata:
            ## Assemble report object.
            read_data['id'] = fid
            read_data['metadata'] = resource_metadata[fid + '.gaf']
            LOG.info('Report found for: ' + fid)
            lookup.append(read_data)

    ## Enrich what we got with taxon labels, caching per taxon ID so each
    ## Wikidata query runs at most once.
    id2labels = {}
    for resource in lookup:
        if 'metadata' in resource and 'taxa' in resource['metadata']:
            if isinstance(resource['metadata']['taxa'], list):
                resource['metadata']['taxa_label_map'] = {}
                for taxa_id in resource['metadata']['taxa']:
                    if taxa_id in id2labels:
                        LOG.info('cache hit for: ' + id2labels[taxa_id])
                    else:
                        LOG.info('cache miss for: ' + taxa_id)
                        id_part = taxa_id.split(':')[1]
                        id2labels[taxa_id] = wikidata_taxon_name(id_part)
                    resource['metadata']['taxa_label_map'][taxa_id] = \
                        id2labels[taxa_id]

    ## Final writeout.
    with open(args.output, 'w+') as fhandle:
        fhandle.write(json.dumps(lookup, sort_keys=True, indent=4))
## You saw it coming...
# Script entry point.
if __name__ == '__main__':
    main()
| {
"repo_name": "geneontology/go-site",
"path": "scripts/aggregate-json-reports.py",
"copies": "1",
"size": "11320",
"license": "bsd-3-clause",
"hash": 8073658394831471000,
"line_mean": 39.1418439716,
"line_max": 208,
"alpha_frac": 0.5599823322,
"autogenerated": false,
"ratio": 3.8024857238831036,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9827264543864338,
"avg_score": 0.007040702443753232,
"num_lines": 282
} |
# This script fuses volume-level metadata with volume-level
# summaries of BookNLP output in order to produce yearly summaries
# by character and author gender.

import json, csv, os, sys
from collections import Counter

volgender = dict()  # docid -> author gender ('u'/'f'/'m')
voldate = dict()    # docid -> inferred publication year
volbirth = dict()   # docid -> author birth year
mindate = 3000      # running min/max of inferred dates, set while reading
maxdate = 0

# Load the corrected per-volume metadata and track the overall date range.
with open('post22_corrected_metadata.csv', encoding = 'utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        volgender[row['docid']] = row['authgender']
        date = int(row['inferreddate'])
        voldate[row['docid']] = date
        if date < mindate:
            mindate = date
        if date > maxdate:
            maxdate = date
        volbirth[row['docid']] = int(row['birthdate'])
# Aggregate novels by year.
#
# Characters are divided both by character gender AND by author gender;
# each (chargender, authgender) cell counts characters (or words spoken by
# / attached to those characters) per publication year.  `words` is
# further subdivided by the grammatical role of the word, plus a 'total'
# bucket aggregating counts for all four roles.
allgenders = ['u', 'f', 'm']
rolesplustotal = ['agent', 'mod', 'patient', 'poss', 'total']

# One Counter (keyed by year) per (character gender, author gender) pair.
characters = {(cg, ag): Counter() for cg in allgenders for ag in allgenders}
speech = {(cg, ag): Counter() for cg in allgenders for ag in allgenders}
words = {
    (cg, ag): {role: Counter() for role in rolesplustotal}
    for cg in allgenders
    for ag in allgenders
}

print()
print('Aggregating results by year.')
# Print results while aggregating for the next level.
skipped = 0  # only incremented by the (disabled) reprint filter below
errors = 0   # rows whose docid had no metadata gender entry

# Stream the per-character rows and accumulate counts into the
# (chargender, authgender) tables built above, keyed by publication year.
with open('post22_character_data.csv', encoding = 'utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        docid = row['docid']
        # Prefer the corrected metadata date; fall back to the row's own.
        if docid in voldate:
            date = voldate[docid]
        else:
            date = int(row['date'])

        # Disabled filter that would drop probable reprints (books whose
        # date is more than 99 years after the author's birth).
        # if docid in volbirth:
        #     birth = volbirth[docid]
        # else:
        #     birth = date - 40
        # if int(date) - 99 > int(birth):
        #     skipped += 1
        #     continue
        #     # this is a reprint of an old book

        # Unknown author gender when metadata is missing.
        if docid in volgender:
            authgender = volgender[docid]
        else:
            authgender = 'u'
            errors += 1

        role = row['role']
        count = int(row['count'])
        chargender = row['gender']

        # 'speaking' and 'characters' are volume-level tallies; everything
        # else is a per-word grammatical role, which also feeds 'total'.
        if role == 'speaking':
            speech[(chargender, authgender)][date] += count
        elif role == 'characters':
            characters[(chargender, authgender)][date] += count
        else:
            words[(chargender, authgender)][role][date] += count
            words[(chargender, authgender)]['total'][date] += count
            # Each category also gets added to the 'total' category.
# Write one CSV row per (chargender, authgender, year) with all counts.
fields = ['chargender', 'authgender', 'date', 'characters', 'speaking', 'agent', 'mod', 'patient', 'poss', 'total']

with open('corrected_post22_summary.csv', mode = 'w', encoding = 'utf-8') as f:
    writer = csv.DictWriter(f, fieldnames = fields)
    writer.writeheader()
    for chargender in allgenders:
        for authgender in allgenders:
            for date in range(mindate, (maxdate + 1)):
                outrow = dict()
                outrow['chargender'] = chargender
                outrow['authgender'] = authgender
                outrow['date'] = date
                outrow['characters'] = characters[(chargender, authgender)][date]
                outrow['speaking'] = speech[(chargender, authgender)][date]
                for role in rolesplustotal:
                    outrow[role] = words[(chargender, authgender)][role][date]
                writer.writerow(outrow)

# Nonzero only if the (currently commented-out) reprint filter is enabled.
print(skipped)
| {
"repo_name": "tedunderwood/character",
"path": "post22hathi/aggregate_authgender.py",
"copies": "1",
"size": "3801",
"license": "mit",
"hash": 7485458146849789000,
"line_mean": 29.408,
"line_max": 115,
"alpha_frac": 0.5953696396,
"autogenerated": false,
"ratio": 3.6974708171206228,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.978454071165882,
"avg_score": 0.0016599490123606116,
"num_lines": 125
} |
# aggregate_by_authgender.py
#
# Fuses volume metadata and genre tags with per-character word counts to
# produce yearly totals by character gender, author gender, and genre.

import json, csv, os, sys
from collections import Counter

# import utils
# Make ../../lib importable for SonicScrewdriver (imported below but not
# otherwise referenced in this script — presumably kept for parity with
# sibling scripts; confirm).
currentdir = os.path.dirname(__file__)
libpath = os.path.join(currentdir, '../../lib')
sys.path.append(libpath)
import SonicScrewdriver as utils

volgender = dict()  # docid -> author gender ('u'/'f'/'m')
voldate = dict()    # docid -> inferred publication year
volbirth = dict()   # docid -> author birth year
volgenre = dict()   # docid -> genre tag

with open('../post22hathi/post22_corrected_metadata.csv', encoding = 'utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        volgender[row['docid']] = row['authgender']
        voldate[row['docid']] = int(row['inferreddate'])
        volbirth[row['docid']] = int(row['birthdate'])

with open('genredict.csv', encoding = 'utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        volgenre[row['docid']] = row['genre']
# Aggregate novels by year.
#
# Word counts are divided both by character gender AND by author gender,
# and within each pair by genre tag.  Each cell is a Counter keyed by
# publication year.
allgenders = ['u', 'f', 'm']
genres = ['none', 'genre', 'romance']

characters = dict()  # not filled in this script; kept for parity with siblings
words = {
    (cg, ag): {g: Counter() for g in genres}
    for cg in allgenders
    for ag in allgenders
}

print()
print('Aggregating results by year and genre.')

# Print results while aggregating for the next level.
skipped = 0  # never incremented here; kept for parity with sibling scripts
errors = 0   # rows whose docid lacked a genre entry
# Stream the per-character rows and accumulate word counts into the
# (chargender, authgender) x genre tables built above, keyed by year.
with open('../post22hathi/post22_character_data.csv', encoding = 'utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        docid = row['docid']
        # Prefer the corrected metadata date; fall back to the row's own.
        if docid in voldate:
            date = voldate[docid]
        else:
            date = int(row['date'])
        # Unknown author gender when metadata is missing.
        if docid in volgender:
            authgender = volgender[docid]
        else:
            authgender = 'u'
        # Volumes without a genre tag fall into 'none' and count as errors.
        if docid in volgenre:
            genre = volgenre[docid]
        else:
            genre = 'none'
            errors += 1
        role = row['role']
        count = int(row['count'])
        chargender = row['gender']
        # Only per-word roles matter here; volume-level tallies are skipped.
        if role == 'speaking' or role == 'characters':
            continue
        else:
            words[(chargender, authgender)][genre][date] += count
# Write one CSV row per (chargender, authgender, genre, year).
fields = ['chargender', 'authgender', 'date', 'genre', 'total']

with open('post22_by_genre.csv', mode = 'w', encoding = 'utf-8') as f:
    writer = csv.DictWriter(f, fieldnames = fields)
    writer.writeheader()
    for chargender in allgenders:
        for authgender in allgenders:
            for g in genres:
                # Post-1922 date range (hard-coded here, unlike the sibling
                # scripts, which derive it from the metadata).
                for date in range(1922, 2015):
                    outrow = dict()
                    outrow['chargender'] = chargender
                    outrow['authgender'] = authgender
                    outrow['date'] = date
                    outrow['genre'] = g
                    outrow['total'] = words[(chargender, authgender)][g][date]
                    writer.writerow(outrow)

# Always 0 here — nothing increments `skipped` in this script.
print(skipped)
| {
"repo_name": "tedunderwood/character",
"path": "genre_experiment/aggregate_post22__by_genre.py",
"copies": "1",
"size": "3152",
"license": "mit",
"hash": 6858588537765905000,
"line_mean": 26.649122807,
"line_max": 83,
"alpha_frac": 0.5989847716,
"autogenerated": false,
"ratio": 3.5575620767494356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9643593006090692,
"avg_score": 0.0025907684517486327,
"num_lines": 114
} |
# aggregate_by_authgender.py
#
# Pre-1923 HathiTrust variant: fuses corrected volume metadata with
# per-character summaries to produce yearly totals by character and
# author gender.

import json, csv, os, sys
from collections import Counter

volgender = dict()  # docid -> author gender ('u'/'f'/'m')
voldate = dict()    # docid -> inferred publication year
volbirth = dict()   # docid -> author birth year (never populated below)
mindate = 3000      # running min/max of inferred dates, set while reading
maxdate = 0

with open('pre23_corrected_metadata.csv', encoding = 'utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        volgender[row['docid']] = row['authgender']
        date = int(row['inferreddate'])
        voldate[row['docid']] = date
        if date < mindate:
            mindate = date
        if date > maxdate:
            maxdate = date
# Aggregate novels by year.
#
# Characters are divided both by character gender AND by author gender;
# each (chargender, authgender) cell counts characters (or words spoken by
# / attached to those characters) per publication year.  `words` is
# further subdivided by the grammatical role of the word, plus a 'total'
# bucket aggregating counts for all four roles.
allgenders = ['u', 'f', 'm']
rolesplustotal = ['agent', 'mod', 'patient', 'poss', 'total']

# One Counter (keyed by year) per (character gender, author gender) pair.
characters = {(cg, ag): Counter() for cg in allgenders for ag in allgenders}
speech = {(cg, ag): Counter() for cg in allgenders for ag in allgenders}
words = {
    (cg, ag): {role: Counter() for role in rolesplustotal}
    for cg in allgenders
    for ag in allgenders
}

print()
print('Aggregating results by year.')

# Print results while aggregating for the next level.
skipped = 0  # never incremented in this script
errors = 0   # rows whose docid had no metadata gender entry
# Stream the per-character rows and accumulate counts into the
# (chargender, authgender) tables built above, keyed by publication year.
with open('pre23_character_data.csv', encoding = 'utf-8') as f:
    reader = csv.DictReader(f)
    for row in reader:
        docid = row['docid']
        # Prefer the corrected metadata date; fall back to the row's own.
        if docid in voldate:
            date = voldate[docid]
        else:
            date = int(row['date'])
        # Unknown author gender when metadata is missing.
        if docid in volgender:
            authgender = volgender[docid]
        else:
            authgender = 'u'
            errors += 1
        role = row['role']
        count = int(row['count'])
        chargender = row['gender']
        # 'speaking' and 'characters' are volume-level tallies; everything
        # else is a per-word grammatical role, which also feeds 'total'.
        if role == 'speaking':
            speech[(chargender, authgender)][date] += count
        elif role == 'characters':
            characters[(chargender, authgender)][date] += count
        else:
            words[(chargender, authgender)][role][date] += count
            words[(chargender, authgender)]['total'][date] += count
            # Each category also gets added to the 'total' category.
# Write one CSV row per (chargender, authgender, year) with all counts.
fields = ['chargender', 'authgender', 'date', 'characters', 'speaking', 'agent', 'mod', 'patient', 'poss', 'total']

with open('corrected_pre23_hathi_summary.csv', mode = 'w', encoding = 'utf-8') as f:
    writer = csv.DictWriter(f, fieldnames = fields)
    writer.writeheader()
    for chargender in allgenders:
        for authgender in allgenders:
            for date in range(mindate, (maxdate + 1)):
                outrow = dict()
                outrow['chargender'] = chargender
                outrow['authgender'] = authgender
                outrow['date'] = date
                outrow['characters'] = characters[(chargender, authgender)][date]
                outrow['speaking'] = speech[(chargender, authgender)][date]
                for role in rolesplustotal:
                    outrow[role] = words[(chargender, authgender)][role][date]
                writer.writerow(outrow)

# Always 0 here — nothing increments `skipped` in this script.
print(skipped)
| {
"repo_name": "tedunderwood/character",
"path": "pre23hathi/aggregate_old_authgender.py",
"copies": "1",
"size": "3328",
"license": "mit",
"hash": -3082304942318164000,
"line_mean": 29.5321100917,
"line_max": 115,
"alpha_frac": 0.5997596154,
"autogenerated": false,
"ratio": 3.6611661166116614,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9750961319963629,
"avg_score": 0.001992882409606568,
"num_lines": 109
} |
""" Aggregate crime data for use in a web application """
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sdipylib.url import download_ambry_db # install: pip install 'git+https://github.com/sdrdl/sdipylib.git'
from lib import plot_rhythm
import csv
import re
import os
import json
# Resolve paths relative to this script: `here` is the script's directory;
# output files are written to the sibling 'data' directory.
here = os.path.abspath(os.path.dirname(__file__))
data_dir = os.path.join(os.path.dirname(here), 'data')
# Fetch the two Ambry database bundles (crime incidents and area statistics).
# NOTE(review): presumably downloaded into the current working directory,
# since sqlite3.connect below opens 'crimes.db' by bare name — confirm
# against sdipylib's download_ambry_db.
download_ambry_db('http://s3.sandiegodata.org/library/clarinova.com/crime-incidents-casnd-linked-0.1.2/crimes.db')
download_ambry_db('http://s3.sandiegodata.org/library/clarinova.com/places-casnd-0.1.7/areastats.db')
import sqlite3
conn = sqlite3.connect('crimes.db')
def dict_factory(cursor, row):
    """sqlite3 row factory: turn a result row into a dict keyed by the
    column names taken from the cursor's description."""
    return {col[0]: row[idx] for idx, col in enumerate(cursor.description)}
conn.row_factory = dict_factory
cur = conn.cursor()
cur.execute("attach 'areastats.db' as placestats")
q = """
SELECT crimes.*,
CAST(strftime('%W', datetime) AS INTEGER) AS woy ,
CAST(strftime('%j', datetime) AS INTEGER) AS doy,
city_stats.code as city_code,
city_stats.pop as city_pop, city_stats.land as city_area,
CAST(city_stats.pop AS REAL)/ CAST(city_stats.land AS REAL) as city_density,
community_stats.pop as community_pop, community_stats.land as community_area, community_stats.name as community_name,
CAST(community_stats.pop AS REAL)/ CAST(community_stats.land AS REAL)*1000000 as community_density
FROM crimes
LEFT JOIN placestats.areastats AS city_stats ON city_stats.type = 'city' AND city_stats.code = crimes.city
LEFT JOIN placestats.areastats AS community_stats ON community_stats.type = 'community' AND community_stats.code = crimes.community
"""
by_area = {}
print "Loading records"
for i, row in enumerate(cur.execute(q)):
area = row['community'] if row['community'] != '-' else row['city_name']
if not area or area == 'Unincorporated':
continue
if not area in by_area:
by_area[area] = {}
if not row['legend'] in by_area[area]:
by_area[area][row['legend']] = []
by_area[area][row['legend']].append([row['hour'], row['dow'], row['woy'],row['doy']])
if i % 50000 == 0:
print "Loaded {} records".format(i)
rep = re.compile('[\W_]+')
index = dict(
legends = {},
areas = {},
files = {}
)
for area, area_rows in by_area.items():
for legend, legend_rows in area_rows.items():
legend_file_name = rep.sub('',legend.lower())
area_file_name = rep.sub('',area.lower())
fn = 'incidents-{}-{}.csv'.format(area_file_name, legend_file_name)
afn = os.path.join(os.path.dirname(here), 'data',fn)
index['legends'][legend_file_name] = legend
index['areas'][area_file_name] = area
if area.lower() not in index['files']:
index['files'][area_file_name] = {}
index['files'][area_file_name][legend_file_name] = fn
print "Writing file", fn
with open(afn,'w') as f:
w = csv.writer(f)
w.writerow('hour dow woy doy'.split())
w.writerows(legend_rows)
with open(os.path.join(os.path.dirname(here), 'data','index.json'), 'w' ) as f:
f.write(json.dumps(index, indent = 4))
| {
"repo_name": "sdrdl/crimeviz",
"path": "analysis/makedatafiles.py",
"copies": "1",
"size": "3328",
"license": "mit",
"hash": -8832671421442600000,
"line_mean": 30.1121495327,
"line_max": 131,
"alpha_frac": 0.6352163462,
"autogenerated": false,
"ratio": 3.1665080875356804,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.430172443373568,
"avg_score": null,
"num_lines": null
} |
"""Aggregate grid values to census tracts and classify as above/below a given threshold. Used for safety routing."""
import csv
import json
import argparse
import numpy
def main():
    """Aggregate grid crime counts to census tracts and emit block flags.

    For each crime cutoff, a census tract is "blocked" when the average
    select-crime count of its grid cells exceeds the cutoff; every grid
    cell is then written with a 0/1 block flag to a per-cutoff csv named
    after the input grid csv.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("empath_grid_csv")
    parser.add_argument("ct_grid_csv")
    parser.add_argument("ct_geojson")
    args = parser.parse_args()
    # NOTE:
    # count_ugc = total # of all crimes committed in a grid cell
    # count_words = total # of select crimes committed in a grid cell (i.e. the other crime categories in the header)
    # San Francisco
    if '06075' in args.ct_geojson:
        crime_cutoffs = [4, 5.5, 7.4, 15, 50]  # 25%, 15%, 10%, 5%, 1%
        expected_header = ['drug/narcotic', 'count_ugc', 'kidnapping', 'vehicle theft', 'assault', 'rid', 'weapon laws',
                          'cid', 'count_words', 'sex offenses, forcible']
    # New York City
    elif 'nyc' in args.ct_geojson:
        crime_cutoffs = [2.7, 3.95, 4.95, 6.5, 10.5]  # 25%, 15%, 10%, 5%, 1%
        expected_header = ['dangerous drugs', 'kidnapping & related offenses', 'cid',
                          'grand larceny of motor vehicle', 'felony assault', 'count_words', 'count_ugc',
                          'dangerous weapons', 'rid', 'assault 3 & related offenses', 'grand larceny']
    else:
        # BUG FIX: an unrecognized geojson previously fell through with
        # crime_cutoffs / expected_header unbound and crashed much later
        # with UnboundLocalError; fail fast with a clear message instead.
        parser.error(
            "ct_geojson must reference San Francisco ('06075') or New York City ('nyc').")
    # gid ("col,row") -> number of select crimes in that grid cell
    data = {}
    print("loading empath grid scores")
    with open(args.empath_grid_csv, "r") as fin:
        csvreader = csv.reader(fin)
        found_header = next(csvreader)
        num_crime_idx = found_header.index("count_words")
        cid_idx = found_header.index("cid")
        rid_idx = found_header.index("rid")
        for col in expected_header:
            assert col in found_header, "{0} not in header.".format(col)
        for line in csvreader:
            gid = "{0},{1}".format(line[cid_idx], line[rid_idx])
            num_crimes = int(line[num_crime_idx])
            data[gid] = num_crimes
    # Bidirectional grid-cell <-> census-tract lookup tables.
    gid_to_ct = {}
    ct_to_gid = {}
    print("loading ct to grid dict")
    with open(args.ct_grid_csv, "r") as fin:
        csvreader = csv.reader(fin)
        expected_header = ["x", "y", "ctidx"]
        cid_idx = expected_header.index("x")
        rid_idx = expected_header.index("y")
        ct_idx = expected_header.index("ctidx")
        assert next(csvreader) == expected_header
        for line in csvreader:
            gid = "{0},{1}".format(line[cid_idx], line[rid_idx])
            ct = int(line[ct_idx])
            gid_to_ct[gid] = ct
            ct_to_gid[ct] = ct_to_gid.get(ct, []) + [gid]
    with open(args.ct_geojson, "r") as fin:
        cts_gj = json.load(fin)
    for crime_cutoff in crime_cutoffs:
        print("averaging crime data across census tracts")
        num_features = len(cts_gj["features"])
        ct_to_block = set()
        crime_histogram = []  # useful for determining cutoff thresholds
        for i in range(0, num_features):
            avg_crimes = []
            # NOTE(review): assumes every tract index 0..num_features-1 has
            # grid cells (KeyError otherwise) — confirm ct_grid_csv coverage.
            for gid in ct_to_gid[i]:
                if gid in data:
                    avg_crimes.append(data[gid])
            if avg_crimes:
                avg_crimes = numpy.average(avg_crimes)
                crime_histogram.append(avg_crimes)
                if avg_crimes > crime_cutoff:
                    ct_to_block.add(i)
        num_cells_blocked = 0
        for ct in ct_to_block:
            num_cells_blocked += len(ct_to_gid[ct])
        print("Number of grid cells blocked: {0}".format(num_cells_blocked))
        print("Number of census tracts blocked: {0}".format(len(ct_to_block)))
        # One output csv per cutoff: each grid cell flagged 1 if its tract
        # is blocked at this cutoff, 0 otherwise.
        with open(args.empath_grid_csv.replace(".csv", "_ctaggregated_{0}.csv".format(crime_cutoff)), "w") as fout:
            csvwriter = csv.writer(fout)
            csvwriter.writerow(['rid', 'cid', 'block'])
            for gid in data:
                rid = gid.split(',')[1]
                cid = gid.split(',')[0]
                block = 0
                if gid_to_ct.get(gid, -1) in ct_to_block:
                    block = 1
                csvwriter.writerow([rid, cid, block])

if __name__ == "__main__":
    main()
| {
"repo_name": "joh12041/route-externalities",
"path": "utils/aggregate_grid_values_to_ct.py",
"copies": "1",
"size": "4089",
"license": "mit",
"hash": -421872462839453600,
"line_mean": 40.303030303,
"line_max": 120,
"alpha_frac": 0.5551479579,
"autogenerated": false,
"ratio": 3.3709810387469084,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9417136191146978,
"avg_score": 0.0017985610999861421,
"num_lines": 99
} |
"""Aggregate local features using Fisher Vectors with a GMM as the
probabilistic model"""
from joblib import Parallel, delayed
import numpy as np
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import PCA
from .base import BaseAggregator
def _transform_batch(x, means, inv_covariances, inv_sqrt_covariances):
"""Compute the grad with respect to the parameters of the model for the
each vector in the matrix x and return the sum.
see "Improving the Fisher Kernel for Large-Scale Image Classification"
by Perronnin et al. for the equations
Parameters
----------
x: array
The feature matrix to be encoded with fisher encoding
means: array
The GMM means
inverted_covariances: array
The inverse diagonal covariance matrix
Return
------
vector The fisher vector for the passed in local features
"""
# number of gaussians
N, D = means.shape
# number of dimensions
M, D = x.shape
# calculate the probabilities that each x was created by each gaussian
# distribution keeping some intermediate computations as well
diff = x.reshape(-1, D, 1) - means.T.reshape(1, D, N)
diff = diff.transpose(0, 2, 1)
q = -0.5 * (diff * inv_covariances.reshape(1, N, D) * diff).sum(axis=-1)
q = np.exp(q - q.max(axis=1, keepdims=True))
q /= q.sum(axis=1, keepdims=True)
# Finally compute the unnormalized FV and return it
diff_over_cov = diff * inv_sqrt_covariances.reshape(1, N, D)
return np.hstack([
(q.reshape(M, N, 1) * diff_over_cov).sum(axis=0),
(q.reshape(M, N, 1) * (diff_over_cov**2 - 1)).sum(axis=0)
]).ravel()
class FisherVectors(BaseAggregator):
    """Aggregate local features using Fisher Vector encoding with a GMM.

    Train a GMM on some local features and then extract the normalized
    derivative.

    Parameters
    ----------
    n_gaussians : int
        The number of gaussians to be used for the fisher vector
        encoding
    n_pca_components : float
        Control the number of PCA components we will use to
        reduce the dimensionality of our data. The valid range
        for this parameter is (0, 1), with 1 being used to denote
        that the PCA components are equal to the number of feature's
        dimension
    max_iter : int
        The maximum number of EM iterations
    normalization : int
        A bitmask of POWER_NORMALIZATION and L2_NORMALIZATION
    dimension_ordering : {'th', 'tf'}
        Changes how n-dimensional arrays are reshaped to form
        simple local feature matrices. 'th' ordering means the
        local feature dimension is the second dimension and
        'tf' means it is the last dimension.
    inner_batch : int
        Compute the fisher vector of 'inner_batch' vectors together.
        It controls a trade off between speed and memory.
    n_jobs : int
        The threads to use for the transform
    verbose : int
        Controls the verbosity of the GMM
    """
    POWER_NORMALIZATION = 1
    L2_NORMALIZATION = 2

    def __init__(self, n_gaussians, n_pca_components=0.8, max_iter=100,
                 normalization=3, dimension_ordering="tf", inner_batch=64,
                 n_jobs=-1, verbose=0):
        self.n_gaussians = n_gaussians
        self.max_iter = max_iter
        self.normalization = normalization
        self.inner_batch = inner_batch
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.n_pca_components = n_pca_components

        super(self.__class__, self).__init__(dimension_ordering)

        # initialize the rest of the attributes of the class for any use
        # (mainly because we want to be able to check if fit has been called
        # before on this instance)
        self.pca_model = None
        self.weights = None
        self.means = None
        self.covariances = None
        self.inverted_covariances = None
        # BUG FIX: this attribute was previously created only by fit(), yet
        # __getstate__ reads it unconditionally and __setstate__ reads it
        # from a default-constructed instance, so pickling an unfitted
        # encoder (or restoring an old state dict) raised AttributeError.
        # Initialize it alongside its sibling attributes.
        self.inverted_sqrt_covariances = None
        self.normalization_factor = None

    def __getstate__(self):
        """Return the data that should be pickled in order to save the fisher
        encoder after it is trained.

        This way allows us to control what is actually saved to disk and to
        recreate whatever cannot be saved like the probability density
        functions. Moreover we can choose if we want to trade between storage
        space and initialization time (currently maximum space is used).
        """
        # we could be simply grabing self.__dict__ removing pdfs and returning
        # it but I believe this is more explicit
        return {
            "n_gaussians": self.n_gaussians,
            "n_pca_components": self.n_pca_components,
            "max_iter": self.max_iter,
            "normalization": self.normalization,
            "dimension_ordering": self.dimension_ordering,
            "inner_batch": self.inner_batch,
            "n_jobs": self.n_jobs,
            "verbose": self.verbose,
            "pca_model": self.pca_model,
            "weights": self.weights,
            "means": self.means,
            "covariances": self.covariances,
            "inverted_covariances": self.inverted_covariances,
            "inverted_sqrt_covariances": self.inverted_sqrt_covariances,
            "normalization_factor": self.normalization_factor
        }

    def __setstate__(self, state):
        """Restore the class's state after unpickling.

        Parameters
        ----------
        state: dictionary
            The unpickled data that were returned by __getstate__
        """
        # A temporary instance for accessing the default values
        t = FisherVectors(0)

        # Load from state, falling back to the defaults for keys missing
        # from older pickles
        self.n_gaussians = state["n_gaussians"]
        self.n_pca_components = state["n_pca_components"]
        self.max_iter = state.get("max_iter", t.max_iter)
        self.normalization = state.get("normalization", t.normalization)
        self.dimension_ordering = \
            state.get("dimension_ordering", t.dimension_ordering)
        self.inner_batch = state.get("inner_batch", t.inner_batch)
        self.n_jobs = state.get("n_jobs", t.n_jobs)
        self.verbose = state.get("verbose", t.verbose)
        self.pca_model = state.get("pca_model", t.pca_model)
        self.weights = state.get("weights", t.weights)
        self.means = state.get("means", t.means)
        self.covariances = state.get("covariances", t.covariances)
        self.inverted_covariances = \
            state.get("inverted_covariances", t.inverted_covariances)
        self.inverted_sqrt_covariances = \
            state.get("inverted_sqrt_covariances", t.inverted_sqrt_covariances)
        self.normalization_factor = \
            state.get("normalization_factor", t.normalization_factor)

    def fit(self, X, y=None):
        """Learn a fisher vector encoding.

        Fit a gaussian mixture model to the data using n_gaussians with
        diagonal covariance matrices.

        Parameters
        ----------
        X : array_like or list
            The local features to train on. They must be either nd arrays or
            a list of nd arrays.
        """
        X, _ = self._reshape_local_features(X)
        if self.n_pca_components != 1:
            # train PCA
            self.pca_model = PCA(n_components=int(X.shape[-1]*self.n_pca_components))
            self.pca_model.fit(X)
            # apply PCA and reduce dimensionality
            X = self.pca_model.transform(X)
        # consider changing the initialization parameters
        gmm = GaussianMixture(
            n_components=self.n_gaussians,
            max_iter=self.max_iter,
            covariance_type='diag',
            verbose=self.verbose
        )
        gmm.fit(X)

        # save the results of the gmm
        self.weights = gmm.weights_
        self.means = gmm.means_
        self.covariances = gmm.covariances_

        # precompute some values for encoding
        D = X[0].size
        self.inverted_covariances = (1./self.covariances)
        self.inverted_sqrt_covariances = np.sqrt(1./self.covariances)
        self.normalization_factor = np.hstack([
            np.repeat(1.0/np.sqrt(self.weights), D),
            np.repeat(1.0/np.sqrt(2*self.weights), D)
        ])

        return self

    def transform(self, X):
        """Compute the fisher vector implementation of the provided data.

        Parameters
        ----------
        X : array_like or list
            The local features to aggregate. They must be either nd arrays or
            a list of nd arrays. In case of a list each item is aggregated
            separately.
        """
        # Check if the GMM is fitted
        if self.weights is None:
            raise RuntimeError(
                "GMM model not found. Have you called fit(data) first?"
            )

        # Get the local features and the number of local features per document
        X, lengths = self._reshape_local_features(X)
        if self.n_pca_components != 1:
            # Apply PCA and reduce dimensionality
            X = self.pca_model.transform(X)

        # Allocate the memory necessary for the encoded data
        fv = np.zeros((len(lengths), self.normalization_factor.shape[0]))

        # Do a naive double loop for now
        s, e = 0, 0
        for i, l in enumerate(lengths):
            s, e = e, e+l
            fv[i] = sum(
                Parallel(n_jobs=self.n_jobs, backend="threading")(
                    delayed(_transform_batch)(
                        X[j:min(e, j+self.inner_batch)],
                        self.means,
                        self.inverted_covariances,
                        self.inverted_sqrt_covariances
                    )
                    for j in range(s, e, self.inner_batch)
                )
            )

        # normalize the vectors
        fv *= 1.0/np.array(lengths).reshape(-1, 1)
        fv *= self.normalization_factor.reshape(1, -1)

        # check if we should be normalizing the power
        if self.normalization & self.POWER_NORMALIZATION:
            fv = np.sqrt(np.abs(fv))*np.sign(fv)

        # check if we should be performing L2 normalization
        if self.normalization & self.L2_NORMALIZATION:
            fv /= np.sqrt(np.einsum("...j,...j", fv, fv)).reshape(-1, 1)

        return fv
| {
"repo_name": "paschalidoud/feature-aggregation",
"path": "feature_aggregation/fv.py",
"copies": "1",
"size": "10519",
"license": "mit",
"hash": -7754333678776730000,
"line_mean": 36.4341637011,
"line_max": 85,
"alpha_frac": 0.5928320183,
"autogenerated": false,
"ratio": 4.172550575168584,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5265382593468584,
"avg_score": null,
"num_lines": null
} |
"""Aggregate local features using Locality-constrained Linear Coding"""
import numpy as np
from sklearn import cluster
from sklearn.metrics import pairwise_distances
from .base import BaseAggregator
class LLC(BaseAggregator):
    """Compute a Locality-constrained Linear Coding and aggregate local
    features with it.

    Parameters
    ----------
    n_codewords : int
        The codebook size aka the number of clusters
    neighbors : int
        The number of nearest codewords used to reconstruct each local
        feature (the locality constraint)
    beta : float
        The L2 regularization weight used when solving each local
        reconstruction problem
    dimension_ordering : {'th', 'tf'}
        Changes how n-dimensional arrays are reshaped to form
        simple local feature matrices. 'th' ordering means the
        local feature dimension is the second dimension and
        'tf' means it is the last dimension.
    """
    def __init__(self, n_codewords, neighbors=5, beta=1e-4, dimension_ordering="tf"):
        self.n_codewords = n_codewords
        self.neighbors = neighbors
        self.beta = beta
        # Codebook is learned with minibatch KMeans; labels are not needed,
        # only the cluster centers (the codewords).
        self._clusterer = cluster.MiniBatchKMeans(
            n_clusters=self.n_codewords,
            n_init=1,
            compute_labels=False
        )
        super(self.__class__, self).__init__(dimension_ordering)
    def fit(self, X, y=None):
        """Build the codebook for the LLC model.

        Apply the clustering algorithm to the data and use the cluster centers
        as codewords for the codebook.

        Parameters:
        -----------
        X : array_like or list
            The local features to train on. They must be either nd arrays or
            a list of nd arrays.
        """
        X, _ = self._reshape_local_features(X)
        self._clusterer.fit(X)
        return self
    def partial_fit(self, X, y=None):
        """Partially learn the codebook from the provided data.

        Run a single iteration of the minibatch KMeans on the provided data.

        Parameters:
        -----------
        X : array_like or list
            The local features to train on. They must be either nd arrays or
            a list of nd arrays.
        """
        X, _ = self._reshape_local_features(X)
        self._clusterer.partial_fit(X)
        return self
    def transform(self, X):
        """Compute the LLC representation of the provided data.

        Parameters
        ----------
        X : array_like or list
            The local features to aggregate. They must be either nd arrays or
            a list of nd arrays. In case of a list each item is aggregated
            separately.
        """
        # Get the local features and the number of local features per document
        X, lengths = self._reshape_local_features(X)
        # Preprocess the lengths list into indexes in the local feature array
        starts = np.cumsum([0] + lengths).astype(int)
        ends = np.cumsum(lengths).astype(int)
        # Calculate the nearest neighbors
        # (argpartition gives the K closest codewords per feature, unordered)
        centroids = self._clusterer.cluster_centers_
        distances = pairwise_distances(X, centroids)
        K = self.neighbors
        neighbors = np.argpartition(distances, K)[:, :K]
        # Compute the llc representation
        llc = np.zeros((len(lengths), self.n_codewords))
        L2 = self.beta * np.eye(X.shape[1])
        for i, (s, e) in enumerate(zip(starts, ends)):
            for j in range(s, e):
                # a = argmin_{1^T a = 1} ||x - Ca||_2^2 + \beta ||a||_2^2
                C = centroids[neighbors[j]]
                a = C.dot(np.linalg.inv(C.T.dot(C) + L2)).dot(X[j])
                # Max-pool the (sum-normalized) codes over the document
                llc[i, neighbors[j]] = np.maximum(
                    llc[i, neighbors[j]],
                    a / a.sum()
                )
        return llc
| {
"repo_name": "paschalidoud/feature-aggregation",
"path": "feature_aggregation/llc.py",
"copies": "1",
"size": "3648",
"license": "mit",
"hash": -343563646005030900,
"line_mean": 33.4150943396,
"line_max": 85,
"alpha_frac": 0.5718201754,
"autogenerated": false,
"ratio": 4.256709451575262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001996489688459851,
"num_lines": 106
} |
"""Aggregate profiling data and generate an rst file"""
import itertools
import tabulate
def update(means, new, count, num=1):
    """Recursively fold the values of *new* into the running means in *means*.

    Each leaf value in *new* is treated as the mean of *num* observations and
    merged into the running mean (over *count* total observations) stored at
    the same key in *means*; nested dicts are merged recursively.

    Args:
        means: dict of running means, updated in place.
        new: dict of new values; may contain nested dicts.
        count: total observation count after this update.
        num: number of observations *new* represents (default 1).
    """
    for key, val in new.items():
        if isinstance(val, dict):
            # BUG FIX: propagate `num` into nested dicts; the recursive call
            # previously fell back to the default num=1, so nested metrics
            # were under-weighted relative to top-level ones.
            update(means.setdefault(key, {}), val, count, num)
        else:
            value = means.get(key, 0)
            means[key] = value + (val - value) * num / count
def write_file(results, fil):
    """Write the rst comparison report for the given profiling results.

    Args:
        results: mapping of method name -> {game type -> info dict}; each
            info dict holds 'card', 'weight', 'speed', per-start dicts
            ('profcard', 'profweight'), pairwise data ('pair') and a 'count'
            of observations. NOTE: the 'count' entries are popped (the input
            dicts are mutated) during aggregation.
        fil: writable text file that receives the generated rst.
    """
    # Compute normalized speeds
    # For every game type, divide each method's speed by the fastest
    # method's speed on that game, so the minimum becomes 1.0.
    for game in next(iter(results.values())):
        min_speed = min(g[game]['speed'] for g in results.values())
        for games in results.values():
            games[game]['norm_speed'] = games[game]['speed'] / min_speed
    # Aggregate info over all games
    # (incremental weighted means via update(); game_count accumulates the
    # total observation weight so far)
    agg_results = {}
    for method, game_info in results.items():
        agg_info = {}
        game_count = 0
        for info in game_info.values():
            count = info.pop('count')
            game_count += count
            update(agg_info, info, game_count, count)
        agg_results[method] = agg_info
    fil.write(""".. _profile_nash:
Nash Equilibrium Methods Comparison
===================================
For each method available for Nash equilibrium finding, this lists various
information about the performance across different game types and starting
locations. "Fraction of Eqa" is the mean fraction of all equilibria found via
that method or starting location. "Weigted Fraction (of Eqa)" is the same,
except each equilibrium is down weighted by the number of methods that found
it, thus a larger weighted fraction indicates that this method found more
unique equilibria. "Time" is the average time in seconds it took to run this
method for every starting location. "Normalized Time" sets the minimum time for
each game type and sets it to one, thus somewhat mitigating the fact that
certain games may be more difficult than others. It also provides an easy
comparison metric to for baseline timing.
""")
    # Summary table comparing all methods, sorted by fraction of eqa found.
    fil.write(
        'Comparisons Between Methods\n'
        '----------------------------------\n\n')
    fil.write(tabulate.tabulate(
        sorted(([m.title(), v['card'], v['weight'], v['speed'],
                 v['norm_speed']]
                for m, v in agg_results.items()),
               key=lambda x: x[1], reverse=True),
        headers=['Method', 'Fraction of Eqa', 'Weighted Fraction',
                 'Time (sec)', 'Normalized Time'],
        tablefmt='rst'))
    fil.write('\n\n')
    # One section per method with per-start, pairwise and per-game tables.
    for method, game_info in results.items():
        title = method.title()
        fil.write(title)
        fil.write('\n')
        fil.writelines(itertools.repeat('-', len(title)))
        fil.write('\n\n')
        agg_info = agg_results[method]
        fil.write(
            'Initial Profile Rates\n'
            '^^^^^^^^^^^^^^^^^^^^^\n\n')
        fil.write(tabulate.tabulate(
            sorted(([k.capitalize(), v, agg_info['profweight'][k]]
                    for k, v in agg_info['profcard'].items()),
                   key=lambda x: x[1], reverse=True),
            headers=['Starting Type', 'Fraction of Eqa',
                     'Weighted Fraction'], tablefmt='rst'))
        fil.write('\n\n')
        fil.write(
            'Compared to Other Methods\n'
            '^^^^^^^^^^^^^^^^^^^^^^^^^\n\n')
        fil.write(tabulate.tabulate(
            sorted(([m.title(), v,
                     agg_info['norm_speed'] / agg_results[m]['norm_speed']]
                    for m, v in agg_info['pair'].items()),
                   key=lambda x: x[1], reverse=True),
            headers=['Method', 'Fraction of Eqa', 'Time Ratio'],
            tablefmt='rst'))
        fil.write('\n\n')
        fil.write(
            'By Game Type\n'
            '^^^^^^^^^^^^\n\n')
        for game, info in game_info.items():
            fil.write(game.capitalize())
            fil.write('\n')
            fil.writelines(itertools.repeat('"', len(game)))
            fil.write('\n\n')
            fil.write(tabulate.tabulate([
                ['Fraction of Eqa', info['card']],
                ['Weighted Fraction of Eqa', info['weight']],
                ['Time (sec)', info['speed']],
                ['Normalized Time', info['norm_speed']],
            ], headers=['Metric', 'Value'], tablefmt='rst'))
            fil.write('\n\n')
| {
"repo_name": "egtaonline/GameAnalysis",
"path": "profile/display.py",
"copies": "1",
"size": "4344",
"license": "apache-2.0",
"hash": 3641596534176080400,
"line_mean": 37.7857142857,
"line_max": 79,
"alpha_frac": 0.5534069982,
"autogenerated": false,
"ratio": 4.0036866359447005,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50570936341447,
"avg_score": null,
"num_lines": null
} |
# _aggregate.py
import numpy as np
import pandas as pd
import os
from pandas.tools.pivot import pivot_table
import itertools
from database import DB
from _column_lengths import add_column_length
def setup_transfomers():
    """Build dict-based column transformers backed by database lookups.

    Each entry maps a derived column name (e.g. "planning_region") to a
    lookup dict translating values of `key` (bra_id) into values of `value`
    (pr_id), loaded from the given attribute table via DB.make_dict.
    """
    db = DB()
    transformers = {
        "planning_region": {
            "table": "attrs_bra_pr",
            "key": "bra_id",
            "value": "pr_id"
        }
    }
    db_converters = {colname: db.make_dict(**settings)
                        for colname, settings in transformers.items()}
    return db_converters
# Module-level cache of the transformers, built once at import time
# (runs a database query on import).
transformed_depths = setup_transfomers()
def pk(table_name):
''' Determine which columns are part of the primary key, based on table name'''
lookup = {
"y" : ["year"],
"m" : ["month"],
"s" : ['bra_id_s'],
"r" : ['bra_id_r'],
"p" : ["hs_id"],
"c" : ["cfop_class"]
}
pk_cols = []
for letter in table_name:
pk_cols += lookup[letter]
print "PK_cols" , pk_cols
return pk_cols
def year_aggregation(table_data, table_name, pk_cols):
    """Collapse monthly rows into yearly totals.

    Sums table_data over every index level except 'month', then re-attaches
    a sentinel month level of "00" so the result keeps the same MultiIndex
    layout (pk_cols order) as the monthly data.
    """
    non_month_levels = [level for level in pk_cols if level != 'month']
    totals = table_data.groupby(level=non_month_levels).sum()
    totals["month"] = "00"
    return totals.set_index("month", append=True).reorder_levels(pk_cols)
def agg_depths(first, t_name):
    """Aggregate the table at every combination of geographic depths.

    For each bra (geography) column named by an "s" or "r" letter in t_name,
    the id is either truncated to a prefix length from geo_depths or mapped
    through a named transform ("planning_region"), and the data is summed
    for every combination across the geographic columns.
    """
    table= first.reset_index()
    # Prefix lengths of the bra id (1/3/5/7/9 chars) plus the named
    # "planning_region" mapping resolved via transformed_depths.
    geo_depths = [1, 3, 5, 7, "planning_region", 9]
    my_nesting = []
    my_nesting_cols = []
    for letter in t_name:
        if letter in ["s", "r"]:
            my_nesting.append(geo_depths)
            my_nesting_cols.append("bra_id_" + letter)
    print my_nesting, my_nesting_cols
    print
    mynewtable = pd.DataFrame()
    # One aggregation pass per element of the cartesian product of depths
    # across the geographic columns present in this table.
    for depths in itertools.product(*my_nesting):
        my_pk = [table["year"], table["month"]]
        for col_name, d in zip(my_nesting_cols, depths):
            if type(d) == str:
                # Named depth: map each bra id through the lookup dict.
                transformation = transformed_depths[d]
                my_pk.append( table[col_name].map(transformation) )
            else:
                # Numeric depth: truncate the bra id to its first d chars.
                my_pk.append(table[col_name].str.slice(0, d))
        moi = table.groupby(my_pk, sort=False).agg( np.sum )
        mynewtable = pd.concat([mynewtable, moi])
        print "Done: ", depths , " table"
    return mynewtable
def make_table(ymbibip, table_name, output_values, odir, output_name, ignore_list=[]):
print table_name, "table in progress..."
pk_cols = pk(table_name)
print "table name", table_name, "pks=",pk_cols
ymbibip = ymbibip.reset_index()
big_table = ymbibip.groupby(pk_cols).sum()
# big_table = big_table.reset_index()
big_table = agg_depths(ymbibip, table_name)
print "Writing csv to disk..."
big_table, added_cols = add_column_length(table_name, big_table)
tmp = output_values + added_cols
output_path = os.path.join(odir, "output_%s_%s.csv" % (table_name, output_name))
big_table.to_csv(output_path, ";", columns = tmp)
return big_table
| {
"repo_name": "DataViva/dataviva-scripts",
"path": "scripts/ei/table_aggregator.py",
"copies": "1",
"size": "2996",
"license": "mit",
"hash": 2854631000665187300,
"line_mean": 28.0873786408,
"line_max": 86,
"alpha_frac": 0.5857810414,
"autogenerated": false,
"ratio": 3.245937161430119,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43317182028301193,
"avg_score": null,
"num_lines": null
} |
{
"repo_name": "multicastTor/multicastTor",
"path": "torps/util/aggregate_relays.py",
"copies": "1",
"size": "2834",
"license": "bsd-3-clause",
"hash": 6291973294725476000,
"line_mean": 36.7866666667,
"line_max": 229,
"alpha_frac": 0.642554693,
"autogenerated": false,
"ratio": 3.170022371364653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9230743141237601,
"avg_score": 0.01636678462541036,
"num_lines": 75
} | |
"""Aggregates the performance results from a edw benchmark.
An edw benchmark, runs multiple iterations of a suite of queries.
Independent raw query performance is aggregated during the benchmark, and used
for generating:
a. Raw query performance samples
b. Aggregated query performance samples
c. Raw wall time for each stream in each iteration
d. Raw wall time for each iteration
e. Aggregated (average) iteration wall time
f. Raw geo mean performance for each iteration
g. Aggregated geo mean performance using the aggregated query performances
"""
import copy
import enum
import functools
import json
import logging
from typing import Any, Dict, Iterable, List, Text
from absl import flags
import numpy as np
from perfkitbenchmarker import sample
# Benchmark flag: when True (the default) aggregated metrics such as the
# geomean are emitted in addition to the per-query performance metrics.
flags.DEFINE_bool('edw_generate_aggregated_metrics', True,
                  'Whether the benchmark generates aggregated_metrics such as '
                  'geomean. Query performance metrics are still generated.')
# Module-level handle to the parsed flag values.
FLAGS = flags.FLAGS
class EdwPerformanceAggregationError(Exception):
  """Error encountered during aggregation of performance results.

  Raised when invalid (non-positive) performance values are aggregated or
  when a duplicate query is added to an iteration.
  """
def geometric_mean(iterable: List[float]) -> float:
  """Compute the geometric mean of a list of positive numeric values.

  The mean is computed in log space (exp of the arithmetic mean of the
  logs), which is mathematically equivalent to prod(x) ** (1/n) but does
  not overflow to inf when the running product of the values exceeds the
  float range (e.g. many large per-query times).

  Args:
    iterable: A List of positive Float performance values.

  Returns:
    A float value equal to the geometric mean of the input performance values.

  Raises:
    EdwPerformanceAggregationError: If an invalid performance value was
      included for aggregation (empty input or a non-positive value).
  """
  if (not iterable or any(perf <= 0.0 for perf in iterable)):
    raise EdwPerformanceAggregationError('Invalid values cannot be aggregated.')
  # exp(mean(log(x))) avoids overflow/underflow of the naive product form.
  return float(np.exp(np.mean(np.log(iterable))))
class EdwQueryExecutionStatus(enum.Enum):
  """Status of a single edw query execution.

  Members:
    FAILED: the query execution failed.
    SUCCESSFUL: the query execution succeeded.
  """

  FAILED = 'query_execution_failed'
  SUCCESSFUL = 'query_execution_successful'
class EdwQueryPerformance(object):
  """Performance record for a single executed edw query.

  Attributes:
    name: A string name of the query that was executed.
    performance: Float completion time of the query in seconds. The sentinel
      -1.0 marks a failed query; successful queries are expected to be
      positive.
    execution_status: An EdwQueryExecutionStatus enum for success/failure.
    metadata: A dictionary of query execution attributes (job_id, etc.).
  """

  def __init__(self, query_name: Text, performance: float,
               metadata: Dict[str, str]):
    # TODO(user): add query start and query end as attributes.
    self.name = query_name
    self.performance = performance
    if performance == -1.0:
      self.execution_status = EdwQueryExecutionStatus.FAILED
    else:
      self.execution_status = EdwQueryExecutionStatus.SUCCESSFUL
    self.metadata = metadata

  @classmethod
  def from_json(cls, serialized_performance: str):
    """Build an EdwQueryPerformance from the client jar's json output.

    Expected Performance format:
      {"query_wall_time_in_secs":1.998,"query_end":1601695222108,"query":"1",
      "query_start":1601695220110,
      "details":{"job_id":"b66b5a8e-633f-4ee4-8632-4e3d0856172f"}}

    Args:
      serialized_performance: Stringified json performance.

    Returns:
      An instance of EdwQueryPerformance
    """
    results = json.loads(serialized_performance)
    wall_time = results['query_wall_time_in_secs']
    if wall_time == -1:
      logging.warning('Query %s failed.', results['query'])
    return cls(query_name=results['query'],
               performance=wall_time,
               metadata=results.get('details', {}))

  def get_performance_sample(self, metadata: Dict[str, str]) -> sample.Sample:
    """Build a raw query-time sample for this query.

    Args:
      metadata: A dictionary of execution attributes to be merged with the
        query execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A sample for the edw query performance.
    """
    merged = copy.copy(metadata)
    merged['query'] = self.name
    merged['execution_status'] = self.execution_status
    merged.update(self.metadata)
    return sample.Sample('edw_raw_query_time', self.performance, 'seconds',
                         merged)

  def get_performance_value(self) -> float:
    """Return the query's completion time in seconds."""
    return self.performance

  def get_performance_metadata(self) -> Dict[str, str]:
    """Return the query's execution attributes (job_id, etc.)."""
    return self.metadata

  def is_successful(self) -> bool:
    """Return True iff the query executed successfully."""
    return self.execution_status == EdwQueryExecutionStatus.SUCCESSFUL
class EdwBaseIterationPerformance(object):
  """Class that represents the performance of an iteration of edw queries.

  Base class for iteration-performance implementations; defines no behavior
  of its own.
  """
class EdwPowerIterationPerformance(EdwBaseIterationPerformance):
  """Class that represents the performance of a power iteration of edw queries.

  Attributes:
    id: A unique string id for the iteration.
    performance: A dictionary of query name to its execution performance which
      is an EdwQueryPerformance instance.
    successful_count: An integer count of the successful queries in the
      iteration.
    total_count: An integer count of the total number of queries in the
      iteration.
  """

  def __init__(self, iteration_id: Text, total_queries: int):
    self.id = iteration_id
    # Query name -> EdwQueryPerformance, populated via add_query_performance.
    self.performance = {}
    self.total_count = total_queries
    self.successful_count = 0

  def add_query_performance(self, query_name: Text, performance: float,
                            metadata: Dict[str, str]):
    """Creates and populates a query performance from the input results.

    Updates the iteration's performance map with the query performance.
    The method also increments the success and failure query counts for the
    iteration.

    Args:
      query_name: A string name of the query that was executed
      performance: A Float variable set to the query's completion time in secs.
        -1.0 is used as a sentinel value implying the query failed. For a
        successful query the value is expected to be positive.
      metadata: Extra metadata to add to each performance.

    Raises:
      EdwPerformanceAggregationError: If the query has already been added.
    """
    query_metadata = copy.copy(metadata)
    query_performance = EdwQueryPerformance(
        query_name=query_name, performance=performance, metadata=query_metadata)
    if query_performance.name in self.performance:
      raise EdwPerformanceAggregationError('Attempting to aggregate a '
                                           'duplicate query: %s.' %
                                           query_performance.name)
    self.performance[query_performance.name] = query_performance
    if query_performance.is_successful():
      self.successful_count += 1

  def has_query_performance(self, query_name: Text) -> bool:
    """Returns whether the query was run at least once in the iteration.

    Args:
      query_name: A String name of the query to check.

    Returns:
      A boolean value indicating if the query was executed in the iteration.
    """
    return query_name in self.performance

  def is_query_successful(self, query_name: Text) -> bool:
    """Returns whether the query was successful in the iteration.

    Args:
      query_name: A String name of the query to check.

    Returns:
      A boolean value indicating if the query was successful in the iteration.
      False if the query was never executed in the iteration.
    """
    # Guard against a query that never ran: the previous unconditional
    # self.performance.get(query_name).is_successful() raised AttributeError
    # on None. This matches the behavior of
    # EdwSimultaneousIterationPerformance.is_query_successful.
    query_performance = self.performance.get(query_name)
    return query_performance is not None and query_performance.is_successful()

  def get_query_performance(self, query_name: Text) -> float:
    """Gets a query's execution performance generated during iteration execution.

    Args:
      query_name: A String name of the query to retrieve details for

    Returns:
      A float value set to the query's completion time in secs.
    """
    return self.performance[query_name].get_performance_value()

  def get_query_metadata(self, query_name: Text) -> Dict[str, Any]:
    """Gets the metadata of a query as executed in the current iteration.

    Args:
      query_name: Name of the query whose performance is requested.

    Returns:
      A dictionary set to the query's metadata.

    Raises:
      EdwPerformanceAggregationError: If the query failed.
    """
    if not self.is_query_successful(query_name):
      raise EdwPerformanceAggregationError('Cannot aggregate invalid / failed'
                                           ' query' + query_name)
    return self.performance.get(query_name).metadata

  def get_all_queries_in_iteration(self) -> List[Text]:
    """Gets a list of names of all queries in the iteration.

    Returns:
      A list of all queries in the iteration.
    """
    # Materialize to a list so the return value matches the annotated type
    # instead of leaking a live dict key view.
    return list(self.performance)

  def get_all_query_performance_samples(
      self, metadata: Dict[str, str]) -> List[sample.Sample]:
    """Gets a list of samples for all queries in the iteration.

    Args:
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A list of samples of each query's performance
    """
    return [
        query_performance.get_performance_sample(metadata)
        for query_performance in self.performance.values()
    ]

  def is_successful(self, expected_queries: List[Text]) -> bool:
    """Check if all the expected queries ran and all succeeded."""
    all_queries_ran = set(
        self.get_all_queries_in_iteration()) == set(expected_queries)
    all_queries_were_successful = self.total_count == self.successful_count
    return all_queries_ran and all_queries_were_successful

  def get_queries_geomean(self) -> float:
    """Gets the geometric mean of all queries in the iteration.

    Returns:
      The (float) geometric mean of all the queries ran in the iteration.

    Raises:
      EdwPerformanceAggregationError: If the iteration contains unsuccessful
        query executions.
    """
    return geometric_mean([
        query_performance.performance
        for query_performance in self.performance.values()
    ])

  def get_queries_geomean_performance_sample(
      self, expected_queries: List[Text], metadata: Dict[str,
                                                         str]) -> sample.Sample:
    """Gets a sample for geomean of all queries in the iteration.

    Args:
      expected_queries: A list of query names expected to have been executed in
        an iteration.
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A sample of iteration geomean performance.

    Raises:
      EdwPerformanceAggregationError: If the iteration contains unsuccessful
        query executions.
    """
    if not self.is_successful(expected_queries):
      raise EdwPerformanceAggregationError('Failed executions in iteration.')
    raw_geo_mean = self.get_queries_geomean()
    geo_mean_metadata = copy.copy(metadata)
    return sample.Sample('edw_iteration_geomean_time', raw_geo_mean, 'seconds',
                         geo_mean_metadata)
class EdwSimultaneousIterationPerformance(EdwBaseIterationPerformance):
  """Class that represents the performance of a simultaneous iteration.

  Attributes:
    id: A unique string id for the iteration.
    start_time: The start time of the iteration in milliseconds since epoch.
    end_time: The end time of the iteration in milliseconds since epoch.
    wall_time: The wall time in seconds as a double value.
    performance: A dictionary of query name to its execution performance which
      is an EdwQueryPerformance instance.
    all_queries_succeeded: Whether all queries in the iteration were successful.
  """

  def __init__(self, iteration_id: Text, iteration_start_time: int,
               iteration_end_time: int, iteration_wall_time: float,
               iteration_performance: Dict[str, EdwQueryPerformance],
               all_queries_succeeded: bool):
    self.id = iteration_id
    self.start_time = iteration_start_time
    self.end_time = iteration_end_time
    self.wall_time = iteration_wall_time
    self.performance = iteration_performance
    self.all_queries_succeeded = all_queries_succeeded

  @classmethod
  def from_json(cls, iteration_id: str, serialized_performance: str):
    """Process the serialized simultaneous iteration performance from client jar.

    Expected Performance format:
      {"simultaneous_end":1601145943197,"simultaneous_start":1601145940113,
      "all_queries_performance_array":[{"query_wall_time_in_secs":2.079,
      "query_end":1601145942208,"job_id":"914682d9-4f64-4323-bad2-554267cbbd8d",
      "query":"1","query_start":1601145940129},{"query_wall_time_in_secs":2.572,
      "query_end":1601145943192,"job_id":"efbf93a1-614c-4645-a268-e3801ae994f1",
      "query":"2","query_start":1601145940620}],
      "simultaneous_wall_time_in_secs":3.084}

    Args:
      iteration_id: String identifier of the simultaneous iteration.
      serialized_performance: Stringified json performance.

    Returns:
      An instance of EdwSimultaneousIterationPerformance
    """
    results = json.loads(serialized_performance)
    query_performance_map = {}
    all_queries_succeeded = 'failure_reason' not in results
    if all_queries_succeeded:
      for query_perf_json in results['all_queries_performance_array']:
        query_perf = EdwQueryPerformance.from_json(
            serialized_performance=(json.dumps(query_perf_json)))
        query_performance_map[query_perf.name] = query_perf
    else:
      logging.warning('Failure reported. Reason: %s', results['failure_reason'])
    return cls(
        iteration_id=iteration_id,
        iteration_start_time=(results['simultaneous_start']
                              if all_queries_succeeded else -1),
        iteration_end_time=(results['simultaneous_end']
                            if all_queries_succeeded else -1),
        # Fall back to the same sentinel style as start/end above instead of
        # raising KeyError when a failed run omits the wall time key.
        iteration_wall_time=results.get('simultaneous_wall_time_in_secs', -1.0),
        iteration_performance=query_performance_map,
        all_queries_succeeded=all_queries_succeeded)

  def get_wall_time(self) -> float:
    """Gets the total wall time, in seconds, for the iteration.

    The wall time is the time from the start of the first query to the end time
    of the last query to finish.

    Returns:
      The wall time in seconds.
    """
    return self.wall_time

  def get_wall_time_performance_sample(self, metadata: Dict[
      str, str]) -> sample.Sample:
    """Gets a sample for wall time performance of the iteration.

    Args:
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A sample of iteration wall time performance
    """
    wall_time_metadata = copy.copy(metadata)
    wall_time_metadata['iteration_start_time'] = self.start_time
    wall_time_metadata['iteration_end_time'] = self.end_time
    return sample.Sample('edw_iteration_wall_time', self.wall_time, 'seconds',
                         wall_time_metadata)

  def get_all_query_performance_samples(
      self, metadata: Dict[str, str]) -> List[sample.Sample]:
    """Gets a list of samples for all queries in the iteration.

    Args:
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A list of samples of each query's performance
    """
    return [
        query_performance.get_performance_sample(metadata)
        for query_performance in self.performance.values()
    ]

  def is_successful(self, expected_queries: List[Text]) -> bool:
    """Check if all the expected queries ran and all succeeded."""
    all_queries_ran = self.performance.keys() == set(expected_queries)
    return all_queries_ran and self.all_queries_succeeded

  def has_query_performance(self, query_name: Text) -> bool:
    """Returns whether the query was run at least once in the iteration.

    Args:
      query_name: A String name of the query to check.

    Returns:
      A boolean value indicating if the query was executed in the iteration.
    """
    return query_name in self.performance

  def is_query_successful(self, query_name: Text) -> bool:
    """Returns whether the query was successful in the iteration.

    Args:
      query_name: A String name of the query to check.

    Returns:
      A boolean value indicating if the query was successful in the iteration.
      False if the query was never executed.
    """
    if self.has_query_performance(query_name):
      return self.performance.get(query_name).is_successful()
    return False

  def get_query_performance(self, query_name: Text) -> float:
    """Gets a query's execution performance in the current iteration.

    Args:
      query_name: A String name of the query to retrieve details for

    Returns:
      A float value set to the query's completion time in secs.
    """
    return self.performance[query_name].get_performance_value()

  def get_query_metadata(self, query_name: Text) -> Dict[str, Any]:
    """Gets the metadata of a query in the current iteration.

    Args:
      query_name: Name of the query whose aggregated performance is requested

    Returns:
      A dictionary set to the query's aggregated metadata, accumulated from the
      raw query run in the current iteration.

    Raises:
      EdwPerformanceAggregationError: If the query failed in the iteration.
    """
    if not self.is_query_successful(query_name):
      raise EdwPerformanceAggregationError('Cannot aggregate invalid / failed'
                                           ' query' + query_name)
    return self.performance.get(query_name).metadata

  def get_queries_geomean(self) -> float:
    """Gets the geometric mean of all queries in the iteration.

    Returns:
      The (float) geometric mean of all the queries ran in the iteration.

    Raises:
      EdwPerformanceAggregationError: If the iteration contains unsuccessful
        query executions.
    """
    return geometric_mean([
        query_performance.performance
        for query_performance in self.performance.values()
    ])

  def get_queries_geomean_performance_sample(
      self, expected_queries: List[Text], metadata: Dict[str,
                                                         str]) -> sample.Sample:
    """Gets a sample for geomean of all queries in the iteration.

    Args:
      expected_queries: A list of query names expected to have been executed in
        an iteration.
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A sample of iteration geomean performance.

    Raises:
      EdwPerformanceAggregationError: If the iteration contains unsuccessful
        query executions.
    """
    if not self.is_successful(expected_queries):
      raise EdwPerformanceAggregationError('Failed executions in iteration.')
    raw_geo_mean = self.get_queries_geomean()
    geo_mean_metadata = copy.copy(metadata)
    return sample.Sample('edw_iteration_geomean_time', raw_geo_mean, 'seconds',
                         geo_mean_metadata)
class EdwThroughputIterationPerformance(EdwBaseIterationPerformance):
  """Class that represents the performance of an iteration of edw queries.

  Attributes:
    id: A unique string id for the iteration.
    start_time: The start time of the iteration execution.
    end_time: The end time of the iteration execution.
    wall_time: The wall time of the stream execution.
    performance: A dict of stream_id to stream performances, each of which is a
      dictionary mapping query names to their execution performances, which are
      EdwQueryPerformance instances.
  """

  def __init__(self, iteration_id: Text, iteration_start_time: int,
               iteration_end_time: int, iteration_wall_time: float,
               iteration_performance: Dict[str, Dict[str,
                                                     EdwQueryPerformance]]):
    self.id = iteration_id
    self.start_time = iteration_start_time
    self.end_time = iteration_end_time
    self.wall_time = iteration_wall_time
    self.performance = iteration_performance

  @classmethod
  def from_json(cls, iteration_id: str, serialized_performance: str):
    """Process the serialized throughput iteration performance from client jar.

    Expected Performance format:
      {"throughput_start":1601666911596,"throughput_end":1601666916139,
      "throughput_wall_time_in_secs":4.543,
      "all_streams_performance_array":[
      {"stream_start":1601666911597,"stream_end":1601666916139,
      "stream_wall_time_in_secs":4.542,
      "stream_performance_array":[
      {"query_wall_time_in_secs":2.238,"query_end":1601666913849,
      "query":"1","query_start":1601666911611,
      "details":{"job_id":"438170b0-b0cb-4185-b733-94dd05b46b05"}},
      {"query_wall_time_in_secs":2.285,"query_end":1601666916139,
      "query":"2","query_start":1601666913854,
      "details":{"job_id":"371902c7-5964-46f6-9f90-1dd00137d0c8"}}
      ]},
      {"stream_start":1601666911597,"stream_end":1601666916018,
      "stream_wall_time_in_secs":4.421,
      "stream_performance_array":[
      {"query_wall_time_in_secs":2.552,"query_end":1601666914163,
      "query":"2","query_start":1601666911611,
      "details":{"job_id":"5dcba418-d1a2-4a73-be70-acc20c1f03e6"}},
      {"query_wall_time_in_secs":1.855,"query_end":1601666916018,
      "query":"1","query_start":1601666914163,
      "details":{"job_id":"568c4526-ae26-4e9d-842c-03459c3a216d"}}
      ]}
      ]}

    Args:
      iteration_id: String identifier of the throughput iteration.
      serialized_performance: Stringified json performance.

    Returns:
      An instance of EdwThroughputIterationPerformance
    """
    results = json.loads(serialized_performance)
    stream_performances = {}
    all_queries_succeeded = 'failure_reason' not in results
    if all_queries_succeeded:
      for stream_id, stream_perf_json in enumerate(
          results['all_streams_performance_array']):
        # Streams have no ids in the payload; their array index is used.
        stream_id = str(stream_id)
        stream_performance_map = {}
        for query_perf_json in stream_perf_json['stream_performance_array']:
          query_perf = EdwQueryPerformance.from_json(
              serialized_performance=(json.dumps(query_perf_json)))
          stream_performance_map[query_perf.name] = query_perf
        stream_performances[stream_id] = stream_performance_map
    else:
      logging.warning('Failure reported. Reason: %s', results['failure_reason'])
    return cls(
        iteration_id=iteration_id,
        iteration_start_time=(results['throughput_start']
                              if all_queries_succeeded else -1),
        iteration_end_time=(results['throughput_end']
                            if all_queries_succeeded else -1),
        # Fall back to the same sentinel style as start/end above instead of
        # raising KeyError when a failed run omits the wall time key.
        iteration_wall_time=results.get('throughput_wall_time_in_secs', -1.0),
        iteration_performance=stream_performances)

  def has_query_performance(self, query_name: Text) -> bool:
    """Returns whether the query was run at least once in the iteration.

    Args:
      query_name: A String name of the query to check.

    Returns:
      A boolean value indicating if the query was executed in the iteration.
    """
    return any(
        query_name in stream for stream in self.performance.values())

  def is_query_successful(self, query_name: Text) -> bool:
    """Returns whether the query was successful in the iteration.

    Args:
      query_name: A String name of the query to check.

    Returns:
      A boolean value indicating if the query was successful in the iteration.
    """
    # NOTE(review): vacuously True if the query never ran in any stream;
    # callers are expected to check has_query_performance first.
    for stream in self.performance.values():
      if query_name in stream:
        if not stream[query_name].is_successful():
          return False
    return True

  def get_query_performance(self, query_name: Text) -> float:
    """Gets a query's execution performance aggregated across all streams in the current iteration.

    Args:
      query_name: A String name of the query to retrieve details for

    Returns:
      A float value set to the query's average completion time in secs, or
      -1.0 if the query did not run in any stream.
    """
    all_performances = [
        stream[query_name].get_performance_value()
        for stream in self.performance.values()
        if query_name in stream
    ]
    if not all_performances:
      return -1.0
    return sum(all_performances) / len(all_performances)

  def get_query_metadata(self, query_name: Text) -> Dict[str, Any]:
    """Gets the metadata of a query aggregated across all streams in the current iteration.

    Each entry is prefixed with the id of the stream that produced it.

    Args:
      query_name: Name of the query whose aggregated performance is requested

    Returns:
      A dictionary set to the query's aggregated metadata, accumulated from the
      raw query runs in all streams of the current iteration.

    Raises:
      EdwPerformanceAggregationError: If the query failed in one or more streams
    """
    result = {}
    for stream_id, stream_performance in self.performance.items():
      if query_name in stream_performance:
        q_performance = stream_performance[query_name]
        result[stream_id + '_runtime'] = q_performance.get_performance_value()
        result.update({
            stream_id + '_' + k: v
            for (k, v) in q_performance.get_performance_metadata().items()
        })
    return result

  def get_all_query_performance_samples(
      self, metadata: Dict[str, str]) -> List[sample.Sample]:
    """Gets a list of samples for all queries in all streams of the iteration.

    Args:
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A list of samples of each query's performance
    """
    all_query_performances = []
    for stream_id, stream_performance in self.performance.items():
      stream_metadata = copy.copy(metadata)
      stream_metadata['stream'] = stream_id
      all_query_performances.extend(
          query_perf.get_performance_sample(stream_metadata)
          for query_perf in stream_performance.values())
    return all_query_performances

  def all_streams_ran_all_expected_queries(
      self, expected_queries: List[Text]) -> bool:
    """Checks that the same set of expected queries ran in all streams."""
    expected = set(expected_queries)
    return all(
        set(stream.keys()) == expected
        for stream in self.performance.values())

  def no_duplicate_queries(self) -> bool:
    """Checks that no streams contain any duplicate queries.

    NOTE(review): streams are dicts keyed by query name, so duplicates cannot
    currently occur and this check is always True; kept for interface parity.
    """
    for stream in self.performance.values():
      if len(stream.keys()) != len(set(stream.keys())):
        return False
    return True

  def all_queries_succeeded(self) -> bool:
    """Checks if every query in every stream was successful."""
    # -1 is the documented sentinel value for a failed query.
    return all(
        query_perf.performance != -1
        for stream_performance in self.performance.values()
        for query_perf in stream_performance.values())

  def is_successful(self, expected_queries: List[Text]) -> bool:
    """Check if the throughput run was successful.

    A successful run meets the following conditions:
    - There were more than 0 streams.
    - Each stream ran the same set of expected queries (regardless of order)
    - Each stream ran each query only once
    - Every query in every stream succeeded

    Args:
      expected_queries: A list of query names expected to have been executed in
        an iteration.

    Returns:
      True if all success conditions were met, false otherwise.
    """
    non_zero_streams = len(self.performance) >= 1
    all_streams_ran_all_queries = self.all_streams_ran_all_expected_queries(
        expected_queries)
    no_duplicate_queries = self.no_duplicate_queries()
    all_queries_succeeded = self.all_queries_succeeded()
    return (non_zero_streams and all_streams_ran_all_queries and
            no_duplicate_queries and all_queries_succeeded)

  def get_queries_geomean(self) -> float:
    """Gets the geometric mean of all queries in all streams of the iteration.

    Returns:
      The (float) geometric mean of all the individual queries ran in all
      streams of the iteration.

    Raises:
      EdwPerformanceAggregationError: If the suite contains unsuccessful query
        executions.
    """
    return geometric_mean([
        query.get_performance_value()
        for stream in self.performance.values()
        for query in stream.values()
    ])

  def get_queries_geomean_performance_sample(
      self, expected_queries: List[Text], metadata: Dict[str,
                                                         str]) -> sample.Sample:
    """Gets a sample for geomean of all queries in all streams of the iteration.

    Args:
      expected_queries: A list of query names expected to have been executed in
        an iteration.
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A sample of iteration geomean performance.

    Raises:
      EdwPerformanceAggregationError: If the iteration contains unsuccessful
        query executions.
    """
    if not self.is_successful(expected_queries):
      raise EdwPerformanceAggregationError('Failed executions in iteration.')
    raw_geo_mean = self.get_queries_geomean()
    geo_mean_metadata = copy.copy(metadata)
    return sample.Sample('edw_iteration_geomean_time', raw_geo_mean, 'seconds',
                         geo_mean_metadata)

  def get_wall_time(self) -> float:
    """Gets the total wall time, in seconds, for the iteration.

    The wall time is the time from the start of the first stream to the end time
    of the last stream to finish.

    Returns:
      The wall time in seconds.
    """
    return self.wall_time

  def get_wall_time_performance_sample(
      self, metadata: Dict[str, str]) -> sample.Sample:
    """Gets a sample for total wall time performance of the iteration.

    Args:
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A sample of iteration wall time performance
    """
    wall_time_metadata = copy.copy(metadata)
    wall_time_metadata['iteration_start_time'] = self.start_time
    wall_time_metadata['iteration_end_time'] = self.end_time
    return sample.Sample('edw_iteration_wall_time', self.wall_time, 'seconds',
                         wall_time_metadata)
class EdwBenchmarkPerformance(object):
  """Class that represents the performance of an edw benchmark.

  Attributes:
    total_iterations: An integer variable set to total of number of iterations.
    expected_queries: A list of query names that are executed in an iteration of
      the benchmark
    iteration_performances: A dictionary of iteration id (String value) to its
      execution performance (an instance of EdwBaseIterationPerformance)
  """

  def __init__(self, total_iterations: int, expected_queries: Iterable[Text]):
    self.total_iterations = total_iterations
    self.expected_queries = list(expected_queries)
    # Iteration id -> EdwBaseIterationPerformance, populated via
    # add_iteration_performance.
    self.iteration_performances = {}

  def add_iteration_performance(self, performance: EdwBaseIterationPerformance):
    """Add an iteration's performance to the benchmark results.

    Args:
      performance: An instance of EdwBaseIterationPerformance encapsulating the
        iteration performance details.

    Raises:
      EdwPerformanceAggregationError: If the iteration has already been added.
    """
    iteration_id = performance.id
    if iteration_id in self.iteration_performances:
      raise EdwPerformanceAggregationError('Attempting to aggregate a duplicate'
                                           ' iteration: %s.' % iteration_id)
    self.iteration_performances[iteration_id] = performance

  def is_successful(self) -> bool:
    """Check a benchmark's success, only if all the iterations succeed."""
    # all() replaces the previous functools.reduce over a list, which raised
    # TypeError when no iteration had been added yet (reduce of an empty
    # sequence with no initial value).
    return all(
        iteration_performance.is_successful(self.expected_queries)
        for iteration_performance in self.iteration_performances.values())

  def aggregated_query_status(self, query_name: Text) -> bool:
    """Gets the status of query aggregated across all iterations.

    A query is considered successful only if
    a. Query was executed in every iteration
    b. Query was successful in every iteration

    Args:
      query_name: Name of the query whose aggregated success is requested

    Returns:
      A boolean value indicating if the query was successful in the benchmark.
    """
    for performance in self.iteration_performances.values():
      if not performance.has_query_performance(query_name):
        return False
      if not performance.is_query_successful(query_name):
        return False
    return True

  def aggregated_query_execution_time(self, query_name: Text) -> float:
    """Gets the execution time of query aggregated across all iterations.

    Args:
      query_name: Name of the query whose aggregated performance is requested

    Returns:
      A float value set to the query's aggregated execution time

    Raises:
      EdwPerformanceAggregationError: If the query failed in one or more
        iterations
    """
    if not self.aggregated_query_status(query_name):
      raise EdwPerformanceAggregationError('Cannot aggregate invalid / failed '
                                           'query ' + query_name)
    query_performances = [
        iteration_performance.get_query_performance(query_name)
        for iteration_performance in self.iteration_performances.values()
    ]
    # NOTE(review): divides by the configured iteration count, which assumes
    # every expected iteration has been added — confirm with callers.
    return sum(query_performances) / self.total_iterations

  def aggregated_query_metadata(self, query_name: Text) -> Dict[str, Any]:
    """Gets the metadata of a query aggregated across all iterations.

    Each entry is prefixed with the id of the iteration that produced it.

    Args:
      query_name: Name of the query whose aggregated performance is requested

    Returns:
      A dictionary set to the query's aggregated metadata, accumulated from the
      raw query runs.

    Raises:
      EdwPerformanceAggregationError: If the query failed in one or more
        iterations
    """
    if not self.aggregated_query_status(query_name):
      raise EdwPerformanceAggregationError('Cannot aggregate invalid / failed '
                                           'query ' + query_name)
    result = {}
    for iteration_id, iteration_performance in (
        self.iteration_performances.items()):
      result.update({
          iteration_id + '_' + k: v
          for (k, v) in iteration_performance.get_query_metadata(
              query_name).items()
      })
    return result

  def get_aggregated_query_performance_sample(
      self, query_name: Text, metadata: Dict[str, str]) -> sample.Sample:
    """Gets the performance of query aggregated across all iterations.

    Args:
      query_name: Name of the query whose aggregated performance is requested
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A sample of the query's aggregated execution time
    """
    query_metadata = copy.copy(metadata)
    query_metadata['query'] = query_name
    query_metadata['aggregation_method'] = 'mean'
    # Failed queries report the -1.0 sentinel with FAILED status and no
    # aggregated metadata.
    perf, exec_status, agg_md = -1.0, EdwQueryExecutionStatus.FAILED, {}
    if self.aggregated_query_status(query_name):
      perf = self.aggregated_query_execution_time(query_name=query_name)
      exec_status = EdwQueryExecutionStatus.SUCCESSFUL
      agg_md = self.aggregated_query_metadata(query_name=query_name)
    query_metadata['execution_status'] = exec_status
    query_metadata.update(agg_md)
    return sample.Sample('edw_aggregated_query_time', perf, 'seconds',
                         query_metadata)

  def get_all_query_performance_samples(self, metadata: Dict[str, str]) -> List[
      sample.Sample]:
    """Generates samples for all query performances.

    Benchmark relies on iteration runs to generate the raw query performance
    samples
    Benchmark appends the aggregated query performance sample

    Args:
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A list of samples (raw and aggregated)
    """
    results = []
    # Raw query performance samples
    for iteration, performance in self.iteration_performances.items():
      iteration_metadata = copy.copy(metadata)
      iteration_metadata['iteration'] = iteration
      results.extend(performance.get_all_query_performance_samples(
          iteration_metadata))
    # Aggregated query performance samples
    for query in self.expected_queries:
      results.append(self.get_aggregated_query_performance_sample(
          query_name=query, metadata=metadata))
    return results

  def get_aggregated_wall_time_performance_sample(self,
                                                  metadata: Dict[str, str]
                                                 ) -> sample.Sample:
    """Gets the wall time performance aggregated across all iterations.

    Args:
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A sample of aggregated (averaged) wall time.
    """
    wall_times = [
        iteration.get_wall_time()
        for iteration in self.iteration_performances.values()
    ]
    aggregated_wall_time = sum(wall_times) / self.total_iterations
    wall_time_metadata = copy.copy(metadata)
    wall_time_metadata['aggregation_method'] = 'mean'
    return sample.Sample('edw_aggregated_wall_time', aggregated_wall_time,
                         'seconds', wall_time_metadata)

  def get_wall_time_performance_samples(self, metadata: Dict[str, str]):
    """Generates samples for all wall time performances.

    Benchmark relies on iterations to generate the raw wall time performance
    samples.
    Benchmark appends the aggregated wall time performance sample

    Args:
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A list of samples (raw and aggregated)
    """
    results = []
    for iteration, performance in self.iteration_performances.items():
      iteration_metadata = copy.copy(metadata)
      iteration_metadata['iteration'] = iteration
      results.append(performance.get_wall_time_performance_sample(
          iteration_metadata))
    results.append(self.get_aggregated_wall_time_performance_sample(
        metadata=metadata))
    return results

  def get_aggregated_geomean_performance_sample(self,
                                                metadata:
                                                Dict[str,
                                                     str]) -> sample.Sample:
    """Gets the geomean performance aggregated across all iterations.

    Args:
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A sample of aggregated geomean

    Raises:
      EdwPerformanceAggregationError: If the benchmark contains a failed query
        execution.
    """
    if not self.is_successful():
      raise EdwPerformanceAggregationError('Benchmark contains a failed query.')
    aggregated_geo_mean = geometric_mean([
        self.aggregated_query_execution_time(query_name=query)
        for query in self.expected_queries
    ])
    geomean_metadata = copy.copy(metadata)
    geomean_metadata['intra_query_aggregation_method'] = 'mean'
    geomean_metadata['inter_query_aggregation_method'] = 'geomean'
    return sample.Sample('edw_aggregated_geomean', aggregated_geo_mean,
                         'seconds', geomean_metadata)

  def get_queries_geomean_performance_samples(self, metadata: Dict[str, str]
                                             ) -> List[sample.Sample]:
    """Generates samples for all geomean performances.

    Benchmark relies on iteration runs to generate the raw geomean performance
    samples
    Benchmark appends the aggregated geomean performance sample

    Args:
      metadata: A dictionary of execution attributes to be merged with the query
        execution attributes, for eg. tpc suite, scale of dataset, etc.

    Returns:
      A list of samples (raw and aggregated)

    Raises:
      EdwPerformanceAggregationError: If the benchmark contains a failed query
        execution
    """
    if not self.is_successful():
      raise EdwPerformanceAggregationError('Benchmark contains a failed query.')
    results = []
    for iteration, performance in self.iteration_performances.items():
      iteration_metadata = copy.copy(metadata)
      iteration_metadata['iteration'] = iteration
      results.append(
          performance.get_queries_geomean_performance_sample(
              self.expected_queries, iteration_metadata))
    results.append(self.get_aggregated_geomean_performance_sample(
        metadata=metadata))
    return results
| {
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"path": "perfkitbenchmarker/edw_benchmark_results_aggregator.py",
"copies": "1",
"size": "42267",
"license": "apache-2.0",
"hash": 1550876223856840000,
"line_mean": 37.4595086442,
"line_max": 99,
"alpha_frac": 0.6775735207,
"autogenerated": false,
"ratio": 4.300234001424356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5477807522124356,
"avg_score": null,
"num_lines": null
} |
"""Aggregation function for CLI specified options and config file options.
This holds the logic that uses the collected and merged config files and
applies the user-specified command-line configuration on top of it.
"""
import logging
from flake8 import utils
from flake8.options import config
LOG = logging.getLogger(__name__)
def aggregate_options(manager, arglist=None, values=None):
    """Aggregate and merge CLI and config file options.

    :param flake8.option.manager.OptionManager manager:
        The instance of the OptionManager that we're presently using.
    :param list arglist:
        The list of arguments to pass to ``manager.parse_args``. In most cases
        this will be None so ``parse_args`` uses ``sys.argv``. This is mostly
        available to make testing easier.
    :param optparse.Values values:
        Previously parsed set of parsed options.
    :returns:
        Tuple of the parsed options and extra arguments returned by
        ``manager.parse_args``.
    :rtype:
        tuple(optparse.Values, list)
    """
    # Parse with an empty argument list first to obtain the option defaults.
    defaults, _ = manager.parse_args([], values=values)

    # Parse the real command line so we can discover additional config file
    # paths and whether --config / --isolated were specified.
    cli_values, cli_args = manager.parse_args(arglist)
    extra_config_files = utils.normalize_paths(cli_values.append_config)

    # Build the merged configuration-file parser.
    merged_parser = config.MergedConfigParser(
        option_manager=manager,
        extra_config_files=extra_config_files,
        args=cli_args,
    )

    # Read and merge all applicable configuration files.
    file_options = merged_parser.parse(cli_values.config, cli_values.isolated)

    # Fold the plugin-registered extended default ignore list into the
    # parser's default ignore value.
    extended_default_ignore = manager.extended_default_ignore.copy()
    LOG.debug('Extended default ignore list: %s',
              list(extended_default_ignore))
    extended_default_ignore.update(defaults.ignore)
    defaults.ignore = list(extended_default_ignore)
    LOG.debug('Merged default ignore list: %s', defaults.ignore)

    extended_default_select = manager.extended_default_select.copy()
    LOG.debug('Extended default select list: %s',
              list(extended_default_select))
    defaults.extended_default_select = extended_default_select

    # Layer the values parsed from config files on top of the defaults.
    for option_name, option_value in file_options.items():
        dest = option_name
        # A config option may map onto a differently-named destination
        # attribute; resolve it through the registered Option.
        if not hasattr(defaults, option_name):
            dest = merged_parser.config_options[option_name].dest
        LOG.debug('Overriding default value of (%s) for "%s" with (%s)',
                  getattr(defaults, dest, None),
                  dest,
                  option_value)
        # Override the default values with the config values
        setattr(defaults, dest, option_value)

    # Finally let the command-line options override everything.
    return manager.parse_args(arglist, defaults)
| {
"repo_name": "vicky2135/lucious",
"path": "oscar/lib/python2.7/site-packages/flake8/options/aggregator.py",
"copies": "3",
"size": "3288",
"license": "bsd-3-clause",
"hash": -2407417777393643500,
"line_mean": 40.6202531646,
"line_max": 78,
"alpha_frac": 0.6879562044,
"autogenerated": false,
"ratio": 4.413422818791946,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6601379023191946,
"avg_score": null,
"num_lines": null
} |
"""Aggregation methods"""
__docformat__ = "restructuredtext en"
import numpy as np
from scipy.sparse import csr_matrix, coo_matrix, isspmatrix_csr, isspmatrix_csc
from pyamg import amg_core
from pyamg.graph import lloyd_cluster
__all__ = ['standard_aggregation', 'naive_aggregation', 'lloyd_aggregation']
def standard_aggregation(C):
    """Compute the sparsity pattern of the tentative prolongator.

    Parameters
    ----------
    C : csr_matrix
        strength of connection matrix

    Returns
    -------
    AggOp : csr_matrix
        aggregation operator which determines the sparsity pattern
        of the tentative prolongator
    Cpts : array
        array of Cpts, i.e., Cpts[i] = root node of aggregate i

    Examples
    --------
    >>> from scipy.sparse import csr_matrix
    >>> from pyamg.gallery import poisson
    >>> from pyamg.aggregation.aggregate import standard_aggregation
    >>> A = poisson((4,), format='csr')           # 1D mesh with 4 vertices
    >>> standard_aggregation(A)[0].todense()      # two aggregates
    matrix([[1, 0],
            [1, 0],
            [0, 1],
            [0, 1]], dtype=int8)

    See Also
    --------
    amg_core.standard_aggregation
    """
    if not isspmatrix_csr(C):
        raise TypeError('expected csr_matrix')
    if C.shape[0] != C.shape[1]:
        raise ValueError('expected square matrix')

    index_type = C.indptr.dtype
    num_rows = C.shape[0]

    # Outputs of the C++ kernel: per-node aggregate id and root-node list.
    node_aggregate = np.empty(num_rows, dtype=index_type)
    roots = np.empty(num_rows, dtype=index_type)
    num_aggregates = amg_core.standard_aggregation(
        num_rows, C.indptr, C.indices, node_aggregate, roots)
    roots = roots[:num_aggregates]

    if num_aggregates == 0:
        # No aggregates formed: return an all-zero operator and no Cpts.
        return csr_matrix((num_rows, 1), dtype='int8'),\
            np.array([], dtype=index_type)

    shape = (num_rows, num_aggregates)
    if node_aggregate.min() == -1:
        # Some nodes were left unaggregated (marked -1); build the
        # operator from the aggregated nodes only.
        aggregated = node_aggregate != -1
        row = np.arange(num_rows, dtype=index_type)[aggregated]
        col = node_aggregate[aggregated]
        data = np.ones(len(col), dtype='int8')
        return coo_matrix((data, (row, col)), shape=shape).tocsr(), roots

    # Every node belongs to an aggregate: exactly one unit entry per row.
    indptr = np.arange(num_rows + 1, dtype=index_type)
    ones = np.ones(len(node_aggregate), dtype='int8')
    return csr_matrix((ones, node_aggregate, indptr), shape=shape), roots
def naive_aggregation(C):
    """Compute the sparsity pattern of the tentative prolongator.

    Each dof is considered once: if it has already been aggregated it is
    skipped, otherwise it forms a new aggregate together with any
    unaggregated neighbors.  May result in much higher complexities than
    ``standard_aggregation``.

    Parameters
    ----------
    C : csr_matrix
        strength of connection matrix

    Returns
    -------
    AggOp : csr_matrix
        aggregation operator which determines the sparsity pattern
        of the tentative prolongator
    Cpts : array
        array of Cpts, i.e., Cpts[i] = root node of aggregate i

    See Also
    --------
    amg_core.naive_aggregation
    """
    if not isspmatrix_csr(C):
        raise TypeError('expected csr_matrix')
    if C.shape[0] != C.shape[1]:
        raise ValueError('expected square matrix')

    index_type = C.indptr.dtype
    num_rows = C.shape[0]

    node_aggregate = np.empty(num_rows, dtype=index_type)  # aggregate ids
    roots = np.empty(num_rows, dtype=index_type)           # root nodes
    num_aggregates = amg_core.naive_aggregation(
        num_rows, C.indptr, C.indices, node_aggregate, roots)
    roots = roots[:num_aggregates]
    # shift aggregate ids down by one (kernel ids appear 1-based — the
    # original code applied the same shift)
    node_aggregate = node_aggregate - 1

    if num_aggregates == 0:
        # all zero matrix
        return csr_matrix((num_rows, 1), dtype='int8'), roots

    # every node is aggregated: one unit entry per row
    indptr = np.arange(num_rows + 1, dtype=index_type)
    ones = np.ones(len(node_aggregate), dtype='int8')
    return csr_matrix((ones, node_aggregate, indptr),
                      shape=(num_rows, num_aggregates)), roots
def lloyd_aggregation(C, ratio=0.03, distance='unit', maxiter=10):
    """Aggregate nodes using Lloyd Clustering.

    Parameters
    ----------
    C : csr_matrix or csc_matrix
        strength of connection matrix
    ratio : scalar
        Fraction of the nodes which will be seeds.
    distance : ['unit','abs','inv','same','min']
        Distance assigned to each edge of the graph G used in Lloyd clustering

        For each nonzero value C[i,j]:

        ======= ===========================
        'unit'  G[i,j] = 1
        'abs'   G[i,j] = abs(C[i,j])
        'inv'   G[i,j] = 1.0/abs(C[i,j])
        'same'  G[i,j] = C[i,j]
        'min'   G[i,j] = C[i,j] - min(C)
        ======= ===========================

    maxiter : int
        Maximum number of iterations to perform

    Returns
    -------
    AggOp : csr_matrix
        aggregation operator which determines the sparsity pattern
        of the tentative prolongator
    seeds : array
        array of Cpts, i.e., Cpts[i] = root node of aggregate i

    Examples
    --------
    >>> from pyamg.gallery import poisson
    >>> from pyamg.aggregation.aggregate import lloyd_aggregation
    >>> A = poisson((4,), format='csr')   # 1D mesh with 4 vertices
    >>> lloyd_aggregation(A)[0].todense() # one aggregate
    matrix([[1],
            [1],
            [1],
            [1]], dtype=int8)
    >>> # more seeding for two aggregates
    >>> Agg = lloyd_aggregation(A, ratio=0.5)[0].todense()

    See Also
    --------
    amg_core.standard_aggregation
    """
    if ratio <= 0 or ratio > 1:
        raise ValueError('ratio must be > 0.0 and <= 1.0')
    if not (isspmatrix_csr(C) or isspmatrix_csc(C)):
        raise TypeError('expected csr_matrix or csc_matrix')

    # Build the edge-weight array for the clustering graph.
    # BUG FIX: the original compared string literals with 'is'
    # (``distance is 'same'``), which tests object identity and is
    # implementation-dependent, silently breaking 'same' and 'min'.
    if distance == 'unit':
        data = np.ones_like(C.data).astype(float)
    elif distance == 'abs':
        data = abs(C.data)
    elif distance == 'inv':
        data = 1.0/abs(C.data)
    elif distance == 'same':
        data = C.data
    elif distance == 'min':
        data = C.data - C.data.min()
    else:
        raise ValueError('unrecognized value distance=%s' % distance)

    if C.dtype == complex:
        data = np.real(data)

    # Lloyd clustering requires non-negative edge weights; raise a proper
    # error instead of a bare assert (which is stripped under -O and also
    # crashed on empty data).
    if data.size and data.min() < 0:
        raise ValueError('Lloyd aggregation requires a non-negative measure')

    G = C.__class__((data, C.indices, C.indptr), shape=C.shape)

    # number of seeds: at least 1, at most the number of nodes
    num_seeds = int(min(max(ratio * G.shape[0], 1), G.shape[0]))

    distances, clusters, seeds = lloyd_cluster(G, num_seeds, maxiter=maxiter)

    # nodes with cluster id >= 0 were assigned; one unit entry per node
    row = (clusters >= 0).nonzero()[0]
    col = clusters[row]
    data = np.ones(len(row), dtype='int8')
    AggOp = coo_matrix((data, (row, col)),
                       shape=(G.shape[0], num_seeds)).tocsr()
    return AggOp, seeds
| {
"repo_name": "pombreda/pyamg",
"path": "pyamg/aggregation/aggregate.py",
"copies": "1",
"size": "8278",
"license": "bsd-3-clause",
"hash": 4773301270415715000,
"line_mean": 29.2116788321,
"line_max": 79,
"alpha_frac": 0.5428847548,
"autogenerated": false,
"ratio": 3.470859538784067,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45137442935840666,
"avg_score": null,
"num_lines": null
} |
"""Aggregation methods."""
import numpy as np
import scipy.sparse as sparse
from pyamg import amg_core
from pyamg.graph import lloyd_cluster, asgraph
import warnings
__all__ = ['standard_aggregation', 'naive_aggregation', 'lloyd_aggregation', 'balanced_lloyd_aggregation']
def standard_aggregation(C):
    """Compute the sparsity pattern of the tentative prolongator.

    Parameters
    ----------
    C : csr_matrix
        strength of connection matrix

    Returns
    -------
    AggOp : csr_matrix
        aggregation operator which determines the sparsity pattern
        of the tentative prolongator
    Cpts : array
        array of Cpts, i.e., Cpts[i] = root node of aggregate i

    Examples
    --------
    >>> from scipy.sparse import csr_matrix
    >>> from pyamg.gallery import poisson
    >>> from pyamg.aggregation.aggregate import standard_aggregation
    >>> A = poisson((4,), format='csr')   # 1D mesh with 4 vertices
    >>> A.toarray()
    array([[ 2., -1.,  0.,  0.],
           [-1.,  2., -1.,  0.],
           [ 0., -1.,  2., -1.],
           [ 0.,  0., -1.,  2.]])
    >>> standard_aggregation(A)[0].toarray() # two aggregates
    array([[1, 0],
           [1, 0],
           [0, 1],
           [0, 1]], dtype=int8)
    >>> A = csr_matrix([[1,0,0],[0,1,1],[0,1,1]])
    >>> A.toarray()                      # first vertex is isolated
    array([[1, 0, 0],
           [0, 1, 1],
           [0, 1, 1]])
    >>> standard_aggregation(A)[0].toarray() # one aggregate
    array([[0],
           [1],
           [1]], dtype=int8)

    See Also
    --------
    amg_core.standard_aggregation

    """
    if not sparse.isspmatrix_csr(C):
        raise TypeError('expected csr_matrix')

    if C.shape[0] != C.shape[1]:
        raise ValueError('expected square matrix')

    index_type = C.indptr.dtype
    num_rows = C.shape[0]

    Tj = np.empty(num_rows, dtype=index_type)  # stores the aggregate #s
    Cpts = np.empty(num_rows, dtype=index_type)  # stores the Cpts

    fn = amg_core.standard_aggregation

    num_aggregates = fn(num_rows, C.indptr, C.indices, Tj, Cpts)
    Cpts = Cpts[:num_aggregates]

    if num_aggregates == 0:
        # return all zero matrix and no Cpts
        return sparse.csr_matrix((num_rows, 1), dtype='int8'),\
            np.array([], dtype=index_type)
    else:
        shape = (num_rows, num_aggregates)
        if Tj.min() == -1:
            # some nodes not aggregated (marked -1): keep only the
            # aggregated nodes when building the operator
            mask = Tj != -1
            row = np.arange(num_rows, dtype=index_type)[mask]
            col = Tj[mask]
            data = np.ones(len(col), dtype='int8')
            return sparse.coo_matrix((data, (row, col)), shape=shape).tocsr(), Cpts
        else:
            # all nodes aggregated: exactly one unit entry per row
            Tp = np.arange(num_rows+1, dtype=index_type)
            Tx = np.ones(len(Tj), dtype='int8')
            return sparse.csr_matrix((Tx, Tj, Tp), shape=shape), Cpts
def naive_aggregation(C):
    """Compute the sparsity pattern of the tentative prolongator.

    Parameters
    ----------
    C : csr_matrix
        strength of connection matrix

    Returns
    -------
    AggOp : csr_matrix
        aggregation operator which determines the sparsity pattern
        of the tentative prolongator
    Cpts : array
        array of Cpts, i.e., Cpts[i] = root node of aggregate i

    Examples
    --------
    >>> from scipy.sparse import csr_matrix
    >>> from pyamg.gallery import poisson
    >>> from pyamg.aggregation.aggregate import naive_aggregation
    >>> A = poisson((4,), format='csr')   # 1D mesh with 4 vertices
    >>> A.toarray()
    array([[ 2., -1.,  0.,  0.],
           [-1.,  2., -1.,  0.],
           [ 0., -1.,  2., -1.],
           [ 0.,  0., -1.,  2.]])
    >>> naive_aggregation(A)[0].toarray() # two aggregates
    array([[1, 0],
           [1, 0],
           [0, 1],
           [0, 1]], dtype=int8)
    >>> A = csr_matrix([[1,0,0],[0,1,1],[0,1,1]])
    >>> A.toarray()                      # first vertex is isolated
    array([[1, 0, 0],
           [0, 1, 1],
           [0, 1, 1]])
    >>> naive_aggregation(A)[0].toarray() # two aggregates
    array([[1, 0],
           [0, 1],
           [0, 1]], dtype=int8)

    See Also
    --------
    amg_core.naive_aggregation

    Notes
    -----
    Differs from standard aggregation. Each dof is considered. If it has been
    aggregated, skip over. Otherwise, put dof and any unaggregated neighbors
    in an aggregate. Results in possibly much higher complexities than
    standard aggregation.

    """
    if not sparse.isspmatrix_csr(C):
        raise TypeError('expected csr_matrix')

    if C.shape[0] != C.shape[1]:
        raise ValueError('expected square matrix')

    index_type = C.indptr.dtype
    num_rows = C.shape[0]

    Tj = np.empty(num_rows, dtype=index_type)  # stores the aggregate #s
    Cpts = np.empty(num_rows, dtype=index_type)  # stores the Cpts

    fn = amg_core.naive_aggregation

    num_aggregates = fn(num_rows, C.indptr, C.indices, Tj, Cpts)
    Cpts = Cpts[:num_aggregates]
    # shift aggregate ids down by one (kernel ids appear to be 1-based;
    # the 0-based ids are needed as column indices below)
    Tj = Tj - 1

    if num_aggregates == 0:
        # all zero matrix
        return sparse.csr_matrix((num_rows, 1), dtype='int8'), Cpts
    else:
        shape = (num_rows, num_aggregates)
        # all nodes aggregated: exactly one unit entry per row
        Tp = np.arange(num_rows+1, dtype=index_type)
        Tx = np.ones(len(Tj), dtype='int8')
        return sparse.csr_matrix((Tx, Tj, Tp), shape=shape), Cpts
def lloyd_aggregation(C, naggs=None, measure=None, maxiter=10):
    """Aggregate nodes using Lloyd Clustering.

    Parameters
    ----------
    C : csr_matrix
        strength of connection matrix
    naggs : int
        number of aggregates or clusters (default ``C.shape[0] // 10``;
        must be between 1 and n)
    measure : [None, 'abs', 'inv', 'unit', 'min']
        Distance measure to use and assigned to each edge graph.

        For each nonzero value C[i,j]:

        ======= ===========================
        None    G[i,j] = C[i,j]
        'abs'   G[i,j] = abs(C[i,j])
        'inv'   G[i,j] = 1.0/abs(C[i,j])
        'unit'  G[i,j] = 1
        'min'   G[i,j] = C[i,j] - min(C)
        ======= ===========================

    maxiter : int
        Maximum number of iterations to perform
        (NOTE(review): currently accepted but not forwarded to
        ``lloyd_cluster`` — confirm intended behavior)

    Returns
    -------
    AggOp : csr_matrix
        aggregation operator which determines the sparsity pattern
        of the tentative prolongator. Node i is in cluster j if AggOp[i,j] = 1.
    seeds : array
        array of seeds or Cpts, i.e., Cpts[i] = root node of aggregate i

    See Also
    --------
    amg_core.standard_aggregation

    Examples
    --------
    >>> from scipy.sparse import csr_matrix
    >>> from pyamg.gallery import poisson
    >>> from pyamg.aggregation.aggregate import lloyd_aggregation
    >>> A = poisson((4,), format='csr')   # 1D mesh with 4 vertices
    >>> A.toarray()
    array([[ 2., -1.,  0.,  0.],
           [-1.,  2., -1.,  0.],
           [ 0., -1.,  2., -1.],
           [ 0.,  0., -1.,  2.]])
    >>> lloyd_aggregation(A, naggs=1)[0].toarray() # one aggregate
    array([[1],
           [1],
           [1],
           [1]], dtype=int32)
    >>> # more seeding for two aggregates
    >>> Agg = lloyd_aggregation(A, naggs=2)[0].toarray()

    """
    C = asgraph(C)
    n = C.shape[0]

    if naggs is None:
        naggs = int(n / 10)

    if naggs <= 0 or naggs > n:
        raise ValueError('number of aggregates must be >=1 and <=n)')

    # edge weights for the clustering graph, derived from C per `measure`
    if measure is None:
        data = C.data
    elif measure == 'abs':
        data = np.abs(C.data)
    elif measure == 'inv':
        data = 1.0 / abs(C.data)
    elif measure == 'unit':
        data = np.ones_like(C.data).astype(float)
    elif measure == 'min':
        data = C.data - C.data.min()
    else:
        raise ValueError('unrecognized value measure=%s' % measure)

    if C.dtype == complex:
        data = np.real(data)

    # Lloyd clustering requires a non-negative edge measure
    if len(data) > 0:
        if data.min() < 0:
            raise ValueError('Lloyd aggregation requires a positive measure.')

    G = C.__class__((data, C.indices, C.indptr), shape=C.shape)

    distances, clusters, seeds = lloyd_cluster(G, naggs)

    # cluster id < 0 means the node was never reached/assigned
    if clusters.min() < 0:
        warnings.warn('Lloyd clustering did not cluster every point')

    row = (clusters >= 0).nonzero()[0]
    col = clusters[row]
    data = np.ones(len(row), dtype=np.int32)
    AggOp = sparse.coo_matrix((data, (row, col)), shape=(n, naggs)).tocsr()
    return AggOp, seeds
def balanced_lloyd_aggregation(C, num_clusters=None):
    """Aggregate nodes using Balanced Lloyd Clustering.

    Parameters
    ----------
    C : csr_matrix
        strength of connection matrix with positive weights
    num_clusters : int
        Number of seeds or clusters expected (default: C.shape[0] / 10)

    Returns
    -------
    AggOp : csr_matrix
        aggregation operator which determines the sparsity pattern
        of the tentative prolongator
    seeds : array
        array of Cpts, i.e., Cpts[i] = root node of aggregate i

    See Also
    --------
    amg_core.standard_aggregation

    Examples
    --------
    >>> import numpy as np
    >>> import pyamg
    >>> from pyamg.aggregation.aggregate import balanced_lloyd_aggregation
    >>> data = pyamg.gallery.load_example('unit_square')
    >>> G = data['A']
    >>> xy = data['vertices'][:,:2]
    >>> G.data[:] = np.ones(len(G.data))
    >>> np.random.seed(787888)
    >>> AggOp, seeds = balanced_lloyd_aggregation(G)
    """
    if num_clusters is None:
        num_clusters = int(C.shape[0] / 10)

    if num_clusters < 1 or num_clusters > C.shape[0]:
        raise ValueError('num_clusters must be between 1 and n')

    if not (sparse.isspmatrix_csr(C) or sparse.isspmatrix_csc(C)):
        raise TypeError('expected csr_matrix or csc_matrix')

    if C.data.min() <= 0:
        raise ValueError('positive edge weights required')

    if C.dtype == complex:
        data = np.real(C.data)
    else:
        data = C.data

    G = C.__class__((data, C.indices, C.indptr), shape=C.shape)
    num_nodes = G.shape[0]

    # random initial seeds; uses the global numpy RNG state
    seeds = np.random.permutation(num_nodes)[:num_clusters]
    seeds = seeds.astype(np.int32)

    # d: per-node distance to its cluster center, initialized to the largest
    # representable value; 0 at the seeds themselves
    # NOTE(review): np.finfo assumes a floating-point G.dtype — confirm
    mv = np.finfo(G.dtype).max
    d = mv * np.ones(num_nodes, dtype=G.dtype)
    d[seeds] = 0

    # cm: per-node cluster membership; -1 == unassigned, seeds start in
    # their own cluster
    cm = -1 * np.ones(num_nodes, dtype=np.int32)
    cm[seeds] = seeds

    # presumably updates d, cm and seeds in place — confirm against amg_core
    amg_core.lloyd_cluster_exact(num_nodes,
                                 G.indptr, G.indices, G.data,
                                 num_clusters,
                                 d, cm, seeds)

    # one unit entry per node: row i, column cm[i]
    col = cm
    row = np.arange(len(cm))
    data = np.ones(len(row), dtype=np.int32)
    AggOp = sparse.coo_matrix((data, (row, col)),
                              shape=(G.shape[0], num_clusters)).tocsr()
    return AggOp, seeds
| {
"repo_name": "pyamg/pyamg",
"path": "pyamg/aggregation/aggregate.py",
"copies": "1",
"size": "10661",
"license": "mit",
"hash": -1166843716707742500,
"line_mean": 29.0309859155,
"line_max": 106,
"alpha_frac": 0.5509802082,
"autogenerated": false,
"ratio": 3.4771689497716896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4528149157971689,
"avg_score": null,
"num_lines": null
} |
"""aggregation operations
:notes::
aggregation currently doesn't support shift bit operators those can be substituted by:
x >> y = x / (2 ** y)
x << y = x * (2 ** y)
"""
from mongoUtils.helpers import pp_doc
from pymongo.command_cursor import CommandCursor
from bson.son import SON
from datetime import datetime
class Aggregation(object):
    """**a helper for constructing aggregation pipelines** see:
    `aggregation framework <http://docs.mongodb.org/manual/reference/aggregation/>`_ supports all
    `aggregation operators <http://docs.mongodb.org/manual/reference/operator/aggregation/>`_

    :param obj collection: a pymongo collection object
    :param list pipeline: (optional) an initial pipeline list
    :param dict kwargs: (optional) `any arguments <http://docs.mongodb.org/manual/reference/operator/aggregation/>`_

    :returns: an aggregation object

    :Example:
        >>> from pymongo import MongoClient;from mongoUtils.configuration import testDbConStr
        >>> db = MongoClient(testDbConStr).get_default_database()
        >>> aggr_obj = Aggregation(db.muTest_tweets_users, allowDiskUse=True)
        >>> aggr_obj.help()                                            # ask for help
        ['project', 'match', 'redact', 'limit', .... ]                 # available operators
        >>> aggr_obj.match({'lang': 'en'})                             # match English speaking
        >>> aggr_obj.group({'_id': None, "avg_followers": {"$avg": "$followers_count"}})
        >>> print(aggr_obj.code(False))                                # print pipeline
        [{"$match": {"lang": "en"}},{"$group": {"avg_followers":
         {"$avg": "$followers_count"},"_id": null}}]
        >>> next(aggr_obj())                                           # execute and get results
        {u'avg_followers': 2943.8210227272725, u'_id': None}
    """

    # Header printed before echoing results in __call__: separator line,
    # pipeline length, last pipeline stage.
    # BUG FIX: _frmt_str was referenced in __call__ but never defined,
    # raising AttributeError whenever print_n was given.
    _frmt_str = '{}\npipeline stages: {} | last stage: {}'

    # Pipeline operators for which helper methods are auto-generated in
    # __init__ (e.g. self.match(...) appends {'$match': ...}).
    # BUG FIX: the old space-split string produced bogus entries
    # ('facet,', 'collStats,', a duplicate 'indexStats' and an empty name),
    # so the facet/collStats helpers never worked.
    _operators = ('project match redact limit skip sort unwind group out '
                  'geoNear indexStats sample lookup graphLookup facet '
                  'collStats').split()

    def __init__(self, collection, pipeline=None, **kwargs):
        def _makefun(name):
            # each helper appends {'$<name>': value} to the pipeline
            setattr(self, name, lambda value, position=None: self.add('$' + name, value, position))
        self._collection = collection
        self._kwargs = kwargs
        self._pll = pipeline or []  # pipeline list
        for item in self._operators:  # auto build functions for operators
            _makefun(item)

    @classmethod
    def construct_fields(cls, fields_list=None):
        """a constructor for fields

        :param list fields_list: field names; dots in names are replaced by
            underscores in the generated keys
        """
        # default changed from a shared mutable [] to None (same result)
        return SON([(i.replace('.', '_'), '$' + i) for i in fields_list or []])

    @classmethod
    def construct_stats(cls, fields_lst, _id=None, stats=('avg', 'max', 'min'), incl_count=True):
        """a constructor helper for group statistics

        :Parameters:
            - fields_lst: (list) list of field names
            - stats: (sequence) statistics operators to apply
            - incl_count: (bool) includes a count if True

        :Example:
            >>> Aggregation.construct_stats(['foo'])
            {'max_foo': {'$max': '$foo'}, '_id': None, 'avg_foo': {'$avg': '$foo'},
             'min_foo': {'$min': '$foo'}, 'count': {'$sum': 1}}
        """
        frmt_field_stats = "{}_{}"
        res = {}
        for field in fields_lst:
            res.update({frmt_field_stats.format(i, field): {'$' + i: '$' + field} for i in stats})
        if incl_count:
            res.update({'count': {'$sum': 1}})
        res.update({'_id': _id})
        return res

    @property
    def pipeline(self):
        """returns the pipeline (a list)"""
        return self._pll

    @classmethod
    def help(cls, what='operators'):
        """prints the list of available operators"""
        print(cls._operators)

    def add(self, operator, value, position=None):
        """adds an operation at specified position in pipeline (end if None)"""
        if position is None:
            position = len(self._pll)
        self._pll.insert(position, {operator: value})

    def search(self, operator, count=1):
        """returns (position, operation) of the count-th occurrence of
        ``operator`` in the pipeline, or None if not found
        """
        cnt = 0
        for i, item in enumerate(self.pipeline):
            if list(item.keys())[0] == operator:
                cnt += 1
                if cnt == count:
                    return (i, item)
        return None  # explicit: operator not found

    def save(self, file_pathname):
        """save pipeline list to file"""
        with open(file_pathname, 'w') as f:
            return f.write(self.code(verbose=False))

    def remove(self, position):
        """remove an element from pipeline list given its position"""
        del self._pll[position]

    def code(self, verbose=True):
        """pretty-printed representation of the pipeline"""
        return pp_doc(self.pipeline, 4, sort_keys=False, verbose=verbose)

    def clear(self):
        """reset the pipeline to empty"""
        self._pll = []

    def __call__(self, print_n=None, **kwargs):
        """perform the aggregation when called

        for kwargs see: `aggregate <http://api.mongodb.org/python/current/api/pymongo/collection.html>`_

        :Parameters:
            - print_n:
                - True: will print all results and will return None
                - None: will cancel result printing
                - int: will print top n documents
            - kwargs: if any of kwargs are specified they override any
              arguments provided on instance initialization.
        """
        tmp_kw = self._kwargs.copy()
        tmp_kw.update(kwargs)
        rt = self._collection.aggregate(self.pipeline, **tmp_kw)
        if print_n is not None:
            print(self._frmt_str.format("--" * 40, len(self.pipeline), str(self.pipeline[-1])))
            if isinstance(rt, CommandCursor):
                for cnt, doc in enumerate(rt):
                    print(doc)
                    if print_n is not True and cnt + 2 > print_n:
                        break
                return None
            else:
                print(rt)
        return rt
class AggrCounts(Aggregation):
    """
    constructs a group-count aggregation pipeline based on :class:`~Aggregation`

    :param obj collection: a pymongo collection object
    :param field (str or list of strings): field name(s) to group by
    :param dict match: a query match expression, defaults to None
    :param dict sort: a sort expression, defaults to {'count': -1}
    :param dict kwargs: optional arguments passed to parent :class:`~Aggregation`

    :Example:
        >>> from pymongo import MongoClient;from mongoUtils.configuration import testDbConStr
        >>> db = MongoClient(testDbConStr).get_default_database()
        >>> AggrCounts(db.muTest_tweets_users, "lang", sort={'count': -1})(verbose=True)
        {u'count': 352, u'_id': u'en'}
        {u'count': 283, u'_id': u'ja'}
        {u'count': 100, u'_id': u'es'} ...
    """
    def __init__(self, collection, field, match=None, sort={'count': -1}, **kwargs):
        super(AggrCounts, self).__init__(collection, **kwargs)
        if match is not None:
            self.match(match)
        # a list of fields becomes a compound _id document; a single field
        # becomes a plain '$field' reference
        if isinstance(field, list):
            group_id = {name: '$' + name for name in field}
        else:
            group_id = '$' + field
        self.group({'_id': group_id, 'count': {'$sum': 1}})
        if sort is not None:
            self.sort(sort)
class AggrCountsWithPerc(AggrCounts):
    """
    Adds a percentage to each group-count result.

    Percentages are calculated in Python because it is tricky to do it
    efficiently in one aggregation pass: the totals are unknown before
    executing the query.
    """
    def results(self, round_perc=None):
        """execute the aggregation and annotate each document with 'perc'

        :param int round_perc: optional number of decimals to round the
            percentage to (None disables rounding)
        """
        res = list(self())
        total = float(sum(doc['count'] for doc in res))
        for doc in res:
            perc = doc['count'] / total * 100
            # BUG FIX: 'is not None' so round_perc=0 rounds to whole
            # percentages instead of being silently ignored by truthiness
            if round_perc is not None:
                perc = round(perc, round_perc)
            doc['perc'] = perc
        return res
class AggrSample(Aggregation):
    """
    constructs a $sample aggregation pipeline based on :class:`~Aggregation`

    :param obj collection: a pymongo collection object
    :param int size: number of documents to sample
    :param dict match: a query match expression applied before sampling,
        defaults to None
    :param dict kwargs: optional arguments passed to parent :class:`~Aggregation`
    """
    def __init__(self, collection, size, match=None, **kwargs):
        super(AggrSample, self).__init__(collection, **kwargs)
        # BUG FIX: the match parameter was accepted but silently ignored;
        # apply it before $sample, mirroring AggrLookUp
        if match is not None:
            self.match(match)
        self.sample({'size': size})
class AggrLookUp(Aggregation):
    """
    constructs a lookUp aggregation pipeline based on :class:`~Aggregation`

    :param obj collection: a pymongo collection object
    :param str from_collection: target collection name to lookUp from (in same db)
    :param str localField: source collection field name to join
    :param str foreingField: target collection field name to join
        (parameter keeps its historical spelling for backward compatibility)
    :param str AS: field name where the joined documents are placed
    :param bool orphans: True returns orphans only, False excludes orphans,
        None returns all documents
    :param dict match: a query match expression, defaults to None
    :param dict kwargs: optional arguments passed to parent :class:`~Aggregation`
    """
    def __init__(self, collection, from_collection, localField, foreingField, AS='lookUp', orphans=None, match=None, **kwargs):
        super(AggrLookUp, self).__init__(collection, **kwargs)
        if match is not None:
            self.match(match)
        lookup_spec = {
            'from': from_collection,
            'localField': localField,
            'foreignField': foreingField,
            'as': AS,
        }
        self.lookup(lookup_spec)
        # post-filter on the joined array: an empty array marks an orphan
        if orphans is True:
            self.match({AS: []})
        elif orphans is False:
            self.match({AS: {'$ne': []}})
class GraphLookUp(Aggregation):
    """
    constructs a $graphLookup aggregation pipeline based on :class:`~Aggregation`

    Parameters mirror mongoDB's graphLookup stage arguments; ``orphans``
    filters on whether the ``AS`` array is empty (True: orphans only,
    False: exclude orphans, None: no filtering), and ``match`` is an
    optional query match applied before the graph lookup.
    """
    def __init__(self, collection, from_collection, startWith, connectFromField, connectToField, AS,
                 maxDepth=None, depthField=None, restrictSearchWithMatch=None,
                 match=None, orphans=False, **kwargs):
        super(GraphLookUp, self).__init__(collection, **kwargs)
        # BUG FIX: the match parameter was accepted but silently ignored;
        # apply it before $graphLookup, mirroring AggrLookUp
        if match is not None:
            self.match(match)
        args = {'from': from_collection,
                'startWith': startWith,
                'connectFromField': connectFromField,
                'connectToField': connectToField,
                'as': AS
                }
        # optional stage arguments are added only when provided
        if maxDepth is not None:
            args.update({'maxDepth': maxDepth})
        if depthField is not None:
            args.update({'depthField': depthField})
        if restrictSearchWithMatch is not None:
            args.update({'restrictSearchWithMatch': restrictSearchWithMatch})
        self.graphLookup(args)
        if orphans is True:
            self.match({AS: []})
        elif orphans is False:
            self.match({AS: {'$ne': []}})
def projection_tstodt(ts_field_name):
    """return a projection expression converting an epoch-seconds field to a
    native mongoDB date

    :param str ts_field_name: aggregation reference to a field holding an
        epoch-based integer timestamp, i.e. '$foo.bar'
    """
    epoch = datetime.utcfromtimestamp(0)
    to_millis = {"$multiply": [1000, ts_field_name]}
    return {'$add': [epoch, to_millis]}
| {
"repo_name": "nickmilon/mongoUtils",
"path": "mongoUtils/aggregation.py",
"copies": "1",
"size": "11059",
"license": "apache-2.0",
"hash": 6354431143092468000,
"line_mean": 41.3716475096,
"line_max": 158,
"alpha_frac": 0.5715706664,
"autogenerated": false,
"ratio": 4.193780811528252,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5265351477928252,
"avg_score": null,
"num_lines": null
} |
"""Agile app for managing releases
"""
import os
from datetime import date
import glob
import asyncio
from dateutil import parser
from pulsar.utils.html import capfirst
from .. import core
class RemoveRelease(core.AgileSetting):
    # Command-line setting: ``--remove-release [TAG]`` removes a release
    # (and its git tag) from github.
    name = "remove_release"
    flags = ['--remove-release']
    nargs = "?"
    const = 'current'  # value used when the flag is given without an argument
    desc = "Remove a release from github"
# Commit-message keywords that link a commit to the github issue it closes.
close_issue = {
    'close', 'closes', 'closed',
    'fix', 'fixes', 'fixed',
    'resolve', 'resolves', 'resolved',
}
class Github(core.AgileCommand):
"""Create Github releases.
Without the ``--commit`` or ``--push`` flags nothing is committed
or pushed to the remote repository
"""
description = 'Create a new release in github'
actions = frozenset(('shell', 'upload'))
async def run(self, name, config, options):
    """Create a new github release for this repository.

    Flow: validate the configured version; optionally remove a release
    (``--remove-release``); generate or read the release-notes file; then,
    when ``--commit``/``--push`` are set, commit, push and create the tag.
    NOTE(review): indentation reconstructed from a whitespace-stripped
    source — confirm block nesting against the original.
    """
    git = self.git
    gitapi = self.gitapi
    release_notes_file = config.get('release_notes_file',
                                    'release-notes.md')
    release = {}
    # merged view: per-command options overridden by the github config entry
    opts = dict(options)
    opts.update(config)
    # repo api object
    repo = gitapi.repo(git.repo_path)
    #
    # Get the version to release and validate
    version = opts.get('version')
    if not version:
        raise self.error('"version" not specified in github.%s '
                         'dictionary' % name)
    version = self.render(version)
    if self.cfg.remove_release:
        # 'current' is the bare-flag default; otherwise the flag value is
        # the tag to remove
        if self.cfg.remove_release != 'current':
            version = self.cfg.remove_release
        await self.remove_release(version)
        return
    tag_prefix = opts.get('tag_prefix', '')
    current = await repo.releases.validate_tag(version, tag_prefix) or {}
    release['tag_name'] = version
    #
    # Release notes
    location = opts.get('release_notes')
    if location:
        note_file = os.path.join(self.repo_path, release_notes_file)
        if os.path.isfile(note_file):
            # notes file already prepared by the release manager
            with open(note_file, 'r') as file:
                release['body'] = file.read().strip()
            await self.write_notes(location, release)
        else:
            self.logger.info('Create release notes from commits &'
                             ' pull requests')
            release['body'] = await self.get_notes(repo, current)
            with open(note_file, 'w') as file:
                file.write(release['body'])
            # Exit so that the release manager can edit the file
            # before releasing
            return self.logger.info('Created new %s file' % note_file)
    #
    # Commit or Push
    if self.cfg.commit or self.cfg.push:
        #
        # Create the tar or zip file
        dist = self.as_dict(opts.get('dist', {}),
                            "dist entry should be a dictionary")
        # NOTE(review): this loop rebinds the `name` parameter
        for name, value in dist.items():
            if name not in self.actions:
                raise self.error('No such action "%s"' % name)
        #
        version = '%s%s' % (tag_prefix, version)
        release['tag_name'] = version
        self.logger.info('Commit changes')
        self.log_execute(await git.commit(msg='Release %s' % version))
        #
        # Push to github and create tag
        if self.cfg.push:
            self.logger.info('Push changes')
            self.log_execute(await git.push())
            self.logger.info('Pause for two second before creating a tag')
            await asyncio.sleep(2)
            self.logger.info('Creating a new tag %s', version)
            release = await repo.releases.create(release)
            self.logger.info('Congratulation, the new release %s is out',
                             release['tag_name'])
async def remove_release(self, tag):
    """Delete the github release identified by ``tag`` and remove its git tag.

    A missing release (HTTP 404) is logged as a warning rather than raised;
    any other API error propagates. The git tag is removed afterwards in
    either case.
    """
    repo = self.gitapi.repo(self.git.repo_path)
    self.logger.info('Remove release %s from github', tag)
    try:
        await repo.releases.delete(tag=tag)
    except Exception as exc:
        # tolerate 404 (release does not exist), re-raise everything else
        response = getattr(exc, 'response', None)
        if response and response.status_code == 404:
            self.logger.warning('Release with tag %s not available', tag)
        else:
            raise
    # Remove tag
    await self.git.tags_remove(tag)
async def upload(self, name, src, release=None, **kw):
    """Upload all files matching the *src* glob pattern(s) to a release.

    Parameters:
        name: action name (part of the action interface; unused here)
        src: glob pattern, or list of patterns, rendered before globbing
        release: release dict with 'tag_name' and 'id'.  Without it there
            is no release to attach files to, so the method returns early
            (the original referenced the unbound ``tag``/``rel`` names and
            raised NameError in that case).
    """
    if not release:
        return
    tag = release['tag_name']
    rel = self.gitapi.repo(self.git.repo_path).release(release['id'])
    # Separate loop variable: the original shadowed the ``src`` parameter
    for pattern in self.as_list(src):
        pattern = self.render(pattern)
        for filename in glob.glob(pattern):
            self.logger.info('Uploading %s to release %s',
                             filename, tag)
            await rel.upload(filename)
async def get_notes(self, repo, current):
    """Build a markdown release-notes body from commits and pull requests.

    Entries are gathered since the previous release (*current*), grouped
    by section and listed newest first within each section.
    """
    created_at = current.get('created_at')
    notes = []
    notes.extend(await self._from_commits(repo, created_at))
    notes.extend(await self._from_pull_requests(repo, created_at))
    # Group note texts by section, newest first (notes are (date, section,
    # body) tuples)
    sections = {}
    for _, section, text in reversed(sorted(notes, key=lambda s: s[0])):
        sections.setdefault(section, []).append(text)
    lines = []
    for title in sorted(sections):
        if title:
            lines.append('### %s' % capfirst(title))
        for entry in sections[title]:
            if not entry.startswith('* '):
                entry = '* %s' % entry
            lines.append(entry)
        lines.append('')
    return '\n'.join(lines)
async def add_note(self, repo, notes, message, dte, eid, entry):
"""Append a ``(date, section, body)`` tuple to ``notes`` for *message*.

Messages carrying the '#release-note' marker become note bodies
('#release-note=<section>' selects a named section).  For messages
without the marker, 'close #NNN'-style issue references are replaced
by markdown links (``close_issue`` presumably lists the recognised
closing keywords -- defined elsewhere in this module).
"""
key = '#release-note'
index = message.find(key)
if index == -1:
# No marker: linkify issue references such as 'fixes #12'
substitutes = {}
bits = message.split()
for msg, bit in zip(bits[:-1], bits[1:]):
if bit.startswith('#') and msg.lower() in close_issue:
try:
number = int(bit[1:])
except ValueError:
continue
if bit not in substitutes:
try:
issue = await repo.issue(number).get()
except Exception:
continue
substitutes[bit] = issue['html_url']
if substitutes:
for name, url in substitutes.items():
message = message.replace(name, '[%s](%s)' % (name, url))
notes.append((dte, '', message))
else:
# Marker found: an optional '=<section>' suffix names the section
index1 = index + len(key)
if len(message) > index1 and message[index1] == '=':
section = message[index1+1:].split()[0]
key = '%s=%s' % (key, section)
else:
section = ''
# Strip the marker, capitalise and link back to the commit/PR
body = message.replace(key, '').strip()
if body:
body = body[:1].upper() + body[1:]
body = '%s [%s](%s)' % (body, eid, entry['html_url'])
notes.append((dte, section.lower(), body))
async def write_notes(self, location, release):
"""Write the release body to ``<location>/<major>.<minor>.md``.

The patch component of the version is replaced by the 'md' extension,
so all patch releases of a minor version share one notes file; any
previous notes for the same version are stripped before prepending.
New files are git-added.
"""
dt = date.today().strftime('%Y-%b-%d')
version = release['tag_name']
title = '## Ver. %s' % version
body = ['%s - %s' % (title, dt), '']
body.extend(release['body'].strip().splitlines())
# assumes a MAJOR.MINOR.PATCH tag -- IndexError otherwise
bits = version.split('.')
bits[2] = 'md'
filename = os.path.join(location, '.'.join(bits))
if not os.path.isdir(location):
os.makedirs(location)
add_file = True
if os.path.isfile(filename):
# File already tracked: merge with it, no need to git-add again
add_file = False
with open(filename, 'r') as file:
lines = file.read().strip().splitlines()
# Drop a previous section for this same version before prepending
lines = self._remove_notes(lines, title)
body.extend(('', ''))
body.extend(lines)
with open(filename, 'w') as file:
file.write('\n'.join(body))
self.logger.info('Added release notes to %s', filename)
if add_file:
self.logger.info('Add %s to repository', filename)
await self.git.add(filename)
def _remove_notes(self, lines, title):
# We need to remove the previous notes
remove = False
for line in lines:
if line.startswith(title):
remove = True
elif line.startswith('## '):
remove = False
if not remove:
yield line
async def _from_commits(self, repo, created_at=None):
    """Collect note tuples from commit messages (and commit comments)
    pushed since *created_at*."""
    notes = []
    for entry in await repo.commits.get_list(since=created_at):
        commit = entry['commit']
        when = parser.parse(commit['committer']['date'])
        short_sha = entry['sha'][:7]
        await self.add_note(repo, notes, commit['message'], when,
                            short_sha, entry)
        # Commit comments may carry release notes as well
        if commit['comment_count']:
            for comment in await repo.commits.comments(entry):
                await self.add_note(repo, notes, comment['body'], when,
                                    short_sha, entry)
    return notes
async def _from_pull_requests(self, repo, created_at=None):
    """Collect note tuples from closed pull requests (and their issue
    comments) updated since *created_at*."""
    pr_filter = check_update(created_at) if created_at else None
    pulls = await repo.pulls.get_list(callback=pr_filter,
                                      state='closed',
                                      sort='updated',
                                      direction='desc')
    notes = []
    for entry in pulls:
        when = parser.parse(entry['closed_at'])
        pr_id = '#%d' % entry['number']
        await self.add_note(repo, notes, entry['body'], when, pr_id, entry)
        for comment in await repo.issues.comments(entry):
            await self.add_note(repo, notes, comment['body'], when,
                                pr_id, entry)
    return notes
class check_update:
    """Callable filter keeping only pull requests updated after *since*."""

    def __init__(self, since):
        # *since* may be any date string the dateutil parser accepts
        self.since = parser.parse(since)

    def __call__(self, pulls):
        return [pull for pull in pulls
                if parser.parse(pull['updated_at']) > self.since]
| {
"repo_name": "quantmind/git-agile",
"path": "agile/plugins/github.py",
"copies": "2",
"size": "10796",
"license": "bsd-3-clause",
"hash": 6840276892495755000,
"line_mean": 35.472972973,
"line_max": 78,
"alpha_frac": 0.5104668396,
"autogenerated": false,
"ratio": 4.316673330667733,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5827140170267733,
"avg_score": null,
"num_lines": null
} |
"""Agilent N6705B Power Supply SCIPI Class.
Copyright (c) 2014 The Project Loon Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file
"""
__author__ = "Alfred Cohen"
__email__ = "alfredcohen@google.com"
import time
class Driver(object):
    """Agilent N6705B Power Supply - Driver specific Class (SCPI)."""

    def __init__(self, inst, cfg=None):
        """Initialize the specific driver.

        Args:
            inst: instrument wrapper exposing set/get/geta/getcmd/set_error.
            cfg: dict with a "cfg" section (per-channel parameters) and a
                "power" section (channel sequencing).  When missing, the
                instrument error flag is set and the driver stays unusable.
        """
        self.i = inst
        if not cfg:
            self.i.set_error("Configuration File for this instrument not available")
            return
        self.cfg = cfg["cfg"]
        self.power = cfg["power"]

    def __del__(self):
        """Destroy and cleanup/shutdown power."""
        # Guard: when __init__ bailed out early (no cfg) the attributes
        # set_power() needs do not exist and would raise during teardown.
        if hasattr(self, "power"):
            self.set_power(0)
        return

    def get_params(self):
        """Dictionary holding all driver specific parameter mappings.

        Returns:
            Structure in the format:
            PARM: p = "SCPI:PARAMETER",
            u = "Units" to append,
            v = "param = paramdevice"
        """
        return {
            "Volt": {"p": "VOLT "},
            "Curr": {"p": "CURR "},
            "VoltMax": {"p": "VOLT:PROT:LEV "},
            "CurrMax": {"p": "CURR:PROT:LEV "}
        }

    def setup(self):
        """Set-up the device according to the supplied configuration.

        Returns True only when every configured parameter was accepted.
        """
        # Prevent resetting parameters if specific configuration setting exists.
        if "no_setup" in self.cfg and self.cfg["no_setup"]:
            return True
        ok = []
        self.i.set("*RST")
        for ch in self.get_channels("list", all=True):
            chan = str(ch)
            for param in self.cfg[chan]:
                cmd = self.getcmd(param, chan)
                if not cmd:
                    continue
                # Record per-command success so a single failure is visible
                ok.append(bool(self.i.set(cmd)))
        self.i.get("*OPC?")  # wait for pending operations to complete
        return all(ok)

    def getcmd(self, p="", ch=""):
        """Use the global wrapper to build the full SCPI command for *p*.

        Returns None for an empty/unknown parameter, otherwise the command
        string with the channel selector appended.
        """
        if not p:
            return
        channel = ("" if not ch else ", (@%s)"%ch)
        cmd = self.i.getcmd(p, self.cfg[str(ch)])
        if not cmd:
            return
        return cmd + channel

    def set_power(self, on=0, chnls=None):
        """Turn Power On/Off for all power channels or selected ones.

        Channels power up in configured order and power down in reverse
        order, pausing ``delay`` seconds between channels.
        """
        ok = []
        mode = "ON " if on == 1 else "OFF "
        seq = chnls if chnls else self.get_channels("list")
        if on == 0:
            seq = reversed(seq)
        delay = 0.8 if "delay" not in self.power else self.power["delay"]
        for ch in seq:
            ok.append(self.i.set("OUTP %s, (@%s)"%(mode, ch)))
            time.sleep(delay)
        return all(ok)

    def get_channels(self, mtype="str", all=True):
        """Obtain All configured Channels in order and return a list or string.

        Args:
            mtype: "str" returns a comma-joined string, anything else a list.
            all: when True, the main channel (if configured) is appended last.
        """
        # Copy the configured sequence: the original aliased
        # self.power["seq"], so appending the main channel permanently
        # mutated the configuration.
        seq = list(self.power["seq"]) if "seq" in self.power else []
        if all and "main" in self.power:
            if self.power["main"] not in seq:
                seq.append(self.power["main"])
        # '==' rather than 'is': identity of string literals is a CPython
        # implementation detail and must not be relied upon.
        return ",".join(map(str, seq)) if mtype == "str" else seq

    def get_meas(self, mtype="VOLT"):
        """Perform Measurement of the specified type for the used channels.

        Returns a list of floats (ints for mtype "ON"), empty on failure.
        """
        cmd = "OUTP?" if mtype == "ON" else "MEAS:%s?"%mtype
        vals = self.i.geta("%s (@%s)"%(cmd, self.get_channels("str", all=True)))
        if not vals:
            return []
        return [int(v) if mtype == "ON" else float(v) for v in vals]

    def action(self, *args):
        """Perform Default Driver Action. In this case Turn on/off power."""
        return self.set_power(*args)

    def get_measure(self):
        """Perform a complete measurement, with all most important readings.

        Returns:
            (results, ok): results maps each configured channel name to its
            Volt/Curr/Watt/On readings; ok is True when all queries worked.
        """
        ok = []
        res = {}
        self.i.set_error()  # clear any stale error state
        raw = {}
        for q in ("VOLT", "CURR", "ON"):
            meas = self.get_meas(q)
            ok.append(bool(meas))
            raw[q] = meas
        if not all(ok):
            self.i.set_error("Measurement Read Fail: OK=%s/%s (%s)"%
                             (sum(ok), len(ok), self.i.get_error()))
            return ({}, False)
        # enumerate instead of chans.index(ch): no O(n^2) scan and correct
        # even with duplicate channel ids
        for idx, ch in enumerate(self.get_channels("list")):
            key = self.cfg[str(ch)]["name"]
            res[key] = {"Volt": raw["VOLT"][idx],
                        "Curr": raw["CURR"][idx],
                        "Watt": raw["VOLT"][idx] * raw["CURR"][idx],
                        "On": raw["ON"][idx]
                        }
        return (res, all(ok))
| {
"repo_name": "taiwenko/python",
"path": "instruments/AgilentN6705B.py",
"copies": "1",
"size": "4267",
"license": "mit",
"hash": 6568216786939185000,
"line_mean": 29.2624113475,
"line_max": 78,
"alpha_frac": 0.5683149754,
"autogenerated": false,
"ratio": 3.3651419558359623,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9378423942646746,
"avg_score": 0.011006597717843129,
"num_lines": 141
} |
""" Agilent's 'Masshunter Quant'
"""
from DateTime import DateTime
from Products.Archetypes.event import ObjectInitializedEvent
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims import logger
from bika.lims.browser import BrowserView
from bika.lims.idserver import renameAfterCreation
from bika.lims.utils import changeWorkflowState
from bika.lims.utils import tmpID
from cStringIO import StringIO
from datetime import datetime
from operator import itemgetter
from plone.i18n.normalizer.interfaces import IIDNormalizer
from zope.component import getUtility
import csv
import json
import plone
import zope
import zope.event
from bika.lims.exportimport.instruments.resultsimport import InstrumentCSVResultsFileParser,\
AnalysisResultsImporter
import traceback
title = "Agilent - Masshunter Quantitative"
def Import(context, request):
    """Read Agilent's Masshunter Quant analysis results.

    Parses the uploaded file from *request* and imports the results into
    the matching Analysis Requests.  Returns a JSON string with 'errors',
    'log' and 'warns' lists.
    """
    infile = request.form['amhq_file']
    fileformat = request.form['amhq_format']
    artoapply = request.form['amhq_artoapply']
    override = request.form['amhq_override']
    sample = request.form.get('amhq_sample', 'requestid')
    instrument = request.form.get('amhq_instrument', None)
    errors = []
    logs = []
    # Initialised up-front: the original only bound ``warns`` inside the
    # ``if parser:`` branch but referenced it in the result dict, raising
    # NameError whenever no parser could be created (e.g. no file selected).
    warns = []
    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    elif fileformat == 'csv':
        parser = MasshunterQuantCSVParser(infile)
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))
    if parser:
        # Allowed Analysis Request workflow states
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']
        # [override non-empty results, override with empty results]
        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]
        # Sample lookup criteria
        sam = ['getRequestID', 'getSampleID', 'getClientSampleID']
        if sample == 'requestid':
            sam = ['getRequestID']
        elif sample == 'sampleid':
            sam = ['getSampleID']
        elif sample == 'clientsid':
            sam = ['getClientSampleID']
        elif sample == 'sample_clientsid':
            sam = ['getSampleID', 'getClientSampleID']
        importer = MasshunterQuantImporter(parser=parser,
                                           context=context,
                                           idsearchcriteria=sam,
                                           allowed_ar_states=status,
                                           allowed_analysis_states=None,
                                           override=over,
                                           instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except Exception:
            # Keep the import best-effort and report the traceback; the
            # original bare ``except:`` also trapped SystemExit and
            # KeyboardInterrupt.
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)
    results = {'errors': errors, 'log': logs, 'warns': warns}
    return json.dumps(results)
class MasshunterQuantCSVParser(InstrumentCSVResultsFileParser):
"""Parser for Agilent Masshunter Quant CSV export files.

The file holds three consecutive sections: a header block, a
'Sequence Table' and the 'Quantification Results'; _parseline()
dispatches each line to the parser for the current section.
"""
# --- Header section row keys ---
HEADERKEY_BATCHINFO = 'Batch Info'
HEADERKEY_BATCHDATAPATH = 'Batch Data Path'
HEADERKEY_ANALYSISTIME = 'Analysis Time'
HEADERKEY_ANALYSTNAME = 'Analyst Name'
HEADERKEY_REPORTTIME = 'Report Time'
HEADERKEY_REPORTERNAME = 'Reporter Name'
HEADERKEY_LASTCALIBRATION = 'Last Calib Update'
HEADERKEY_BATCHSTATE = 'Batch State'
# --- Sequence Table section markers ---
SEQUENCETABLE_KEY = 'Sequence Table'
SEQUENCETABLE_HEADER_DATAFILE = 'Data File'
SEQUENCETABLE_HEADER_SAMPLENAME = 'Sample Name'
# Service (pre/mid/post run) check injections, not real samples
SEQUENCETABLE_PRERUN = 'prerunrespchk.d'
SEQUENCETABLE_MIDRUN = 'mid_respchk.d'
SEQUENCETABLE_POSTRUN = 'post_respchk.d'
# Columns converted to float
SEQUENCETABLE_NUMERICHEADERS = ('Inj Vol',)
# --- Quantitation Results section markers ---
QUANTITATIONRESULTS_KEY = 'Quantification Results'
QUANTITATIONRESULTS_TARGETCOMPOUND = 'Target Compound'
QUANTITATIONRESULTS_HEADER_DATAFILE = 'Data File'
QUANTITATIONRESULTS_PRERUN = 'prerunrespchk.d'
QUANTITATIONRESULTS_MIDRUN = 'mid_respchk.d'
QUANTITATIONRESULTS_POSTRUN = 'post_respchk.d'
# Columns converted to float
QUANTITATIONRESULTS_NUMERICHEADERS = ('Resp', 'ISTD Resp', 'Resp Ratio',
'Final Conc', 'Exp Conc', 'Accuracy')
QUANTITATIONRESULTS_COMPOUNDCOLUMN = 'Compound'
# Lines whose first cell is empty (leading comma) act as separators
COMMAS = ','
def __init__(self, csv):
InstrumentCSVResultsFileParser.__init__(self, csv)
# Section-tracking state; self._header is presumably initialised by
# the base parser class -- TODO confirm
self._end_header = False
self._end_sequencetable = False
self._sequences = []
self._sequencesheader = []
self._quantitationresultsheader = []
self._numline = 0
def getAttachmentFileType(self):
# Human-readable label for the attachment created on import
return "Agilent's Masshunter Quant CSV"
def _parseline(self, line):
    """Dispatch *line* to the parser for the file section currently open."""
    if not self._end_header:
        return self.parse_headerline(line)
    if not self._end_sequencetable:
        return self.parse_sequencetableline(line)
    return self.parse_quantitationesultsline(line)
def parse_headerline(self, line):
""" Parses header lines
Header example:
Batch Info,2013-03-20T07:11:09.9053262-07:00,2013-03-20T07:12:55.5280967-07:00,2013-03-20T07:11:07.1047817-07:00,,,,,,,,,,,,,,
Batch Data Path,D:\MassHunter\Data\130129\QuantResults\130129LS.batch.bin,,,,,,,,,,,,,,,,
Analysis Time,3/20/2013 7:11 AM,Analyst Name,Administrator,,,,,,,,,,,,,,
Report Time,3/20/2013 7:12 AM,Reporter Name,Administrator,,,,,,,,,,,,,,
Last Calib Update,3/20/2013 7:11 AM,Batch State,Processed,,,,,,,,,,,,,,
,,,,,,,,,,,,,,,,,

Returns 0 to continue with the next line, -1 on fatal error.
"""
if self._end_header == True:
# Header already processed
return 0
# The 'Sequence Table' key ends the header section
if line.startswith(self.SEQUENCETABLE_KEY):
self._end_header = True
if len(self._header) == 0:
self.err("No header found", numline=self._numline)
return -1
return 0
splitted = [token.strip() for token in line.split(',')]
# Batch Info,2013-03-20T07:11:09.9053262-07:00,2013-03-20T07:12:55.5280967-07:00,2013-03-20T07:11:07.1047817-07:00,,,,,,,,,,,,,,
if splitted[0] == self.HEADERKEY_BATCHINFO:
if self.HEADERKEY_BATCHINFO in self._header:
self.warn("Header Batch Info already found. Discarding",
numline=self._numline, line=line)
return 0
# Keep every non-empty cell that follows the key
self._header[self.HEADERKEY_BATCHINFO] = []
for i in range(len(splitted) - 1):
if splitted[i + 1]:
self._header[self.HEADERKEY_BATCHINFO].append(splitted[i + 1])
# Batch Data Path,D:\MassHunter\Data\130129\QuantResults\130129LS.batch.bin,,,,,,,,,,,,,,,,
elif splitted[0] == self.HEADERKEY_BATCHDATAPATH:
if self.HEADERKEY_BATCHDATAPATH in self._header:
self.warn("Header Batch Data Path already found. Discarding",
numline=self._numline, line=line)
return 0;
if splitted[1]:
self._header[self.HEADERKEY_BATCHDATAPATH] = splitted[1]
else:
self.warn("Batch Data Path not found or empty",
numline=self._numline, line=line)
# Analysis Time,3/20/2013 7:11 AM,Analyst Name,Administrator,,,,,,,,,,,,,,
# This row carries both the analysis time and the analyst name
elif splitted[0] == self.HEADERKEY_ANALYSISTIME:
if splitted[1]:
try:
d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p")
self._header[self.HEADERKEY_ANALYSISTIME] = d
except ValueError:
self.err("Invalid Analysis Time format",
numline=self._numline, line=line)
else:
self.warn("Analysis Time not found or empty",
numline=self._numline, line=line)
if splitted[2] and splitted[2] == self.HEADERKEY_ANALYSTNAME:
if splitted[3]:
self._header[self.HEADERKEY_ANALYSTNAME] = splitted[3]
else:
self.warn("Analyst Name not found or empty",
numline=self._numline, line=line)
else:
self.err("Analyst Name not found",
numline=self._numline, line=line)
# Report Time,3/20/2013 7:12 AM,Reporter Name,Administrator,,,,,,,,,,,,,,
elif splitted[0] == self.HEADERKEY_REPORTTIME:
if splitted[1]:
try:
d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p")
self._header[self.HEADERKEY_REPORTTIME] = d
except ValueError:
self.err("Invalid Report Time format",
numline=self._numline, line=line)
else:
self.warn("Report time not found or empty",
numline=self._numline, line=line)
if splitted[2] and splitted[2] == self.HEADERKEY_REPORTERNAME:
if splitted[3]:
self._header[self.HEADERKEY_REPORTERNAME] = splitted[3]
else:
self.warn("Reporter Name not found or empty",
numline=self._numline, line=line)
else:
self.err("Reporter Name not found",
numline=self._numline, line=line)
# Last Calib Update,3/20/2013 7:11 AM,Batch State,Processed,,,,,,,,,,,,,,
elif splitted[0] == self.HEADERKEY_LASTCALIBRATION:
if splitted[1]:
try:
d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p")
self._header[self.HEADERKEY_LASTCALIBRATION] = d
except ValueError:
self.err("Invalid Last Calibration time format",
numline=self._numline, line=line)
else:
self.warn("Last Calibration time not found or empty",
numline=self._numline, line=line)
if splitted[2] and splitted[2] == self.HEADERKEY_BATCHSTATE:
if splitted[3]:
self._header[self.HEADERKEY_BATCHSTATE] = splitted[3]
else:
self.warn("Batch state not found or empty",
numline=self._numline, line=line)
else:
self.err("Batch state not found",
numline=self._numline, line=line)
return 0
def parse_sequencetableline(self, line):
""" Parses sequence table lines
Sequence Table example:
Sequence Table,,,,,,,,,,,,,,,,,
Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,,
prerunrespchk.d,prerunrespchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
DSS_Nist_L1.d,DSS_Nist_L1,P1-A2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
DSS_Nist_L2.d,DSS_Nist_L2,P1-B2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
DSS_Nist_L3.d,DSS_Nist_L3,P1-C2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
UTAK_DS_L1.d,UTAK_DS_L1,P1-D2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
UTAK_DS_L2.d,UTAK_DS_L2,P1-E2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
mid_respchk.d,mid_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
UTAK_DS_low.d,UTAK_DS_Low,P1-F2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
FDBS_31.d,FDBS_31,P1-G2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
FDBS_32.d,FDBS_32,P1-H2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
LS_60-r001.d,LS_60,P1-G12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
LS_60-r002.d,LS_60,P1-G12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
LS_61-r001.d,LS_61,P1-H12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
LS_61-r002.d,LS_61,P1-H12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
post_respchk.d,post_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
,,,,,,,,,,,,,,,,,
"""
# Section key and pre/mid-run service injections are skipped silently:
# Sequence Table,,,,,,,,,,,,,,,,,
# prerunrespchk.d,prerunrespchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
# mid_respchk.d,mid_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
# ,,,,,,,,,,,,,,,,,
if line.startswith(self.SEQUENCETABLE_KEY) \
or line.startswith(self.SEQUENCETABLE_PRERUN) \
or line.startswith(self.SEQUENCETABLE_MIDRUN) \
or self._end_sequencetable == True:
# Nothing to do, continue
return 0
# Column-name row for the data rows that follow:
# Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,,
if line.startswith(self.SEQUENCETABLE_HEADER_DATAFILE):
self._sequencesheader = [token.strip() for token in line.split(',') if token.strip()]
return 0
# Post-run injection, next section key or a separator row ends the table:
# post_respchk.d,post_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
# Quantitation Results,,,,,,,,,,,,,,,,,
if line.startswith(self.SEQUENCETABLE_POSTRUN) \
or line.startswith(self.QUANTITATIONRESULTS_KEY) \
or line.startswith(self.COMMAS):
self._end_sequencetable = True
if len(self._sequences) == 0:
# NOTE(review): 'linenum' differs from the 'numline' keyword used
# everywhere else in this class -- verify against the base parser
self.err("No Sequence Table found", linenum=self._numline)
return -1
# Jumps 2 lines:
# Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,,
# prerunrespchk.d,prerunrespchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
return 2
# Regular data row, e.g.:
# DSS_Nist_L1.d,DSS_Nist_L1,P1-A2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
splitted = [token.strip() for token in line.split(',')]
sequence = {}
for colname in self._sequencesheader:
sequence[colname] = ''
for i in range(len(splitted)):
token = splitted[i]
if i < len(self._sequencesheader):
colname = self._sequencesheader[i]
# Numeric columns are converted; a bad number is kept verbatim
if token and colname in self.SEQUENCETABLE_NUMERICHEADERS:
try:
sequence[colname] = float(token)
except ValueError:
self.warn(
"No valid number ${token} in column ${index} (${column_name})",
mapping={"token": token,
"index": str(i + 1),
"column_name": colname},
numline=self._numline, line=line)
sequence[colname] = token
else:
sequence[colname] = token
elif token:
self.err("Orphan value in column ${index} (${token})",
mapping={"index": str(i+1),
"token": token},
numline=self._numline, line=line)
self._sequences.append(sequence)
def parse_quantitationesultsline(self, line):
""" Parses quantitation result lines
(method name kept as-is -- a misspelling of 'quantitationresults' --
because external code may reference it)
Quantitation results example:
Quantitation Results,,,,,,,,,,,,,,,,,
Target Compound,25-OH D3+PTAD+MA,,,,,,,,,,,,,,,,
Data File,Compound,ISTD,Resp,ISTD Resp,Resp Ratio, Final Conc,Exp Conc,Accuracy,,,,,,,,,
prerunrespchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,5816,274638,0.0212,0.9145,,,,,,,,,,,
DSS_Nist_L1.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,6103,139562,0.0437,1.6912,,,,,,,,,,,
DSS_Nist_L2.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,11339,135726,0.0835,3.0510,,,,,,,,,,,
DSS_Nist_L3.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,15871,141710,0.1120,4.0144,,,,,,,,,,,
mid_respchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,4699,242798,0.0194,0.8514,,,,,,,,,,,
DSS_Nist_L3-r002.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,15659,129490,0.1209,4.3157,,,,,,,,,,,
UTAK_DS_L1-r001.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,29846,132264,0.2257,7.7965,,,,,,,,,,,
UTAK_DS_L1-r002.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,28696,141614,0.2026,7.0387,,,,,,,,,,,
post_respchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,5022,231748,0.0217,0.9315,,,,,,,,,,,
,,,,,,,,,,,,,,,,,
Target Compound,25-OH D2+PTAD+MA,,,,,,,,,,,,,,,,
Data File,Compound,ISTD,Resp,ISTD Resp,Resp Ratio, Final Conc,Exp Conc,Accuracy,,,,,,,,,
prerunrespchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,6222,274638,0.0227,0.8835,,,,,,,,,,,
DSS_Nist_L1.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,1252,139562,0.0090,0.7909,,,,,,,,,,,
DSS_Nist_L2.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,3937,135726,0.0290,0.9265,,,,,,,,,,,
DSS_Nist_L3.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,826,141710,0.0058,0.7697,,,,,,,,,,,
mid_respchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,7864,242798,0.0324,0.9493,,,,,,,,,,,
DSS_Nist_L3-r002.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,853,129490,0.0066,0.7748,,,,,,,,,,,
UTAK_DS_L1-r001.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,127496,132264,0.9639,7.1558,,,,,,,,,,,
UTAK_DS_L1-r002.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,135738,141614,0.9585,7.1201,,,,,,,,,,,
post_respchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,6567,231748,0.0283,0.9219,,,,,,,,,,,
,,,,,,,,,,,,,,,,,
"""
# Section key, service injections and separator rows are skipped:
# Quantitation Results,,,,,,,,,,,,,,,,,
# prerunrespchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,5816,274638,0.0212,0.9145,,,,,,,,,,,
# mid_respchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,4699,242798,0.0194,0.8514,,,,,,,,,,,
# post_respchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,6567,231748,0.0283,0.9219,,,,,,,,,,,
# ,,,,,,,,,,,,,,,,,
if line.startswith(self.QUANTITATIONRESULTS_KEY) \
or line.startswith(self.QUANTITATIONRESULTS_PRERUN) \
or line.startswith(self.QUANTITATIONRESULTS_MIDRUN) \
or line.startswith(self.QUANTITATIONRESULTS_POSTRUN) \
or line.startswith(self.COMMAS):
# Nothing to do, continue
return 0
# Column-name row for the result rows that follow:
# Data File,Compound,ISTD,Resp,ISTD Resp,Resp Ratio, Final Conc,Exp Conc,Accuracy,,,,,,,,,
if line.startswith(self.QUANTITATIONRESULTS_HEADER_DATAFILE):
self._quantitationresultsheader = [token.strip() for token in line.split(',') if token.strip()]
return 0
# Target Compound,25-OH D3+PTAD+MA,,,,,,,,,,,,,,,,
if line.startswith(self.QUANTITATIONRESULTS_TARGETCOMPOUND):
# New set of Quantitation Results
splitted = [token.strip() for token in line.split(',')]
if not splitted[1]:
self.warn("No Target Compound found",
numline=self._numline, line=line)
return 0
# Regular result row, e.g.:
# DSS_Nist_L1.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,1252,139562,0.0090,0.7909,,,,,,,,,,,
splitted = [token.strip() for token in line.split(',')]
quantitation = {}
for colname in self._quantitationresultsheader:
quantitation[colname] = ''
for i in range(len(splitted)):
token = splitted[i]
if i < len(self._quantitationresultsheader):
colname = self._quantitationresultsheader[i]
# Numeric columns are converted; a bad number is kept verbatim
if token and colname in self.QUANTITATIONRESULTS_NUMERICHEADERS:
try:
quantitation[colname] = float(token)
except ValueError:
self.warn(
"No valid number ${token} in column ${index} (${column_name})",
mapping={"token": token,
"index": str(i + 1),
"column_name": colname},
numline=self._numline, line=line)
quantitation[colname] = token
else:
quantitation[colname] = token
elif token:
self.err("Orphan value in column ${index} (${token})",
mapping={"index": str(i+1),
"token": token},
numline=self._numline, line=line)
if self.QUANTITATIONRESULTS_COMPOUNDCOLUMN in quantitation:
compound = quantitation[self.QUANTITATIONRESULTS_COMPOUNDCOLUMN]
# Look for sequence matches and populate rawdata
datafile = quantitation.get(self.QUANTITATIONRESULTS_HEADER_DATAFILE, '')
if not datafile:
self.err("No Data File found for quantitation result",
numline=self._numline, line=line)
else:
# The result row must match exactly one Sequence Table entry
seqs = [sequence for sequence in self._sequences \
if sequence.get('Data File', '') == datafile]
if len(seqs) == 0:
self.err("No sample found for quantitative result ${data_file}",
mapping={"data_file": datafile},
numline=self._numline, line=line)
elif len(seqs) > 1:
self.err("More than one sequence found for quantitative result: ${data_file}",
mapping={"data_file": datafile},
numline=self._numline, line=line)
else:
objid = seqs[0].get(self.SEQUENCETABLE_HEADER_SAMPLENAME, '')
if objid:
quantitation['DefaultResult'] = 'Final Conc'
quantitation['Remarks'] = _("Autoimport")
# Merge this compound into the sample's raw-result row
rows = self.getRawResults().get(objid, [])
raw = rows[0] if len(rows) > 0 else {}
raw[compound] = quantitation
self._addRawResult(objid, raw, True)
else:
self.err("No valid sequence for ${data_file}",
mapping={"data_file": datafile},
numline=self._numline, line=line)
else:
self.err("Value for column '${column}' not found",
mapping={"column": self.QUANTITATIONRESULTS_COMPOUNDCOLUMN},
numline=self._numline, line=line)
class MasshunterQuantImporter(AnalysisResultsImporter):
    """Results importer for Agilent Masshunter Quant files.

    Thin subclass: all behaviour lives in AnalysisResultsImporter; this
    class only gives the import entry point a dedicated importer name.
    """

    def __init__(self, parser, context, idsearchcriteria, override,
                 allowed_ar_states=None, allowed_analysis_states=None,
                 instrument_uid=''):
        AnalysisResultsImporter.__init__(
            self, parser, context, idsearchcriteria, override,
            allowed_ar_states, allowed_analysis_states, instrument_uid)
| {
"repo_name": "hocinebendou/bika.gsoc",
"path": "bika/lims/exportimport/instruments/agilent/masshunter/quantitative.py",
"copies": "5",
"size": "23863",
"license": "mit",
"hash": -6618794874881368000,
"line_mean": 46.9176706827,
"line_max": 138,
"alpha_frac": 0.5486736789,
"autogenerated": false,
"ratio": 3.4335251798561153,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6482198858756115,
"avg_score": null,
"num_lines": null
} |
# a gimbal plus the chopper itself
import math
from math import degrees, radians, sin, cos, atan2, asin
import random
#from modules.pyrexopengl import *
#from modules.pyrexopenglconstans import *
import vector
import axial_frame
import coordinate_system
from OpenGL.GL import *
import cube
import cubes_vbo
# Define a simple function to create ctypes arrays of floats:
def vec(*args):
    """Pack positional arguments into a plain list.

    Historically returned a ctypes float array: (c_float * len(args))(*args).
    """
    return [value for value in args]
#def vecl(lst):
# return (GLfloat * len(lst))(*lst)
# set_imu_ocs(
# set_abs_desired_camera_orientation(
# set_rel_desired_camera_orientation(
# set_camera_trackpoint(
# tick(
class Gimbal1:
# Operating modes, selected by the set_* methods below
MODE_ABSOLUTE = 1 # tries to keep the camera steady in world-space
MODE_RELATIVE = 2 # direct manual manipulation of gimbal motor angles
MODE_TRACK = 3 # keeps the camera pointed at the given world-coordinate
MODE_RAW = 4 # just take yaw/roll/pitch directly
def __init__(self):
"""Build the gimbal state, its cube geometry and one VBO per stage."""
self.mode = self.MODE_RAW
# mount orientation (fed from the IMU via set_imu_ocs)
self.ocs = coordinate_system.CoordinateSystem()
self.camera_desired_orientation = axial_frame.AxialFrame()
self.camera_trackpoint = vector.Vector()
self.camera_trackpoint_normal = vector.Vector()
# calculated motor angles
self.yaw = 0.
self.roll = 0.
self.pitch = 0.
# per-stage cube geometry and the coordinate system of each stage
cubes_lists, ocs_list = self._build_gimbal()
cubes_yaw = cubes_lists[0]
cubes_roll = cubes_lists[1]
cubes_pitch = cubes_lists[2]
self.ocs_yaw = ocs_list[0]
self.ocs_roll = ocs_list[1]
self.ocs_pitch = ocs_list[2]
# vertex_lists = self._build_body()
# self.vertex_lists_blade = vertex_lists[0]
# self.vertex_lists_body = vertex_lists[1]
# one VBO per independently-moving part
cubes_body = self._build_body()
self.cubes_vbo_body = cubes_vbo.CubesVbo(len(cubes_body))
self.cubes_vbo_body.update_cubes(0, cubes_body)
self.cubes_vbo_yaw = cubes_vbo.CubesVbo(len(cubes_yaw))
self.cubes_vbo_yaw.update_cubes(0, cubes_yaw)
self.cubes_vbo_roll = cubes_vbo.CubesVbo(len(cubes_roll))
self.cubes_vbo_roll.update_cubes(0, cubes_roll)
self.cubes_vbo_pitch = cubes_vbo.CubesVbo(len(cubes_pitch))
self.cubes_vbo_pitch.update_cubes(0, cubes_pitch)
self.color = (1., 0., 0.2, 1.)
def set_imu_ocs(self, ocs):
# Adopt the IMU-reported coordinate system as the gimbal mount pose
self.ocs.set(ocs)
def set_raw_orientation_angles(self, yaw, roll, pitch):
    """Drive the three motor angles directly; switches to MODE_RAW."""
    self.mode = self.MODE_RAW
    self.yaw = yaw
    self.roll = roll
    self.pitch = pitch
def set_abs_desired_camera_orientation(self, a_frame):
    """Request the camera hold *a_frame* in world space (MODE_ABSOLUTE)."""
    self.mode = self.MODE_ABSOLUTE
    self.camera_desired_orientation.set(a_frame)
def set_rel_desired_camera_orientation(self, a_frame):
    """Request the camera hold *a_frame* relative to the mount (MODE_RELATIVE)."""
    self.mode = self.MODE_RELATIVE
    self.camera_desired_orientation.set(a_frame)
def set_camera_trackpoint(self, trackpoint, normal):
    """Point the camera at a world coordinate (MODE_TRACK)."""
    self.mode = self.MODE_TRACK
    self.camera_trackpoint.set(trackpoint)
    self.camera_trackpoint_normal.set(normal)
def _calc_gimbal_angles(self, gimbal_uav_aframe):
    """Decompose the desired gimbal orientation into motor angles.

    Returns (yaw, roll, pitch) in degrees.  As the original notes, this
    decomposition is valid only while the desired orientation is not
    rolled near 90 degrees.
    """
    x_axis = gimbal_uav_aframe.x_axis
    z_axis = gimbal_uav_aframe.z_axis
    yaw = -atan2(x_axis[0], x_axis[2]) + radians(90.)
    roll = -asin(x_axis[1])
    # Rebuild the yaw-only frame, then measure pitch within it
    z_flat = vector.Vector((sin(-yaw), 0., cos(-yaw)))
    y_flat = x_axis.cross(z_flat)
    pitch = atan2(y_flat.dot(z_axis), z_flat.dot(z_axis))
    return degrees(yaw), degrees(roll), degrees(pitch)
def _build_blade(self, ofs):
    """Build a quad-rotor blade at offset *ofs* as four tilted cube halves.

    The blade is two crossed bars (90 degrees apart at a random spin
    angle); each bar is two halves tilted in opposite directions around
    the local x axis.  Returns the list of Cube instances.

    Replaces four copy-pasted construction blocks with one loop over
    (spin, tilt, side) tuples — behaviour is unchanged.
    """
    cubes = []
    blade_col = (.15, .15, .15, 1.)
    blade_len = 1.1
    blade_width = blade_len * 0.08
    blade_thickness = 0.008
    angle = random.uniform(0., 360.)
    # (spin around y, tilt around x, which half along the bar)
    for spin, tilt, side in ((angle, 5., 1.), (angle, -5., -1.),
                             (angle - 90., 5., 1.), (angle - 90., -5., -1.)):
        c = cube.Cube(blade_len / 2., blade_thickness, blade_width)
        c.ocs.a_frame.rotate(c.ocs.a_frame.y_axis, spin)
        c.ocs.a_frame.rotate(c.ocs.a_frame.x_axis, tilt)
        c.ocs.pos.add(c.ocs.a_frame.x_axis * (side * blade_len / 4.))
        c.ocs.pos.add(ofs)
        c.set_color_f(*blade_col)
        cubes.append(c)
    return cubes
def _build_body(self):
    """Build the quadcopter body, two support beams and four rotors.

    Returns a flat list of cube objects positioned in the body frame.
    """
    cubes = []
    # body: a flat plate, yawed 45 degrees so the beams cross it diagonally
    c = cube.Cube(0.6, 0.03, 0.6)
    body_ocs = coordinate_system.CoordinateSystem()
    body_ocs.set(c.ocs)  # unrotated copy of the body frame, used to place rotors
    c.ocs.a_frame.rotate(c.ocs.a_frame.y_axis, 45.)
    c.set_color_f(.7, .7, .7, 1.)
    cubes.append(c)
    # support beams
    beam_len = 1.9
    beam_width = 0.05
    beam_color = (.1, .1, .1, 1.)
    c = cube.Cube(beam_len, beam_width, beam_width)
    beam1_ocs = c.ocs  # NOTE(review): unused — looks like a leftover
    c.set_color_f(*beam_color)
    cubes.append(c)
    c = cube.Cube(beam_len, beam_width, beam_width)
    c.ocs.a_frame.rotate(c.ocs.a_frame.y_axis, 90.)
    c.set_color_f(*beam_color)
    cubes.append(c)
    beam_len = 1.9  # re-assignment of the same value (harmless)
    blade_yofs = 0.1  # lift the rotors slightly above the beams
    # One rotor at each beam end. `c` still refers to the second beam here;
    # presumably its y axis is unchanged by the yaw rotation above (rotation
    # is about y itself) — TODO confirm against a_frame.rotate semantics.
    cubes += self._build_blade( body_ocs.a_frame.x_axis * beam_len * 0.5 + c.ocs.a_frame.y_axis * blade_yofs)
    cubes += self._build_blade(-body_ocs.a_frame.x_axis * beam_len * 0.5 + c.ocs.a_frame.y_axis * blade_yofs)
    cubes += self._build_blade( body_ocs.a_frame.z_axis * beam_len * 0.5 + c.ocs.a_frame.y_axis * blade_yofs)
    cubes += self._build_blade(-body_ocs.a_frame.z_axis * beam_len * 0.5 + c.ocs.a_frame.y_axis * blade_yofs)
    return cubes
def _build_gimbal(self):
    """Build the three gimbal platforms (yaw, roll, pitch) as cube lists.

    Returns ((cubes_yaw, cubes_roll, cubes_pitch),
             (ocs_yaw, ocs_roll, ocs_pitch)) — each cube list is meant to be
    rendered inside its matching local coordinate system.
    """
    scale = 0.5
    # yaw platform
    ocs_yaw = coordinate_system.CoordinateSystem()
    s = .05 * scale  # pipe base radius
    h = .5 * scale   # pipe length
    gimbal_color = (1., 0., 0.2, 1.)
    cubes_yaw = [self._gen_box(-s, s, -s, s, -h-s, s)]
    for c in cubes_yaw:
        c.set_color_f(*gimbal_color)
    # roll platform (top view)
    #         0   x1
    #         |   |
    # O       |        O--z2
    # O       |        O
    # O       |        O
    # OOOOOOOOOOO--z1
    #         O
    # O-------0
    #
    ocs_roll = coordinate_system.CoordinateSystem()
    ocs_roll.pos.set((0., -h, 0.))  # roll frame hangs below the yaw pipe
    s1 = s*.8; s2 = s; s3 = s*.9    # radii of the three arm segments
    x1 = .5 * scale; z1 = .2 * scale; z2 = z1 + .5 * scale
    cubes_roll = [
        self._gen_box( -s1, s1, -s1, s1, -s1, z1+s1),  # 0 .. z1
        self._gen_box(-x1-s2, s2, z1-s2, x1+s2, -s2, z1+s2),  # -x1 .. x1
        self._gen_box(-x1-s3, s3, z1+s3, -x1+s3, -s3, z2+s3),  # z1 .. z2 (left)
        self._gen_box( x1-s3, s3, z1+s3, x1+s3, -s3, z2+s3)]  # z1 .. z2 (right)
    for c in cubes_roll:
        c.set_color_f(*gimbal_color)
    # pitch platform (top view)
    #     0   x1 x2
    #     |   |  |
    # OOOOOOOOO--|--z1
    # O       O  |
    # OOOO OOOO--0
    # O       O
    # OOOOOOOOO
    #
    ocs_pitch = coordinate_system.CoordinateSystem()
    ocs_pitch.pos.set((0., 0., z2))  # pitch frame sits at the end of the roll arms
    s1 = s*.3  # pole radius
    s2 = s*.4  # platform thickness
    # NOTE: x1/z1 are reused with new meanings from here on.
    x2 = x1; x1 = x1*.8; z1 = (z2-z1)*.8
    cubes_pitch = [
        self._gen_box( -x2-s1, s1, -s1, x2+s1, -s1, s1),  # pole
        self._gen_box( -x1, s2, -z1, x1, -s2, z1)]  # platform
    for c in cubes_pitch:
        c.set_color_f(*gimbal_color)
    return (cubes_yaw, cubes_roll, cubes_pitch), (ocs_yaw, ocs_roll, ocs_pitch)
def _rotate_motors_to(self, yaw, roll, pitch):
    """Snap the three gimbal motor frames to the given angles (degrees).

    Each frame is reset and then rotated about its own motor axis:
    yaw about y, roll about z, pitch about x.
    """
    for ocs, axis_name, angle in ((self.ocs_yaw, "y_axis", yaw),
                                  (self.ocs_roll, "z_axis", roll),
                                  (self.ocs_pitch, "x_axis", pitch)):
        frame = ocs.a_frame
        frame.reset()
        frame.rotate(getattr(frame, axis_name), angle)
def _gen_box(self, x1, y1, z1, x2, y2, z2):
    """Return a Cube spanning an axis-aligned box.

    x1, y1, z1 - top-left-close point. x2, y2, z2 - bottom-right-far point.
    Requires x2 > x1, y1 > y2, z2 > z1 (y decreases downwards).

    Removed: a large chunk of commented-out pyglet vertex-list code that
    predated the Cube-based implementation.
    """
    # Internal invariant; message lists the x, y, z pairs in order.
    assert x2>x1 and y1>y2 and z2>z1, "%f %f %f %f %f %f" % (x1, x2, y1, y2, z1, z2)
    c = cube.Cube(x2 - x1, y1 - y2, z2 - z1)
    # The cube is created centred on its origin; shift it to the box centre.
    c.ocs.pos.add(((x1 + x2) / 2., (y1 + y2) / 2., (z1 + z2) / 2.))
    return c
def tick(self, dt):
    """Per-frame update: derive motor angles from the current camera mode
    and drive the motor frames there.

    MODE_RAW leaves self.yaw/roll/pitch exactly as set externally.
    """
    mode = self.mode
    if mode == self.MODE_ABSOLUTE:
        rel_aframe = self.ocs.a_frame.proj_in(self.camera_desired_orientation)
        self.yaw, self.roll, self.pitch = self._calc_gimbal_angles(rel_aframe)
    elif mode == self.MODE_RELATIVE:
        self.yaw, self.roll, self.pitch = \
            self._calc_gimbal_angles(self.camera_desired_orientation)
    elif mode == self.MODE_TRACK:
        # Re-aim the desired orientation at the trackpoint first.
        view_direction = self.camera_trackpoint - self.ocs.pos
        self.camera_desired_orientation.look_direction2(
            view_direction, self.camera_trackpoint_normal)
        self.yaw, self.roll, self.pitch = \
            self._calc_gimbal_angles(self.camera_desired_orientation)
    self._rotate_motors_to(self.yaw, self.roll, self.pitch)
def render(self):
    """Draw the gimbal: body VBO, then the yaw/roll/pitch platforms nested
    inside each other's matrix frames.

    Cleaned up: removed the large blocks of commented-out legacy vertex-list
    rendering, the unused first r,g,b,a binding (every use of it was
    commented out), and the `if 1:` scaffolding around the nested pushes.
    The live GL call order is unchanged.
    """
    # Fixed white material; with GL_COLOR_MATERIAL set to
    # GL_AMBIENT_AND_DIFFUSE these ambient/diffuse values would instead be
    # taken from the vertices themselves.
    glMaterialfv(GL_FRONT, GL_AMBIENT, vec(0.1, 0.1, 0.1, 1.))
    glMaterialfv(GL_FRONT, GL_DIFFUSE, vec(1., 1., 1., 1.))
    glMaterialfv(GL_FRONT, GL_EMISSION, vec(0., 0., 0., 1.))
    glMaterialfv(GL_FRONT, GL_SPECULAR, vec(0., 0., 0., 1.))
    glMaterialf(GL_FRONT, GL_SHININESS, 100.)
    glColor4f(0., 0., 0., 0.)  # works in case of glDisable(GL_LIGHTING)
    glPushMatrix()
    glMultMatrixf(self.ocs.get_opengl_matrix2())
    self.cubes_vbo_body.render()
    # The gimbal platforms are tinted with this object's color.
    r, g, b = self.color[0], self.color[1], self.color[2]
    # yaw platform
    glPushMatrix()
    glMultMatrixf(self.ocs_yaw.get_opengl_matrix2())
    glMaterialfv(GL_FRONT, GL_DIFFUSE, vec(r, g, b, 1.))
    self.cubes_vbo_yaw.render()
    # roll platform (nested in the yaw frame)
    glPushMatrix()
    glMultMatrixf(self.ocs_roll.get_opengl_matrix2())
    self.cubes_vbo_roll.render()
    # pitch platform (nested in the roll frame)
    glPushMatrix()
    glMultMatrixf(self.ocs_pitch.get_opengl_matrix2())
    self.cubes_vbo_pitch.render()
    self._draw_axis_engineers(1.)
    glPopMatrix()
    glPopMatrix()
    glPopMatrix()
    glPopMatrix()
def _draw_axis(self, r):
    """Draw an RGB axis triad of length r: x red, y green, z blue."""
    glPushAttrib(GL_ENABLE_BIT)
    glDisable(GL_LIGHTING)
    glLineWidth(4.)
    glBegin(GL_LINES)
    for color, tip in (((1.0, 0.0, 0.0), (r, 0., 0.)),   # x-axis. red
                       ((0.0, 1.0, 0.0), (0., r, 0.)),   # y-axis. green
                       ((0.0, 0.0, 1.0), (0., 0., r))):  # z-axis. blue
        glColor3f(*color)
        glVertex3f(0., 0., 0.)
        glVertex3f(*tip)
    glEnd()
    glPopAttrib()
def _draw_axis_engineers(self, r):
    """Draw an axis triad in engineering convention:
    x (red) is forward, y (green) is left, z (blue) is up.
    """
    glPushAttrib(GL_ENABLE_BIT)
    glDisable(GL_LIGHTING)
    glLineWidth(2.)
    glBegin(GL_LINES)
    for color, tip in (((1.0, 0.0, 0.0), (0., 0., r)),   # x: forward (red)
                       ((0.0, 1.0, 0.0), (-r, 0., 0.)),  # y: left (green)
                       ((0.0, 0.0, 1.0), (0., r, 0.))):  # z: up (blue)
        glColor3f(*color)
        glVertex3f(0., 0., 0.)
        glVertex3f(*tip)
    glEnd()
    glPopAttrib()
| {
"repo_name": "fdkz/flybyrift",
"path": "system/gimbal1.py",
"copies": "1",
"size": "17209",
"license": "mit",
"hash": 2857070440420656600,
"line_mean": 36.0730088496,
"line_max": 118,
"alpha_frac": 0.5295484921,
"autogenerated": false,
"ratio": 2.7053922339254832,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3734940726025483,
"avg_score": null,
"num_lines": null
} |
"""AGIPD & LPD geometry handling."""
from cfelpyutils.crystfel_utils import load_crystfel_geometry
from copy import copy
import h5py
from itertools import product
import numpy as np
from scipy.ndimage import affine_transform
import warnings
from .crystfel_fmt import write_crystfel_geom
__all__ = ['AGIPD_1MGeometry', 'LPD_1MGeometry']
class GeometryFragment:
    """3D position & orientation of a single detector tile.

    ``corner_pos`` is the corner of the tile where the first stored pixel
    sits.  The tile is a rectangle of ``ss_pixels`` steps along the
    slow-scan axis and ``fs_pixels`` along the fast-scan axis;
    ``ss_vec``/``fs_vec`` give the displacement of one pixel step in each
    direction.  All coordinates are (x, y, z), in metres.
    """

    def __init__(self, corner_pos, ss_vec, fs_vec, ss_pixels, fs_pixels):
        self.corner_pos = corner_pos
        self.ss_vec = ss_vec
        self.fs_vec = fs_vec
        self.ss_pixels = ss_pixels
        self.fs_pixels = fs_pixels

    @classmethod
    def from_panel_dict(cls, d):
        """Build a fragment from one CrystFEL panel dictionary."""
        res = d['res']  # resolution: pixels per metre
        corner = np.array([d['cnx'], d['cny'], d['coffset']]) / res
        ss = np.array([d['ssx'], d['ssy'], d['ssz']]) / res
        fs = np.array([d['fsx'], d['fsy'], d['fsz']]) / res
        n_ss = d['max_ss'] - d['min_ss'] + 1
        n_fs = d['max_fs'] - d['min_fs'] + 1
        return cls(corner, ss, fs, n_ss, n_fs)

    def corners(self):
        """The tile's four corner coordinates, shape (4, 3)."""
        origin = self.corner_pos
        ss_edge = self.ss_vec * self.ss_pixels
        fs_edge = self.fs_vec * self.fs_pixels
        return np.stack([
            origin,
            origin + fs_edge,
            origin + ss_edge + fs_edge,
            origin + ss_edge,
        ])

    def centre(self):
        """Centre point of the tile in metres."""
        half_ss = 0.5 * self.ss_vec * self.ss_pixels
        half_fs = 0.5 * self.fs_vec * self.fs_pixels
        return self.corner_pos + half_ss + half_fs

    def snap(self, px_shape):
        """Approximate this fragment on a regular 2D pixel grid.

        Returns a GridGeometryFragment with integer (y, x) positions.
        """
        # Round positions/vectors to whole pixels; drop the z dimension.
        corner = np.around(self.corner_pos[:2] / px_shape).astype(np.int32)
        ss = np.around(self.ss_vec[:2] / px_shape).astype(np.int32)
        fs = np.around(self.fs_vec[:2] / px_shape).astype(np.int32)
        # One step vector must point along x and the other along y,
        # though we don't know which is which.
        assert {tuple(np.abs(ss)), tuple(np.abs(fs))} == {(0, 1), (1, 0)}
        # Swap (x, y) to (y, x) so the values can index numpy arrays.
        return GridGeometryFragment(
            corner[::-1], ss[::-1], fs[::-1], self.ss_pixels, self.fs_pixels
        )
class GridGeometryFragment:
    """2D axis-aligned placement of one detector tile on a pixel grid.

    Used by the 'snapped' geometry, which assembles a detector image into a
    2D array efficiently.  All coordinates here are (y, x), suitable for
    indexing numpy arrays.

    ``ss_vec``/``fs_vec`` must be unit vectors along +/- x or y.  In the
    output array the fast-scan dimension is always x, so data with
    fast-scan vertical is transposed; either way the data may additionally
    need flipping along one or both axes.
    """

    def __init__(self, corner_pos, ss_vec, fs_vec, ss_pixels, fs_pixels):
        self.ss_vec = ss_vec
        self.fs_vec = fs_vec
        self.ss_pixels = ss_pixels
        self.fs_pixels = fs_pixels
        if fs_vec[0] == 0:
            # Fast scan already runs along x: flip only, no transpose.
            f_ord, s_ord = fs_vec[1], ss_vec[0]
            self.transform = lambda arr: arr[..., ::s_ord, ::f_ord]
            shift = np.array([min(s_ord, 0) * ss_pixels,
                              min(f_ord, 0) * fs_pixels])
            self.pixel_dims = np.array([ss_pixels, fs_pixels])
        else:
            # Fast scan runs along y: transpose so it becomes x, then flip.
            f_ord, s_ord = fs_vec[0], ss_vec[1]
            self.transform = \
                lambda arr: arr.swapaxes(-1, -2)[..., ::f_ord, ::s_ord]
            shift = np.array([min(f_ord, 0) * fs_pixels,
                              min(s_ord, 0) * ss_pixels])
            self.pixel_dims = np.array([fs_pixels, ss_pixels])
        # A negative direction moves the tile's minimal index below the
        # given corner; shift so corner_idx is always the minimal corner.
        self.corner_idx = corner_pos + shift
        self.opp_corner_idx = self.corner_idx + self.pixel_dims
class DetectorGeometryBase:
    """Base class for detector geometry. Subclassed for specific detectors."""
    # Define in subclasses:
    pixel_size = 0.0          # metres
    frag_ss_pixels = 0        # slow-scan pixels in one tile
    frag_fs_pixels = 0        # fast-scan pixels in one tile
    n_modules = 0
    n_tiles_per_module = 0
    expected_data_shape = (0, 0, 0)
    _pixel_corners = np.array([  # pixel units; overridden for DSSC
        [0, 1, 1, 0],  # slow-scan
        [0, 0, 1, 1]   # fast-scan
    ])
    _draw_first_px_on_tile = 1  # Tile num of 1st pixel - overridden for LPD

    @property
    def _pixel_shape(self):
        """Pixel (x, y) shape. Overridden for DSSC."""
        return np.array([1., 1.], dtype=np.float64) * self.pixel_size

    def __init__(self, modules, filename='No file'):
        # List of lists (1 per module) of fragments (1 per tile)
        self.modules = modules
        # self.filename is metadata for plots, we don't read/write the file.
        # There are separate methods for reading and writing.
        self.filename = filename
        self._snapped_cache = None  # lazily-built snapped geometry

    def _get_plot_scale_factor(self, axis_units):
        # Factor converting metres to the requested plot units.
        if axis_units == 'm':
            return 1
        elif axis_units == 'px':
            return 1 / self.pixel_size
        else:
            raise ValueError("axis_units must be 'px' or 'm', not {!r}"
                             .format(axis_units))

    def inspect(self, axis_units='px', frontview=True):
        """Plot the 2D layout of this detector geometry.

        Returns a matplotlib Axes object.
        """
        import matplotlib.pyplot as plt
        from matplotlib.collections import PatchCollection, LineCollection
        from matplotlib.patches import Polygon
        scale = self._get_plot_scale_factor(axis_units)
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(1, 1, 1)
        rects = []
        first_rows = []
        for module in self.modules:
            for t, fragment in enumerate(module, start=1):
                corners = fragment.corners()[:, :2]  # Drop the Z dimension
                rects.append(Polygon(corners * scale))
                if t == self._draw_first_px_on_tile:
                    # Find the ends of the first row in reading order
                    c1 = fragment.corner_pos * scale
                    c2 = c1 + (fragment.fs_vec * fragment.fs_pixels * scale)
                    first_rows.append((c1[:2], c2[:2]))
        # Add tile shapes
        pc = PatchCollection(rects, facecolor=(0.75, 1.0, 0.75), edgecolor=None)
        ax.add_collection(pc)
        # Add markers for first pixels & lines for first row
        first_rows = np.array(first_rows)
        first_px_x, first_px_y = first_rows[:, 0, 0], first_rows[:, 0, 1]
        ax.scatter(first_px_x, first_px_y, marker='x', label='First pixel')
        ax.add_collection(LineCollection(
            first_rows, linestyles=':', color='k', label='First row'
        ))
        ax.legend()
        cross_size = 0.02 * scale
        # Draw cross in the centre.
        ax.hlines(0, -cross_size, +cross_size, colors='0.75', linewidths=2)
        ax.vlines(0, -cross_size, +cross_size, colors='0.75', linewidths=2)
        if frontview:
            ax.invert_xaxis()
        ax.set_xlabel('metres' if axis_units == 'm' else 'pixels')
        ax.set_ylabel('metres' if axis_units == 'm' else 'pixels')
        return ax

    @classmethod
    def from_crystfel_geom(cls, filename):
        """Read a CrystFEL format (.geom) geometry file.

        Returns a new geometry object.
        """
        geom_dict = load_crystfel_geometry(filename)
        modules = []
        for p in range(cls.n_modules):
            tiles = []
            modules.append(tiles)
            for a in range(cls.n_tiles_per_module):
                # Panels are named 'p<module>a<tile>' in the .geom file.
                d = geom_dict['panels']['p{}a{}'.format(p, a)]
                tiles.append(GeometryFragment.from_panel_dict(d))
        return cls(modules, filename=filename)

    def write_crystfel_geom(self, filename, *,
                            data_path='/entry_1/instrument_1/detector_1/data',
                            mask_path=None, dims=('frame', 'modno', 'ss', 'fs'),
                            adu_per_ev=None, clen=None, photon_energy=None):
        """Write this geometry to a CrystFEL format (.geom) geometry file.

        Parameters
        ----------
        filename : str
            Filename of the geometry file to write.
        data_path : str
            Path to the group that contains the data array in the hdf5 file.
            Default: ``'/entry_1/instrument_1/detector_1/data'``.
        mask_path : str
            Path to the group that contains the mask array in the hdf5 file.
        dims : tuple
            Dimensions of the data. Extra dimensions, except for the defaults,
            should be added by their index, e.g.
            ('frame', 'modno', 0, 'ss', 'fs') for raw data.
            Default: ``('frame', 'modno', 'ss', 'fs')``.
            Note: the dimensions must contain frame, ss, fs.
        adu_per_ev : float
            ADU (analog digital units) per electron volt for the considered
            detector.
        clen : float
            Distance between sample and detector in meters
        photon_energy : float
            Beam wave length in eV
        """
        write_crystfel_geom(
            self, filename, data_path=data_path, mask_path=mask_path, dims=dims,
            adu_per_ev=adu_per_ev, clen=clen, photon_energy=photon_energy,
        )
        if self.filename == 'No file':
            # Adopt the written file's name as this geometry's label.
            self.filename = filename

    def _snapped(self):
        """Snap geometry to a 2D pixel grid

        This returns a new geometry object. The 'snapped' geometry is
        less accurate, but can assemble data into a 2D array more efficiently,
        because it doesn't do any interpolation.
        """
        if self._snapped_cache is None:
            new_modules = []
            for module in self.modules:
                new_tiles = [t.snap(px_shape=self._pixel_shape) for t in module]
                new_modules.append(new_tiles)
            # NOTE(review): SnappedGeometry is defined elsewhere in this module.
            self._snapped_cache = SnappedGeometry(new_modules, self)
        return self._snapped_cache

    @staticmethod
    def split_tiles(module_data):
        """Split data from a detector module into tiles.

        Must be implemented in subclasses.
        """
        raise NotImplementedError

    def output_array_for_position_fast(self, extra_shape=(), dtype=np.float32):
        """Make an empty output array to use with position_modules_fast

        You can speed up assembling images by reusing the same output array:
        call this once, and then pass the array as the ``out=`` parameter to
        :meth:`position_modules_fast()`. By default, it allocates a new array on
        each call, which can be slow.

        Parameters
        ----------
        extra_shape : tuple, optional
            By default, a 2D output array is generated, to assemble a single
            detector image. If you are assembling multiple pulses at once, pass
            ``extra_shape=(nframes,)`` to get a 3D output array.
        dtype : optional (Default: np.float32)
        """
        return self._snapped().make_output_array(extra_shape=extra_shape,
                                                 dtype=dtype)

    def position_modules_fast(self, data, out=None):
        """Assemble data from this detector according to where the pixels are.

        This approximates the geometry to align all pixels to a 2D grid.

        Parameters
        ----------
        data : ndarray
            The last three dimensions should match the modules, then the
            slow scan and fast scan pixel dimensions.
        out : ndarray, optional
            An output array to assemble the image into. By default, a new
            array is allocated. Use :meth:`output_array_for_position_fast` to
            create a suitable array.
            If an array is passed in, it must match the dtype of the data and the
            shape of the array that would have been allocated.
            Parts of the array not covered by detector tiles are not overwritten.
            In general, you can reuse an output array if you are assembling
            similar pulses or pulse trains with the same geometry.

        Returns
        -------
        out : ndarray
            Array with one dimension fewer than the input.
            The last two dimensions represent pixel y and x in the detector space.
        centre : ndarray
            (y, x) pixel location of the detector centre in this geometry.
        """
        return self._snapped().position_modules(data, out=out)

    def position_all_modules(self, data, out=None):
        """Deprecated alias for :meth:`position_modules_fast`"""
        return self.position_modules_fast(data, out=out)

    def plot_data_fast(self,
                       data, *,
                       axis_units='px',
                       frontview=True,
                       ax=None,
                       figsize=None,
                       colorbar=True,
                       **kwargs):
        """Plot data from the detector using this geometry.

        This approximates the geometry to align all pixels to a 2D grid.

        Returns a matplotlib axes object.

        Parameters
        ----------
        data : ndarray
            Should have exactly 3 dimensions, for the modules, then the
            slow scan and fast scan pixel dimensions.
        axis_units : str
            Show the detector scale in pixels ('px') or metres ('m').
        frontview : bool
            If True (the default), x increases to the left, as if you were looking
            along the beam. False gives a 'looking into the beam' view.
        ax : `~matplotlib.axes.Axes` object, optional
            Axes that will be used to draw the image. If None is given (default)
            a new axes object will be created.
        figsize : tuple
            Size of the figure (width, height) in inches to be drawn
            (default: (10, 10))
        colorbar : bool, dict
            Draw colobar with default values (if boolean is given). Colorbar
            appearance can be controlled by passing a dictionary of properties.
        kwargs :
            Additional keyword arguments passed to `~matplotlib.imshow`
        """
        return self._snapped().plot_data(
            data, axis_units=axis_units, frontview=frontview, figsize=figsize,
            ax=ax, colorbar=colorbar, **kwargs
        )

    @classmethod
    def _distortion_array_slice(cls, m, t):
        """Which part of distortion array each tile is.
        """
        # _tile_slice gives the slice for the tile within its module.
        # The distortion array joins the modules along the slow-scan axis, so
        # we need to offset the slow-scan slice to land in the correct module.
        ss_slice_inmod, fs_slice = cls._tile_slice(t)
        mod_px_ss = cls.expected_data_shape[1]
        mod_offset = m * mod_px_ss
        ss_slice = slice(
            ss_slice_inmod.start + mod_offset, ss_slice_inmod.stop + mod_offset
        )
        return ss_slice, fs_slice

    def to_distortion_array(self, allow_negative_xy=False):
        """Generate a distortion array for pyFAI from this geometry.

        Returns a float32 array of shape
        (n_modules * module_ss_pixels, module_fs_pixels, n_corners, 3),
        holding the (z, y, x) coordinates of each pixel's corners.
        Unless allow_negative_xy is True, x & y are shifted so they are
        all non-negative.
        """
        nmods, mod_px_ss, mod_px_fs = self.expected_data_shape
        ncorners = self._pixel_corners.shape[1]
        distortion = np.zeros((nmods * mod_px_ss, mod_px_fs, ncorners, 3),
                              dtype=np.float32)
        # First-corner positions of all pixels, modules stacked along ss.
        pixpos = self.get_pixel_positions(centre=False).reshape(
            (nmods * mod_px_ss, mod_px_fs, 3)
        )
        px, py, pz = np.moveaxis(pixpos, -1, 0)
        corner_ss_offsets = self._pixel_corners[0]
        corner_fs_offsets = self._pixel_corners[1]
        for m, mod in enumerate(self.modules, start=0):
            for t, tile in enumerate(mod, start=0):
                ss_unit_x, ss_unit_y, ss_unit_z = tile.ss_vec
                fs_unit_x, fs_unit_y, fs_unit_z = tile.fs_vec
                # Which part of the array is this tile?
                tile_ss_slice, tile_fs_slice = self._distortion_array_slice(m, t)
                # Get coordinates of each pixel's first corner
                # 2D arrays, shape: (64, 128)
                pixel_corner1_x = px[tile_ss_slice, tile_fs_slice]
                pixel_corner1_y = py[tile_ss_slice, tile_fs_slice]
                pixel_corner1_z = pz[tile_ss_slice, tile_fs_slice]
                # Calculate corner coordinates for each pixel
                # 3D arrays, shape: (64, 128, 4)
                corners_x = (
                    pixel_corner1_x[:, :, np.newaxis]
                    + corner_ss_offsets * ss_unit_x
                    + corner_fs_offsets * fs_unit_x
                )
                corners_y = (
                    pixel_corner1_y[:, :, np.newaxis]
                    + corner_ss_offsets * ss_unit_y
                    + corner_fs_offsets * fs_unit_y
                )
                corners_z = (
                    pixel_corner1_z[:, :, np.newaxis]
                    + corner_ss_offsets * ss_unit_z
                    + corner_fs_offsets * fs_unit_z
                )
                # Insert the data into the array
                distortion[tile_ss_slice, tile_fs_slice, :, 0] = corners_z
                distortion[tile_ss_slice, tile_fs_slice, :, 1] = corners_y
                distortion[tile_ss_slice, tile_fs_slice, :, 2] = corners_x
        if not allow_negative_xy:
            # Shift the x & y origin from the centre to the corner
            min_yx = distortion[..., 1:].min(axis=(0, 1, 2))
            distortion[..., 1:] -= min_yx
        return distortion

    @classmethod
    def _tile_slice(cls, tileno):
        """Implement in subclass: which part of module array each tile is.
        """
        raise NotImplementedError

    def _module_coords_to_tile(self, slow_scan, fast_scan):
        """Implement in subclass: positions in module to tile numbers & pos in tile
        """
        raise NotImplementedError

    @classmethod
    def _adjust_pixel_coords(cls, ss_coords, fs_coords, centre):
        """Called by get_pixel_positions; overridden by DSSC"""
        if centre:
            # A pixel is from n to n+1 in each axis, so centres are at n+0.5.
            ss_coords += 0.5
            fs_coords += 0.5

    def get_pixel_positions(self, centre=True):
        """Get the physical coordinates of each pixel in the detector

        The output is an array with shape like the data, with an extra dimension
        of length 3 to hold (x, y, z) coordinates. Coordinates are in metres.

        If centre=True, the coordinates are calculated for the centre of each
        pixel. If not, the coordinates are for the first corner of the pixel
        (the one nearest the [0, 0] corner of the tile in data space).
        """
        out = np.zeros(self.expected_data_shape + (3,), dtype=np.float64)
        # Prepare some arrays to use inside the loop
        pixel_ss_coord, pixel_fs_coord = np.meshgrid(
            np.arange(0, self.frag_ss_pixels, dtype=np.float64),
            np.arange(0, self.frag_fs_pixels, dtype=np.float64),
            indexing='ij'
        )
        # Shift coordinates from corner to centre if requested.
        # This is also where the DSSC subclass shifts odd rows by half a pixel
        self._adjust_pixel_coords(pixel_ss_coord, pixel_fs_coord, centre)
        for m, mod in enumerate(self.modules, start=0):
            for t, tile in enumerate(mod, start=0):
                corner_x, corner_y, corner_z = tile.corner_pos
                ss_unit_x, ss_unit_y, ss_unit_z = tile.ss_vec
                fs_unit_x, fs_unit_y, fs_unit_z = tile.fs_vec
                # Calculate coordinates of each pixel's first corner
                # 2D arrays, shape: (64, 128)
                pixels_x = (
                    corner_x
                    + pixel_ss_coord * ss_unit_x
                    + pixel_fs_coord * fs_unit_x
                )
                pixels_y = (
                    corner_y
                    + pixel_ss_coord * ss_unit_y
                    + pixel_fs_coord * fs_unit_y
                )
                pixels_z = (
                    corner_z
                    + pixel_ss_coord * ss_unit_z
                    + pixel_fs_coord * fs_unit_z
                )
                # Which part of the array is this tile?
                tile_ss_slice, tile_fs_slice = self._tile_slice(t)
                # Insert the data into the array
                out[m, tile_ss_slice, tile_fs_slice, 0] = pixels_x
                out[m, tile_ss_slice, tile_fs_slice, 1] = pixels_y
                out[m, tile_ss_slice, tile_fs_slice, 2] = pixels_z
        return out

    def data_coords_to_positions(self, module_no, slow_scan, fast_scan):
        """Convert data array coordinates to physical positions

        Data array coordinates are how you might refer to a pixel in an array
        of detector data: module number, and indices in the slow-scan and
        fast-scan directions. But coordinates in the two pixel dimensions aren't
        necessarily integers, e.g. if they refer to the centre of a peak.

        module_no, fast_scan and slow_scan should all be numpy arrays of the
        same shape. module_no should hold integers, starting from 0,
        so 0: Q1M1, 1: Q1M2, etc.

        slow_scan and fast_scan describe positions within that module.
        They may hold floats for sub-pixel positions. In both, 0.5 is the centre
        of the first pixel.

        Returns an array of similar shape with an extra dimension of length 3,
        for (x, y, z) coordinates in metres.

        .. seealso::

           :doc:`agipd_geometry` demonstrates using this method.
        """
        assert module_no.shape == slow_scan.shape == fast_scan.shape
        # We want to avoid iterating over the positions in Python.
        # So we assemble arrays of the corner position and step vectors for all
        # tiles, and then use numpy indexing to select the relevant ones for
        # each set of coordinates.
        tiles_corner_pos = np.stack([
            t.corner_pos for m in self.modules for t in m
        ])
        tiles_ss_vec = np.stack([
            t.ss_vec for m in self.modules for t in m
        ])
        tiles_fs_vec = np.stack([
            t.fs_vec for m in self.modules for t in m
        ])
        # Convert coordinates within each module to coordinates in a tile
        tilenos, tile_ss, tile_fs = self._module_coords_to_tile(slow_scan, fast_scan)
        # The indexes of the relevant tiles in the arrays assembled above
        all_tiles_ix = (module_no * self.n_tiles_per_module) + tilenos
        # Select the relevant tile geometry for each set of coordinates
        coords_tile_corner = tiles_corner_pos[all_tiles_ix]
        coords_ss_vec = tiles_ss_vec[all_tiles_ix]
        coords_fs_vec = tiles_fs_vec[all_tiles_ix]
        # Calculate the physical coordinate for each data coordinate
        return coords_tile_corner \
            + (np.expand_dims(tile_ss, -1) * coords_ss_vec) \
            + (np.expand_dims(tile_fs, -1) * coords_fs_vec)
class AGIPD_1MGeometry(DetectorGeometryBase):
"""Detector layout for AGIPD-1M
The coordinates used in this class are 3D (x, y, z), and represent metres.
You won't normally instantiate this class directly:
use one of the constructor class methods to create or load a geometry.
"""
pixel_size = 2e-4 # 2e-4 metres == 0.2 mm
frag_ss_pixels = 64
frag_fs_pixels = 128
expected_data_shape = (16, 512, 128)
n_modules = 16
n_tiles_per_module = 8
@classmethod
def from_quad_positions(cls, quad_pos, asic_gap=2, panel_gap=29,
                        unit=pixel_size):
    """Generate an AGIPD-1M geometry from quadrant positions.

    This produces an idealised geometry, assuming all modules are perfectly
    flat, aligned and equally spaced within their quadrant.

    The quadrant positions are given in pixel units, referring to the first
    pixel of the first module in each quadrant, corresponding to data
    channels 0, 4, 8 and 12.

    The origin of the coordinates is in the centre of the detector.
    Coordinates increase upwards and to the left (looking along the beam).

    To give positions in units other than pixels, pass the *unit* parameter
    as the length of the unit in metres.
    E.g. ``unit=1e-3`` means the coordinates are in millimetres.
    """
    px = cls.pixel_size
    asic_gap_px = asic_gap * unit / px
    panel_gap_px = panel_gap * unit / px
    # Space one module takes up along y, including the gap to its
    # neighbour (metres): 128 fast-scan px + panel gap.
    module_height = (cls.frag_fs_pixels + panel_gap_px) * px
    # Space one tile (asic) takes up along x, including the gap between
    # tiles (metres): 64 slow-scan px + asic gap.
    tile_width = (cls.frag_ss_pixels + asic_gap_px) * px
    # Quadrants on the two sides of the beam are mirrored.
    quads_x_orientation = [1, 1, -1, -1]
    quads_y_orientation = [-1, -1, 1, 1]
    modules = []
    for p in range(16):
        quad, p_in_quad = divmod(p, 4)
        quad_corner = quad_pos[quad]
        x_orient = quads_x_orientation[quad]
        y_orient = quads_y_orientation[quad]
        corner_y = (quad_corner[1] * unit) - (p_in_quad * module_height)
        tiles = []
        modules.append(tiles)
        for a in range(8):
            corner_x = (quad_corner[0] * unit) + x_orient * tile_width * a
            tiles.append(GeometryFragment(
                corner_pos=np.array([corner_x, corner_y, 0.]),
                ss_vec=np.array([x_orient, 0, 0]) * unit,
                fs_vec=np.array([0, y_orient, 0]) * unit,
                ss_pixels=cls.frag_ss_pixels,
                fs_pixels=cls.frag_fs_pixels,
            ))
    return cls(modules)
def inspect(self, axis_units='px', frontview=True):
    """Plot the 2D layout of this detector geometry.

    Returns a matplotlib Axes object.

    Parameters
    ----------
    axis_units : str
        Show the detector scale in pixels ('px') or metres ('m').
    frontview : bool
        If True (the default), x increases to the left, as if you were looking
        along the beam. False gives a 'looking into the beam' view.
    """
    # Base class draws the tiles; here we only add labels.
    ax = super().inspect(axis_units=axis_units, frontview=frontview)
    scale = self._get_plot_scale_factor(axis_units)
    # Label each module (QnMn, on the middle tile) plus its first and
    # last tiles (T1/T8).
    for ch, module in enumerate(self.modules):
        quad, mod = divmod(ch, 4)
        label = 'Q{Q}M{M}'.format(Q=quad + 1, M=mod + 1)
        cx, cy, _ = module[4].centre() * scale
        ax.text(cx, cy, label, fontweight='bold',
                verticalalignment='center',
                horizontalalignment='center')
        for t in (0, 7):
            cx, cy, _ = module[t].centre() * scale
            ax.text(cx, cy, 'T{}'.format(t + 1),
                    verticalalignment='center',
                    horizontalalignment='center')
    ax.set_title('AGIPD-1M detector geometry ({})'.format(self.filename))
    return ax
def compare(self, other, scale=1.0):
    """Show a comparison of this geometry with another in a 2D plot.

    This shows the current geometry like :meth:`inspect`, with the addition
    of arrows showing how each panel is shifted in the other geometry.

    Parameters
    ----------
    other : AGIPD_1MGeometry
        A second geometry object to compare with this one.
    scale : float
        Scale the arrows showing the difference in positions.
        This is useful to show small differences clearly.
    """
    import matplotlib.pyplot as plt
    from matplotlib.collections import PatchCollection
    from matplotlib.patches import Polygon, FancyArrow
    fig = plt.figure(figsize=(10, 10))
    ax = fig.add_subplot(1, 1, 1)
    rects = []
    arrows = []
    for p, module in enumerate(self.modules):
        for a, fragment in enumerate(module):
            corners = fragment.corners()[:, :2]  # Drop the Z dimension
            corner1, corner1_opp = corners[0], corners[2]
            rects.append(Polygon(corners))
            # Label the first/last tiles of each module, and put the
            # module number on the middle tile.
            if a in {0, 7}:
                cx, cy, _ = fragment.centre()
                ax.text(cx, cy, str(a),
                        verticalalignment='center',
                        horizontalalignment='center')
            elif a == 4:
                cx, cy, _ = fragment.centre()
                ax.text(cx, cy, 'p{}'.format(p),
                        verticalalignment='center',
                        horizontalalignment='center')
            panel2 = other.modules[p][a]
            corners2 = panel2.corners()[:, :2]
            corner2, corner2_opp = corners2[0], corners2[2]
            # One arrow from each of two opposite corners of this tile to
            # the matching corner in the other geometry (skipped when the
            # corner didn't move at all).
            dx, dy = corner2 - corner1
            if not (dx == dy == 0):
                sx, sy = corner1
                arrows.append(FancyArrow(
                    sx, sy, scale * dx, scale * dy, width=5, head_length=4
                ))
            dx, dy = corner2_opp - corner1_opp
            if not (dx == dy == 0):
                sx, sy = corner1_opp
                arrows.append(FancyArrow(
                    sx, sy, scale * dx, scale * dy, width=5, head_length=5
                ))
    pc = PatchCollection(rects, facecolor=(0.75, 1.0, 0.75), edgecolor=None)
    ax.add_collection(pc)
    ac = PatchCollection(arrows)
    ax.add_collection(ac)
    # Set axis limits to fit all shapes, with some margin
    all_x = np.concatenate([s.xy[:, 0] for s in arrows + rects])
    all_y = np.concatenate([s.xy[:, 1] for s in arrows + rects])
    ax.set_xlim(all_x.min() - 20, all_x.max() + 20)
    ax.set_ylim(all_y.min() - 40, all_y.max() + 20)
    ax.set_title('Geometry comparison: {} → {}'
                 .format(self.filename, other.filename))
    ax.text(1, 0, 'Arrows scaled: {}×'.format(scale),
            horizontalalignment="right", verticalalignment="bottom",
            transform=ax.transAxes)
    return ax
def position_modules_interpolate(self, data):
    """Assemble data from this detector according to where the pixels are.

    This performs interpolation, which is very slow.
    Use :meth:`position_modules_fast` to get a pixel-aligned approximation
    of the geometry.

    Parameters
    ----------
    data : ndarray
        The three dimensions should be channelno, pixel_ss, pixel_fs
        (lengths 16, 512, 128). ss/fs are slow-scan and fast-scan.

    Returns
    -------
    out : ndarray
        Array with the one dimension fewer than the input.
        The last two dimensions represent pixel y and x in the detector space.
    centre : ndarray
        (y, x) pixel location of the detector centre in this geometry.
    """
    assert data.shape == (16, 512, 128)
    size_yx, centre = self._get_dimensions()
    # One full-size canvas per tile (16 modules * 8 tiles); each tile is
    # transformed onto its own canvas, and the layers are merged at the end.
    tmp = np.empty((16 * 8,) + size_yx, dtype=data.dtype)

    for i, (module, mod_data) in enumerate(zip(self.modules, data)):
        tiles_data = np.split(mod_data, 8)
        for j, (tile, tile_data) in enumerate(zip(module, tiles_data)):
            # We store (x, y, z), but numpy indexing, and hence affine_transform,
            # work like [y, x]. Rearrange the numbers:
            fs_vec_yx = tile.fs_vec[:2][::-1]
            ss_vec_yx = tile.ss_vec[:2][::-1]

            # Offset by centre to make all coordinates positive
            corner_pos_yx = tile.corner_pos[:2][::-1] + centre

            # Make the rotation matrix
            rotn = np.stack((ss_vec_yx, fs_vec_yx), axis=-1)

            # affine_transform takes a mapping from *output* to *input*.
            # So we reverse the forward transformation.
            transform = np.linalg.inv(rotn)
            offset = np.dot(rotn, corner_pos_yx)  # this seems to work, but is it right?

            affine_transform(
                tile_data,
                transform,
                offset=offset,
                cval=np.nan,
                output_shape=size_yx,
                output=tmp[i * 8 + j],
            )

    # Silence warnings about nans - we expect gaps in the result
    # (nanmax over all-NaN columns raises a RuntimeWarning).
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        out = np.nanmax(tmp, axis=0)

    return out, centre
def _get_dimensions(self):
    """Calculate appropriate array dimensions for assembling data.

    Returns (size_y, size_x), (centre_y, centre_x)
    """
    # Gather corner coordinates of every tile, converted to pixel units.
    all_corners = np.concatenate([
        tile.corners() for module in self.modules for tile in module
    ])[:, :2] / self._pixel_shape

    # Find extremes, add 1 px margin to allow for rounding errors
    min_xy = all_corners.min(axis=0).astype(int) - 1
    max_xy = all_corners.max(axis=0).astype(int) + 1

    size_xy = max_xy - min_xy
    centre_xy = -min_xy
    # Switch xy -> yx for numpy-style indexing
    return tuple(size_xy[::-1]), centre_xy[::-1]
@staticmethod
def split_tiles(module_data):
# Split into 8 tiles along the slow-scan axis
return np.split(module_data, 8, axis=-2)
@classmethod
def _tile_slice(cls, tileno):
    """Return the (ss, fs) slices of the module array covered by tile *tileno* (0-7)."""
    start = tileno * cls.frag_ss_pixels
    ss_slice = slice(start, start + cls.frag_ss_pixels)
    # Every tile spans the module's full fast-scan width.
    fs_slice = slice(0, cls.frag_fs_pixels)
    return ss_slice, fs_slice
@classmethod
def _module_coords_to_tile(cls, slow_scan, fast_scan):
    """Map module-level (ss, fs) coordinates to (tileno, tile_ss, tile_fs)."""
    tile_index = np.floor_divide(slow_scan, cls.frag_ss_pixels)
    within_tile_ss = np.mod(slow_scan, cls.frag_ss_pixels)
    # Tiles are stacked along the slow-scan axis only, so the fast-scan
    # coordinate passes through unchanged.
    return tile_index.astype(np.int16), within_tile_ss, fast_scan
def to_distortion_array(self, allow_negative_xy=False):
    """Return distortion matrix for AGIPD detector, suitable for pyFAI.

    Parameters
    ----------
    allow_negative_xy: bool
      If False (default), shift the origin so no x or y coordinates are
      negative. If True, the origin is the detector centre.

    Returns
    -------
    out: ndarray
        Array of float 32 with shape (8192, 128, 4, 3).
        The dimensions mean:

        - 8192 = 16 modules * 512 pixels (slow scan axis)
        - 128 pixels (fast scan axis)
        - 4 corners of each pixel
        - 3 numbers for z, y, x
    """
    # Overridden only for docstring: the computation is entirely in the
    # base class; this override just documents the AGIPD-specific shape.
    return super().to_distortion_array(allow_negative_xy)
class SnappedGeometry:
    """Detector geometry approximated to align modules to a 2D grid

    The coordinates used in this class are (y, x) suitable for indexing a
    Numpy array; this does not match the (x, y, z) coordinates in the more
    precise geometry above.
    """
    def __init__(self, modules, geom: DetectorGeometryBase):
        # modules: snapped tiles grouped by module, mirroring geom.modules
        self.modules = modules
        # geom: the precise geometry this was derived from
        self.geom = geom
        # Assembled-image size and the (y, x) pixel position of the
        # detector centre within that image.
        self.size_yx, self.centre = self._get_dimensions()

    def make_output_array(self, extra_shape=(), dtype=np.float32):
        """Make an output array for self.position_modules()

        extra_shape gives leading dimensions (e.g. number of frames) to
        prepend to the assembled-image shape. The array is NaN-filled so
        gaps between tiles are distinguishable from real data.
        NOTE(review): filling with NaN assumes a float dtype — an integer
        dtype would fail here; confirm callers only pass float dtypes.
        """
        shape = extra_shape + self.size_yx
        return np.full(shape, np.nan, dtype=dtype)

    def position_modules(self, data, out=None):
        """Implementation for position_modules_fast

        data's last three dimensions must match
        self.geom.expected_data_shape; any leading dimensions are kept.
        out, if given, must already have the matching assembled shape.
        Returns (out, centre).
        """
        assert data.shape[-3:] == self.geom.expected_data_shape
        if out is None:
            out = self.make_output_array(data.shape[:-3], data.dtype)
        else:
            assert out.shape == data.shape[:-3] + self.size_yx
            # Refuse lossy writes into a caller-supplied output array.
            if not np.can_cast(data.dtype, out.dtype, casting='safe'):
                raise TypeError("{} cannot be safely cast to {}".
                                format(data.dtype, out.dtype))

        for i, module in enumerate(self.modules):
            mod_data = data[..., i, :, :]
            tiles_data = self.geom.split_tiles(mod_data)
            for j, tile in enumerate(module):
                tile_data = tiles_data[j]
                # Offset by centre to make all coordinates positive
                y, x = tile.corner_idx + self.centre
                h, w = tile.pixel_dims
                # tile.transform reorients the tile data to match its
                # placement in the assembled image.
                out[..., y : y + h, x : x + w] = tile.transform(tile_data)

        return out, self.centre

    def _get_dimensions(self):
        """Calculate appropriate array dimensions for assembling data.

        Returns (size_y, size_x), (centre_y, centre_x)
        """
        corners = []
        for module in self.modules:
            for tile in module:
                corners.append(tile.corner_idx)
                corners.append(tile.opp_corner_idx)
        corners = np.stack(corners)

        # Find extremes
        min_yx = corners.min(axis=0)
        max_yx = corners.max(axis=0)

        size = max_yx - min_yx
        # The centre is wherever (0, 0) of the precise geometry lands.
        centre = -min_yx
        return tuple(size), centre

    def plot_data(self,
                  modules_data, *,
                  axis_units='px',
                  frontview=True,
                  ax=None,
                  figsize=None,
                  colorbar=False,
                  **kwargs):
        """Implementation for plot_data_fast

        Assembles modules_data and displays it with matplotlib's imshow.
        Remaining keyword arguments are passed through to imshow.
        """
        from matplotlib.cm import viridis
        import matplotlib.pyplot as plt

        if axis_units not in {'px', 'm'}:
            raise ValueError("axis_units must be 'px' or 'm', not {!r}"
                             .format(axis_units))

        res, centre = self.position_modules(modules_data)
        min_y, min_x = -centre
        max_y, max_x = np.array(res.shape) - centre

        # Extend the extent by half a pixel so pixel centres align with
        # their coordinates.
        _extent = np.array((min_x - 0.5, max_x + 0.5, min_y - 0.5, max_y + 0.5))
        cross_size = 20
        if axis_units == 'm':
            _extent *= self.geom.pixel_size
            cross_size *= self.geom.pixel_size

        # Use a dark grey for missing data
        # NOTE(review): `copy` comes from the module namespace (imported
        # above this chunk) — presumably copy.copy; confirm.
        _cmap = copy(viridis)
        _cmap.set_bad('0.25', 1.0)

        kwargs.setdefault('cmap', _cmap)
        kwargs.setdefault('extent', _extent)
        kwargs.setdefault('origin', 'lower')

        if ax is None:
            fig = plt.figure(figsize=figsize or (10, 10))
            ax = fig.add_subplot(1, 1, 1)
        im = ax.imshow(res, **kwargs)

        # colorbar may be True (defaults) or a dict of colorbar options.
        if isinstance(colorbar, dict) or colorbar is True:
            if isinstance(colorbar, bool):
                colorbar = {}
            colorbar = plt.colorbar(im, ax=ax, **colorbar)

        ax.set_xlabel('metres' if axis_units == 'm' else 'pixels')
        ax.set_ylabel('metres' if axis_units == 'm' else 'pixels')

        if frontview:
            ax.invert_xaxis()

        # Draw a cross at the centre
        ax.hlines(0, -cross_size, +cross_size, colors='w', linewidths=1)
        ax.vlines(0, -cross_size, +cross_size, colors='w', linewidths=1)
        return ax
class LPD_1MGeometry(DetectorGeometryBase):
    """Detector layout for LPD-1M

    The coordinates used in this class are 3D (x, y, z), and represent metres.

    You won't normally instantiate this class directly:
    use one of the constructor class methods to create or load a geometry.
    """
    pixel_size = 5e-4  # 5e-4 metres == 0.5 mm
    frag_ss_pixels = 32
    frag_fs_pixels = 128
    n_modules = 16
    n_tiles_per_module = 16
    expected_data_shape = (16, 256, 256)
    _draw_first_px_on_tile = 8  # The first pixel in stored data is on tile 8

    @classmethod
    def from_quad_positions(cls, quad_pos, *, unit=1e-3, asic_gap=None,
                            panel_gap=None):
        """Generate an LPD-1M geometry from quadrant positions.

        This produces an idealised geometry, assuming all modules are perfectly
        flat, aligned and equally spaced within their quadrant.

        The quadrant positions refer to the corner of each quadrant
        where module 4, tile 16 is positioned.
        This is the corner of the last pixel as the data is stored.
        In the initial detector layout, the corner positions are for the top
        left corner of the quadrant, looking along the beam.

        The origin of the coordinates is in the centre of the detector.
        Coordinates increase upwards and to the left (looking along the beam).

        Parameters
        ----------
        quad_pos: list of 2-tuples
            (x, y) coordinates of the last corner (the one by module 4) of each
            quadrant.
        unit: float, optional
            The conversion factor to put the coordinates into metres.
            The default 1e-3 means the numbers are in millimetres.
        asic_gap: float, optional
            The gap between adjacent tiles/ASICs. The default is 4 pixels.
        panel_gap: float, optional
            The gap between adjacent modules/panels. The default is 4 pixels.
        """
        px_conversion = unit / cls.pixel_size
        # Gap defaults are in pixels; explicit values arrive in `unit`.
        asic_gap_px = 4 if (asic_gap is None) else asic_gap * px_conversion
        panel_gap_px = 4 if (panel_gap is None) else panel_gap * px_conversion

        # How much space one panel/module takes up, including the 'panel gap'
        # separating it from its neighbour.
        # In the x dimension, we have only one asic gap (down the centre)
        panel_width = (256 + asic_gap_px + panel_gap_px) * cls.pixel_size
        # In y, we have 7 gaps between the 8 ASICs in each column.
        panel_height = (256 + (7 * asic_gap_px) + panel_gap_px) * cls.pixel_size

        # How much space does one tile take up, including gaps to its neighbours?
        tile_width = (cls.frag_fs_pixels + asic_gap_px) * cls.pixel_size
        tile_height = (cls.frag_ss_pixels + asic_gap_px) * cls.pixel_size

        # Size of a tile from corner to corner, excluding gaps
        tile_size = np.array([cls.frag_fs_pixels, cls.frag_ss_pixels, 0]) * cls.pixel_size

        # Per-module offsets (in whole panels) from the quadrant corner,
        # indexed by the module's position within its quadrant.
        panels_across = [-1, -1, 0, 0]
        panels_up = [0, -1, -1, 0]
        modules = []
        for p in range(cls.n_modules):
            quad = p // 4
            quad_corner_x = quad_pos[quad][0] * unit
            quad_corner_y = quad_pos[quad][1] * unit

            p_in_quad = p % 4
            # Top beam-left corner of panel
            panel_corner_x = (quad_corner_x +
                              (panels_across[p_in_quad] * panel_width))
            panel_corner_y = (quad_corner_y +
                              (panels_up[p_in_quad] * panel_height))

            tiles = []
            modules.append(tiles)

            for a in range(cls.n_tiles_per_module):
                # Tiles 0-7 run down one column, tiles 8-15 back up the other.
                if a < 8:
                    up = -a
                    across = -1
                else:
                    up = -(15 - a)
                    across = 0

                tile_last_corner = (
                    np.array([panel_corner_x, panel_corner_y, 0.0])
                    + np.array([across, 0, 0]) * tile_width
                    + np.array([0, up, 0]) * tile_height
                )
                tile_first_corner = tile_last_corner - tile_size

                tiles.append(GeometryFragment(
                    corner_pos=tile_first_corner,
                    ss_vec=np.array([0, 1, 0]) * cls.pixel_size,
                    fs_vec=np.array([1, 0, 0]) * cls.pixel_size,
                    ss_pixels=cls.frag_ss_pixels,
                    fs_pixels=cls.frag_fs_pixels,
                ))
        return cls(modules)

    @classmethod
    def from_h5_file_and_quad_positions(cls, path, positions, unit=1e-3):
        """Load an LPD-1M geometry from an XFEL HDF5 format geometry file

        The quadrant positions are not stored in the file, and must be provided
        separately. By default, both the quadrant positions and the positions
        in the file are measured in millimetres; the unit parameter controls
        this.

        The origin of the coordinates is in the centre of the detector.
        Coordinates increase upwards and to the left (looking along the beam).

        This version of the code only handles x and y translation,
        as this is all that is recorded in the initial LPD geometry file.

        Parameters
        ----------
        path : str
            Path of an EuXFEL format (HDF5) geometry file for LPD.
        positions : list of 2-tuples
            (x, y) coordinates of the last corner (the one by module 4) of each
            quadrant.
        unit : float, optional
            The conversion factor to put the coordinates into metres.
            The default 1e-3 means the numbers are in millimetres.
        """
        assert len(positions) == 4
        modules = []
        with h5py.File(path, 'r') as f:
            for Q, M in product(range(1, 5), range(1, 5)):
                quad_pos = np.array(positions[Q - 1])
                mod_grp = f['Q{}/M{}'.format(Q, M)]
                mod_offset = mod_grp['Position'][:2]

                tiles = []
                # Fix: iterate over the tiles of this module. The previous
                # bound, cls.n_modules, happens to equal n_tiles_per_module
                # (both 16) for LPD-1M, so behaviour is unchanged, but the
                # tile count is the semantically correct constant.
                for T in range(1, cls.n_tiles_per_module + 1):
                    corner_pos = np.zeros(3)
                    tile_offset = mod_grp['T{:02}/Position'.format(T)][:2]
                    corner_pos[:2] = quad_pos + mod_offset + tile_offset

                    # Convert units (mm) to metres
                    corner_pos *= unit

                    # LPD geometry is measured to the last pixel of each tile.
                    # Subtract tile dimensions for the position of 1st pixel.
                    ss_vec = np.array([0, 1, 0]) * cls.pixel_size
                    fs_vec = np.array([1, 0, 0]) * cls.pixel_size
                    first_px_pos = (corner_pos
                                    - (ss_vec * cls.frag_ss_pixels)
                                    - (fs_vec * cls.frag_fs_pixels))

                    tiles.append(GeometryFragment(
                        corner_pos=first_px_pos,
                        ss_vec=ss_vec,
                        fs_vec=fs_vec,
                        ss_pixels=cls.frag_ss_pixels,
                        fs_pixels=cls.frag_fs_pixels,
                    ))
                modules.append(tiles)
        return cls(modules, filename=path)

    def inspect(self, axis_units='px', frontview=True):
        """Plot the 2D layout of this detector geometry.

        Returns a matplotlib Axes object.

        Parameters
        ----------
        axis_units : str
            Show the detector scale in pixels ('px') or metres ('m').
        frontview : bool
            If True (the default), x increases to the left, as if you were looking
            along the beam. False gives a 'looking into the beam' view.
        """
        ax = super().inspect(axis_units=axis_units, frontview=frontview)
        scale = self._get_plot_scale_factor(axis_units)

        # Label modules and a few tiles so the tile ordering is visible.
        for ch, module in enumerate(self.modules):
            s = 'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 1)
            cx, cy, _ = module[0].centre() * scale
            ax.text(cx, cy, s, fontweight='bold',
                    verticalalignment='center',
                    horizontalalignment='center')

            for t in [7, 8, 15]:
                cx, cy, _ = module[t].centre() * scale
                ax.text(cx, cy, 'T{}'.format(t + 1),
                        verticalalignment='center',
                        horizontalalignment='center')

        ax.set_title('LPD-1M detector geometry ({})'.format(self.filename))
        return ax

    @staticmethod
    def split_tiles(module_data):
        """Cut one module's data into its 16 tiles."""
        half1, half2 = np.split(module_data, 2, axis=-1)
        # Tiles 1-8 (half1) are numbered top to bottom, whereas the array
        # starts at the bottom. So we reverse their order after splitting.
        return np.split(half1, 8, axis=-2)[::-1] + np.split(half2, 8, axis=-2)

    @classmethod
    def _tile_slice(cls, tileno):
        # Which part of the array is this tile?
        # (Magic numbers replaced with the equivalent class constants.)
        if tileno < 8:  # First half of module (0 <= t <= 7)
            fs_slice = slice(0, cls.frag_fs_pixels)
            tiles_up = 7 - tileno
        else:  # Second half of module (8 <= t <= 15)
            fs_slice = slice(cls.frag_fs_pixels, 2 * cls.frag_fs_pixels)
            tiles_up = tileno - 8
        tile_offset = tiles_up * cls.frag_ss_pixels
        ss_slice = slice(tile_offset, tile_offset + cls.frag_ss_pixels)
        return ss_slice, fs_slice

    @classmethod
    def _module_coords_to_tile(cls, slow_scan, fast_scan):
        """Map module-level (ss, fs) coordinates to (tileno, tile_ss, tile_fs)."""
        tiles_across, tile_fs = np.divmod(fast_scan, cls.frag_fs_pixels)
        tiles_up, tile_ss = np.divmod(slow_scan, cls.frag_ss_pixels)

        # Each tiles_across is 0 or 1. To avoid iterating over the array with a
        # conditional, multiply the number we want by 1 and the other by 0.
        tileno = (
            (1 - tiles_across) * (7 - tiles_up)  # tileno 0-7
            + tiles_across * (tiles_up + 8)      # tileno 8-15
        )
        return tileno.astype(np.int16), tile_ss, tile_fs

    def to_distortion_array(self, allow_negative_xy=False):
        """Return distortion matrix for LPD detector, suitable for pyFAI.

        Parameters
        ----------
        allow_negative_xy: bool
          If False (default), shift the origin so no x or y coordinates are
          negative. If True, the origin is the detector centre.

        Returns
        -------
        out: ndarray
            Array of float 32 with shape (4096, 256, 4, 3).
            The dimensions mean:

            - 4096 = 16 modules * 256 pixels (slow scan axis)
            - 256 pixels (fast scan axis)
            - 4 corners of each pixel
            - 3 numbers for z, y, x
        """
        # Overridden only for docstring
        return super().to_distortion_array(allow_negative_xy)
def invert_xfel_lpd_geom(path_in, path_out):
    """Invert the coordinates in an XFEL geometry file (HDF5)

    The initial geometry file for LPD was recorded with the coordinates
    increasing down and to the right (looking in the beam direction), but the
    standard XFEL coordinate scheme is the opposite, increasing upwards and to
    the left (looking in beam direction).

    This utility function reads one file, and writes a second with the
    coordinates inverted.
    """
    with h5py.File(path_in, 'r') as fin, h5py.File(path_out, 'x') as fout:
        # NOTE: 'DetectorDescribtion' (sic) matches the misspelt dataset name
        # in the original files; the copy is written with corrected spelling.
        src_ds = fin['DetectorDescribtion']
        dst_ds = fout.create_dataset('DetectorDescription', data=src_ds)
        for attr_name, attr_value in src_ds.attrs.items():
            dst_ds.attrs[attr_name] = attr_value

        # Negate every module and tile position to flip both axes.
        for quad, mod in product(range(1, 5), range(1, 5)):
            mod_path = 'Q{}/M{}/Position'.format(quad, mod)
            fout[mod_path] = -fin[mod_path][:]
            for tile in range(1, 17):
                tile_path = 'Q{}/M{}/T{:02}/Position'.format(quad, mod, tile)
                fout[tile_path] = -fin[tile_path][:]
class DSSC_1MGeometry(DetectorGeometryBase):
    """Detector layout for DSSC-1M

    The coordinates used in this class are 3D (x, y, z), and represent metres.

    You won't normally instantiate this class directly:
    use one of the constructor class methods to create or load a geometry.
    """
    # Hexagonal pixels, 236 μm step in fast-scan axis, 204 μm in slow-scan
    pixel_size = 236e-6
    frag_ss_pixels = 128
    frag_fs_pixels = 256
    n_modules = 16
    n_tiles_per_module = 2
    expected_data_shape = (16, 128, 512)

    # This stretches the dimensions for the 'snapped' geometry so that its pixel
    # grid matches the aspect ratio of the detector pixels.
    _pixel_shape = np.array([1., 1.5/np.sqrt(3)], dtype=np.float64) * pixel_size

    # Pixel corners described clockwise from the top, assuming the reference
    # point for a pixel is outside it, aligned with the top point & left edge.
    # The unit is the width of a pixel, 236 μm.
    # The 4/3 extends the hexagons into the next row to correctly tessellate.
    _pixel_corners = np.stack([
        (np.array([0, 0.25, 0.75, 1, 0.75, 0.25]) * 4 / 3),
        [0.5, 1, 1, 0.5, 0, 0]
    ])

    @classmethod
    def from_h5_file_and_quad_positions(cls, path, positions, unit=1e-3):
        """Load a DSSC geometry from an XFEL HDF5 format geometry file

        The quadrant positions are not stored in the file, and must be provided
        separately. The position given should refer to the bottom right (looking
        along the beam) corner of the quadrant.

        By default, both the quadrant positions and the positions
        in the file are measured in millimetres; the unit parameter controls
        this.

        The origin of the coordinates is in the centre of the detector.
        Coordinates increase upwards and to the left (looking along the beam).

        This version of the code only handles x and y translation,
        as this is all that is recorded in the geometry file.

        Parameters
        ----------

        path : str
          Path of an EuXFEL format (HDF5) geometry file for DSSC.
        positions : list of 2-tuples
          (x, y) coordinates of the last corner (the one by module 4) of each
          quadrant.
        unit : float, optional
          The conversion factor to put the coordinates into metres.
          The default 1e-3 means the numbers are in millimetres.
        """
        assert len(positions) == 4
        modules = []
        # Orientation signs for each quadrant, indexed by Q - 1.
        quads_x_orientation = [-1, -1, 1, 1]
        quads_y_orientation = [1, 1, -1, -1]
        with h5py.File(path, 'r') as f:
            for Q, M in product(range(1, 5), range(1, 5)):
                quad_pos = np.array(positions[Q - 1])
                mod_grp = f['Q{}/M{}'.format(Q, M)]
                mod_offset = mod_grp['Position'][:2]

                # Which way round is this quadrant
                x_orient = quads_x_orientation[Q - 1]
                y_orient = quads_y_orientation[Q - 1]

                tiles = []
                for T in range(1, 3):
                    corner_pos = np.zeros(3)
                    tile_offset = mod_grp['T{:02}/Position'.format(T)][:2]
                    corner_pos[:2] = quad_pos + mod_offset + tile_offset

                    # Convert units (mm) to metres
                    corner_pos *= unit

                    # Measuring in terms of the step within a row, the
                    # step to the next row of hexagons is 1.5/sqrt(3).
                    ss_vec = np.array([0, y_orient, 0]) * cls.pixel_size * 1.5/np.sqrt(3)
                    fs_vec = np.array([x_orient, 0, 0]) * cls.pixel_size

                    # Corner position is measured at low-x, low-y corner (bottom
                    # right as plotted). We want the position of the corner
                    # with the first pixel, which is either high-x low-y or
                    # low-x high-y.
                    if x_orient == -1:
                        first_px_pos = corner_pos - (fs_vec * cls.frag_fs_pixels)
                    else:
                        first_px_pos = corner_pos - (ss_vec * cls.frag_ss_pixels)

                    tiles.append(GeometryFragment(
                        corner_pos=first_px_pos,
                        ss_vec=ss_vec,
                        fs_vec=fs_vec,
                        ss_pixels=cls.frag_ss_pixels,
                        fs_pixels=cls.frag_fs_pixels,
                    ))
                modules.append(tiles)
        return cls(modules, filename=path)

    def inspect(self, axis_units='px', frontview=True):
        """Plot the 2D layout of this detector geometry.

        Returns a matplotlib Axes object.

        Parameters
        ----------
        axis_units : str
            Show the detector scale in pixels ('px') or metres ('m').
        frontview : bool
            If True (the default), x increases to the left, as if you were looking
            along the beam. False gives a 'looking into the beam' view.
        """
        ax = super().inspect(axis_units=axis_units, frontview=frontview)
        scale = self._get_plot_scale_factor(axis_units)

        # Label modules and tiles: the module label goes on tile 1 (index 0),
        # so only tile 2 (index 1) needs its own label.
        for ch, module in enumerate(self.modules):
            s = 'Q{Q}M{M}'.format(Q=(ch // 4) + 1, M=(ch % 4) + 1)
            cx, cy, _ = module[0].centre() * scale
            ax.text(cx, cy, s, fontweight='bold',
                    verticalalignment='center',
                    horizontalalignment='center')

            for t in [1]:
                cx, cy, _ = module[t].centre() * scale
                ax.text(cx, cy, 'T{}'.format(t + 1),
                        verticalalignment='center',
                        horizontalalignment='center')

        ax.set_title('DSSC detector geometry ({})'.format(self.filename))
        return ax

    @staticmethod
    def split_tiles(module_data):
        # Split into 2 tiles along the fast-scan axis
        return np.split(module_data, 2, axis=-1)

    def plot_data_fast(self,
                       data, *,
                       axis_units='px',
                       frontview=True,
                       ax=None,
                       figsize=None,
                       colorbar=False,
                       **kwargs):
        """Plot assembled data, with the hexagonal-pixel aspect ratio applied."""
        ax = super().plot_data_fast(data,
                                    axis_units=axis_units,
                                    frontview=frontview,
                                    ax=ax,
                                    figsize=figsize,
                                    colorbar=colorbar,
                                    **kwargs)
        # Squash image to physically equal aspect ratio, so a circle projected
        # on the detector looks like a circle on screen.
        ax.set_aspect(204/236.)
        return ax

    @classmethod
    def _tile_slice(cls, tileno):
        # Which part of the module array does tile *tileno* (0 or 1) cover?
        tile_offset = tileno * cls.frag_fs_pixels
        fs_slice = slice(tile_offset, tile_offset + cls.frag_fs_pixels)
        ss_slice = slice(0, cls.frag_ss_pixels)  # Every tile covers the full pixel range
        return ss_slice, fs_slice

    def to_distortion_array(self, allow_negative_xy=False):
        """Return distortion matrix for DSSC detector, suitable for pyFAI.

        Parameters
        ----------
        allow_negative_xy: bool
          If False (default), shift the origin so no x or y coordinates are
          negative. If True, the origin is the detector centre.

        Returns
        -------
        out: ndarray
            Array of float 32 with shape (2048, 512, 6, 3).
            The dimensions mean:

            - 2048 = 16 modules * 128 pixels (slow scan axis)
            - 512 pixels (fast scan axis)
            - 6 corners of each pixel
            - 3 numbers for z, y, x
        """
        # Overridden only for docstring
        return super().to_distortion_array(allow_negative_xy=allow_negative_xy)

    @classmethod
    def _adjust_pixel_coords(cls, ss_coords, fs_coords, centre):
        # Shift odd-numbered rows by half a pixel.
        fs_coords[1::2] -= 0.5
        if centre:
            # Vertical (slow scan) centre is 2/3 of the way to the start of the
            # next row of hexagons, because the tessellating pixels extend
            # beyond the start of the next row.
            ss_coords += 2/3
            fs_coords += 0.5
class DSSC_Geometry(DSSC_1MGeometry):
    """DEPRECATED: Use DSSC_1MGeometry instead"""
    def __init__(self, modules, filename='No file'):
        super().__init__(modules, filename)
        # DeprecationWarning is the conventional category for a renamed
        # class; stacklevel=2 points the warning at the caller's code.
        warnings.warn(
            "DSSC_Geometry has been renamed to DSSC_1MGeometry.",
            DeprecationWarning, stacklevel=2
        )
| {
"repo_name": "European-XFEL/h5tools-py",
"path": "karabo_data/geometry2/__init__.py",
"copies": "1",
"size": "60485",
"license": "bsd-3-clause",
"hash": -8078231280233923000,
"line_mean": 38.7888157895,
"line_max": 94,
"alpha_frac": 0.5580945452,
"autogenerated": false,
"ratio": 3.916019166019166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9972418984298717,
"avg_score": 0.0003389453840897239,
"num_lines": 1520
} |
# NOTE(review): this is a Python 2 test fixture for an attribute-visitor
# refactoring test (see the expected-output comments below). The ``print``
# statements and the deliberately odd scoping are part of the fixture and
# must not be modernised; the visitor is expected to collect attributes
# only, ignoring module-level globals like a_global.
a_global = 3


class Rectangle(object):
    attribute_in_class = 3

    class Foo:
        def foo_meth(self):
            # Class defined inside a method: bla_attribute should still be
            # found by the visitor, attributed to ``bla``.
            class bla:
                bla_attribute = "blattr"
                print "foo"
            foolocal = bla()
            self.fooattribute = "fooattribute"
            print "foo nested class"

    # Attributes assigned at class-definition time.
    rect_attribute = Foo()
    rect_attribute.foo_meth()

    def __init__(self, width, height):
        Rectangle.another_width = 3
        self.width = width
        self.width = 123  # should be ignored by Attribute visitor
        self.height = height
        self.color = "red"
        # Plain local, not an attribute: must not be collected.
        no_attribute = "bold"

    def get_area(self):
        global attribute_in_class
        print "getArea ", self.attribute_in_class
        no_attribute = 35
        # Tuple-unpacking assignment: all three targets are attributes.
        self.first_attr_in_tuple, self.second_attr_in_tuple, Rectangle.third_attr_in_tuple = "foo"
        return self.width * self.height

    area = property(get_area, doc='area of the rectangle')
    print "Class scope attribute", attribute_in_class

a_global = 4
print "Global ", a_global
rect = Rectangle(10, 15)
print rect.width
print rect.area
print rect.attribute_in_class
# will ignore a_global -> we want attributes only
##r
# 11
# Rectangle attribute_in_class
# bla bla_attribute
# Foo fooattribute
# Rectangle rect_attribute
# Rectangle another_width
# Rectangle width
# Rectangle height
# Rectangle color
# Rectangle first_attr_in_tuple
# Rectangle second_attr_in_tuple
# Rectangle third_attr_in_tuple
# 9
# Rectangle attribute_in_class
# Rectangle rect_attribute
# Rectangle another_width
# Rectangle width
# Rectangle height
# Rectangle color
# Rectangle first_attr_in_tuple
# Rectangle second_attr_in_tuple
# Rectangle third_attr_in_tuple | {
"repo_name": "aptana/Pydev",
"path": "tests/org.python.pydev.refactoring.tests/src/python/visitor/attributevisitor/testGlobalLocal1.py",
"copies": "8",
"size": "1763",
"license": "epl-1.0",
"hash": 7661214025997893000,
"line_mean": 24.5652173913,
"line_max": 98,
"alpha_frac": 0.644356211,
"autogenerated": false,
"ratio": 3.650103519668737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8294459730668737,
"avg_score": null,
"num_lines": null
} |
""" A global cache for milter LDAP queries
"""
import datetime
class QueryCache(object):
    """A process-wide cache for milter LDAP query results.

    Layout of the class-level ``cache`` dict::

        { directory_server.id: { "_timeout": <seconds>,
                                 <query>: { "timestamp": <datetime>,
                                            "data": <result> } } }
    """

    # Shared by all users of the class.
    cache = {}

    @staticmethod
    def get(directory_server, query):
        """Return the cached data for *query*, or None if absent or expired.

        :param directory_server: The directory server, that runs the query
        :param query: The query itself
        :return: The cached data, or None if it wasn't cached or has timed out
        """
        server_cache = QueryCache.cache.get(directory_server.id)
        if server_cache is None or query not in server_cache:
            # Nothing cached for this server/query combination.
            return None

        # Expired entries are treated as misses (flush() removes them later).
        entry = server_cache[query]
        age = (datetime.datetime.now() - entry["timestamp"]).total_seconds()
        if age > server_cache["_timeout"]:
            return None

        return entry["data"]

    @staticmethod
    def set(directory_server, query, data):
        """Store *data* as the result of *query* on *directory_server*.

        :param directory_server: The directory server, that runs the query
        :param query: The query itself
        :param data: The data returned from the query
        """
        if directory_server.id not in QueryCache.cache:
            # First entry for this server: record its timeout alongside.
            QueryCache.cache[directory_server.id] = {
                "_timeout": directory_server.cache_timeout
            }

        QueryCache.cache[directory_server.id][query] = {
            "timestamp": datetime.datetime.now(),
            "data": data
        }

    @staticmethod
    def flush():
        """Remove timed-out entries, and servers whose cache became empty."""
        now = datetime.datetime.now()
        for server_id in list(QueryCache.cache):
            server_cache = QueryCache.cache[server_id]
            timeout = server_cache["_timeout"]
            for query in list(server_cache):
                if query == "_timeout":
                    continue
                then = server_cache[query]["timestamp"]
                if (now - then).total_seconds() > timeout:
                    # The cache item has timed out. Remove it.
                    del server_cache[query]
            if len(server_cache) == 1:
                # Only "_timeout" left: drop the whole server entry.
                del QueryCache.cache[server_id]
"repo_name": "dploeger/disclaimr",
"path": "disclaimr/query_cache.py",
"copies": "1",
"size": "2728",
"license": "mit",
"hash": 3392218367358194700,
"line_mean": 24.9904761905,
"line_max": 80,
"alpha_frac": 0.5835777126,
"autogenerated": false,
"ratio": 4.600337268128162,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0002817815078351477,
"num_lines": 105
} |
""""A global list of error identifiers that may be used.
The identifiers are gathered here for easy reference, for example when
writing an external error logger then it may be useful to know what
kinds of error identifiers to expect.
"""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdt_errident
#
# Public Assignments:
# CONDUIT_REFRESH
# GIT_SNOOP
# FETCH_PRUNE
# CONDUIT_CONNECT
# PUSH_DELETE_REVIEW
# PUSH_DELETE_TRACKING
# MARK_BAD_LAND
# MARK_BAD_ABANDONED
# MARK_BAD_IN_REVIEW
# MARK_NEW_BAD_IN_REVIEW
# MARK_BAD_PRE_REVIEW
# MARK_OK_IN_REVIEW
# MARK_OK_NEW_REVIEW
# PUSH_DELETE_LANDED
# PUSH_LANDING_ARCHIVE
# PUSH_ABANDONED_ARCHIVE
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
#
# tryloop identifiers
#
# abdi_processrepos
# Each identifier below is an opaque string passed to error loggers; the
# section comments name the module that reports it (see the generated
# contents block at the top of this file).
CONDUIT_REFRESH = "conduit-refresh"
GIT_SNOOP = "git-snoop"

# abdi_processargs
FETCH_PRUNE = 'fetch-prune'
CONDUIT_CONNECT = 'conduit-connect'

# abdt_branch
PUSH_DELETE_REVIEW = 'push-delete-review'
PUSH_DELETE_TRACKING = 'push-delete-tracking'
MARK_BAD_LAND = 'mark-bad-land'
MARK_BAD_ABANDONED = 'mark-bad-abandoned'
MARK_BAD_IN_REVIEW = 'mark-bad-in-review'
MARK_NEW_BAD_IN_REVIEW = 'mark-new-bad-in-review'
MARK_BAD_PRE_REVIEW = 'mark-bad-pre-review'
MARK_OK_IN_REVIEW = 'mark-ok-in-review'
MARK_OK_NEW_REVIEW = 'mark-ok-new-review'
PUSH_DELETE_LANDED = 'push-delete-landed'
PUSH_LANDING_ARCHIVE = 'push-landing-archive'
PUSH_ABANDONED_ARCHIVE = 'push-abandoned-archive'
# -----------------------------------------------------------------------------
# Copyright (C) 2014 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "bloomberg/phabricator-tools",
"path": "py/abd/abdt_errident.py",
"copies": "4",
"size": "2610",
"license": "apache-2.0",
"hash": -5612355224501910000,
"line_mean": 31.625,
"line_max": 79,
"alpha_frac": 0.60651341,
"autogenerated": false,
"ratio": 3.4342105263157894,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 80
} |
"""A global module containing functions for managing the project."""
__author__ = 'wittawat'
import fsic
import os
try:
import cPickle as pickle
except:
import pickle
def get_root():
    """Return the absolute path of the root directory of the fsic package."""
    package_dir = os.path.dirname(fsic.__file__)
    return os.path.abspath(package_dir)
def result_folder():
    """Full path of the result/ folder holding experimental result files."""
    root = get_root()
    return os.path.join(root, 'result')
def data_folder():
    """Full path of the data folder under the package root."""
    root = get_root()
    return os.path.join(root, 'data')
def data_file(*relative_path):
    """Full path of a file under the data folder.

    The path components are relative to the data folder.
    """
    return os.path.join(data_folder(), *relative_path)
def load_data_file(*relative_path):
    """Unpickle and return the file at the given path under the data folder."""
    return pickle_load(data_file(*relative_path))
def ex_result_folder(ex):
    """Return the full path to the folder containing result files of the
    specified experiment.

    ex: a positive integer identifying the experiment.

    The folder is created on first use.
    """
    rp = result_folder()
    fpath = os.path.join(rp, 'ex%d' % ex)
    if not os.path.exists(fpath):
        # makedirs (not mkdir) so a missing result/ parent on a fresh
        # checkout does not raise OSError.
        os.makedirs(fpath)
    return fpath
def create_dirs(full_path):
    """Recursively create every missing directory along ``full_path``.

    The path is assumed to refer to a folder, not a file.
    """
    if os.path.exists(full_path):
        return
    os.makedirs(full_path)
def ex_result_file(ex, *relative_path):
    """Full path of the file identified by ``relative_path`` (a sequence of
    folder/file names) under the result folder of experiment ``ex``."""
    return os.path.join(ex_result_folder(ex), *relative_path)
def ex_save_result(ex, result, *relative_path):
    """Save a dictionary object result for the experiment ex. Serialization is
    done with pickle.
    EX: ex_save_result(1, result, 'data', 'result.p'). Save under result/ex1/data/result.p
    EX: ex_save_result(1, result, 'result.p'). Save under result/ex1/result.p
    """
    fpath = ex_result_file(ex, *relative_path)
    dir_path = os.path.dirname(fpath)
    create_dirs(dir_path)
    # Pickle streams are binary: open with 'wb', not 'w'.  Text mode raises
    # TypeError on Python 3 and can corrupt the stream on Windows.
    with open(fpath, 'wb') as f:
        # expect result to be a dictionary
        pickle.dump(result, f)
def ex_load_result(ex, *relative_path):
    """Load a result identified by the path from the experiment ex"""
    return pickle_load(ex_result_file(ex, *relative_path))
def ex_file_exists(ex, *relative_path):
    """True if the result file under the specified experiment folder exists."""
    return os.path.isfile(ex_result_file(ex, *relative_path))
def pickle_load(fpath):
    """Unpickle and return the object stored at ``fpath``.

    Raises ValueError when the path does not refer to an existing file.
    """
    if not os.path.isfile(fpath):
        raise ValueError('%s does not exist' % fpath)
    # Pickle streams are binary: open with 'rb', not 'r'.  Text mode fails
    # with a decode/type error on Python 3.
    with open(fpath, 'rb') as f:
        # expect a dictionary
        result = pickle.load(f)
    return result
| {
"repo_name": "Diviyan-Kalainathan/causal-humans",
"path": "Cause-effect/lib/fsic/glo.py",
"copies": "3",
"size": "2891",
"license": "mit",
"hash": 7400615233114957000,
"line_mean": 29.4315789474,
"line_max": 91,
"alpha_frac": 0.6599792459,
"autogenerated": false,
"ratio": 3.5824039653035937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5742383211203593,
"avg_score": null,
"num_lines": null
} |
# A global var. to remember autodiscover state.
# True only while autodiscover() is running; guards against re-entrant
# discovery triggered by the imports it performs.
LOADING = False
class StepAlreadyRegistered(Exception):
    """Raised by Registry.add_step when a step function name is registered twice."""
    pass
class StepNotFound(Exception):
    """Raised when a requested build step is not in the registry."""
    pass
class Registry(object):
    """Registry of build-step plugins, keyed by the function's ``__name__``."""

    # Class-level, so the table is shared; the module exposes a single
    # `registry` singleton in practice.
    _registry = {}

    def add_step(self, name, func):
        """Register ``func`` under its ``__name__`` with display name ``name``.

        Raises StepAlreadyRegistered when the function name is taken.
        """
        # Check if the plugin is already registered.
        if func.__name__ in self._registry:
            # Single-line message: the original literal used a backslash
            # continuation, which embedded a run of indentation spaces in
            # the error text.
            raise StepAlreadyRegistered(
                'The step %s has already been registered.' % func.__name__)
        self._registry[func.__name__] = {'name': name, 'func': func}

    def list_steps(self):
        """Return the mapping of all registered plugins."""
        return self._registry

    def get_step(self, name):
        """Return a zero-argument callable that resolves step ``name`` lazily.

        Resolution is deferred so that a missing plugin only raises
        StepNotFound when the stored build step is actually executed.
        """
        def get_inner_step():
            # Get the plugin.
            step = self._registry.get(name, {}).get('func', None)
            if not step:
                # No plugin found, raise an error.
                raise StepNotFound('Step %s not found.' % name)
            return step
        # Return the function.
        return get_inner_step
# Singleton instance of registry
# (presumably imported by each app's build_steps module to register steps
# -- confirm against the apps' build_steps code)
registry = Registry()
def autodiscover():
    """Import every installed app's ``build_steps`` module.

    Importing those modules triggers their registration calls.  The
    module-level LOADING flag guards against re-entrant discovery, since
    the imports performed below can themselves call autodiscover().
    """
    # Check if we already autodiscovering.
    global LOADING
    if LOADING:
        return
    LOADING = True
    try:
        # Late import some functions to do autodiscovery.
        # NOTE(review): `imp` and `django.utils.importlib` are deprecated in
        # modern Python/Django -- confirm the supported versions.
        import imp
        from django.utils.importlib import import_module
        from django.conf import settings
        # Check every installed app.
        for app in settings.INSTALLED_APPS:
            try:
                app_path = import_module(app).__path__
            except AttributeError:
                continue
            # We assume that plugins are stored in a build_steps module
            # within the app.
            try:
                imp.find_module('build_steps', app_path)
            except ImportError:
                continue
            # Load the build steps module to trigger the register call.
            import_module('%s.build_steps' % app)
    finally:
        # Reset even when an import above raises; otherwise every later
        # call would silently return without discovering anything.
        LOADING = False
# Just do the autodiscovery.
# NOTE(review): runs at import time, so importing this module pulls in the
# Django settings and every app's build_steps as a side effect.
autodiscover()
| {
"repo_name": "stephrdev/loetwerk",
"path": "journeyman/buildrunner/registry.py",
"copies": "1",
"size": "2064",
"license": "mit",
"hash": 5154279870106959000,
"line_mean": 28.0704225352,
"line_max": 74,
"alpha_frac": 0.6104651163,
"autogenerated": false,
"ratio": 4.372881355932203,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5483346472232203,
"avg_score": null,
"num_lines": null
} |
"""A globe is used to represent a given state of the world."""
from existenz.location import Location
from existenz.util_decorator import memoize
class Globe(object):
    """A Globe is used to represent a given state of the world."""

    def __init__(self, size=5):
        self._size = size
        self._total_locations = size * size
        # Row-major grid: location (x, y) is stored at index x * size + y.
        self._locations = []
        for x_coord in range(0, self._size):
            for y_coord in range(0, self._size):
                location = Location(x_coord, y_coord)
                self._locations.append(location)

    @property
    def locations(self):
        """A list of the locations in a given globe."""
        return self._locations

    @property
    def size(self):
        """The size of a side of the globe."""
        return self._size

    def get_location(self, x_coord, y_coord):
        """Retrieve a given location from given coordinates.

        :param x_coord: The abscissa of the coordinate.
        :type x_coord: int
        :param y_coord: The ordinate of the coordinate.
        :type y_coord: int
        :return: The location for the given coordinates.
        :rtype: existenz.location.Location
        """
        index = (x_coord * self._size) + y_coord
        x_out_of_bound = x_coord < 0 or x_coord >= self._size
        y_out_of_bound = y_coord < 0 or y_coord >= self._size
        # '>=': the last valid index is _total_locations - 1.  The original
        # '>' would have let index == _total_locations fall through to an
        # IndexError from the list access instead of the intended message.
        if index >= self._total_locations or x_out_of_bound or y_out_of_bound:
            raise IndexError('No coordinate (%s, %s)' % (x_coord, y_coord))
        return self._locations[index]

    @memoize
    def get_neighbors(self, x_coord, y_coord):
        """Retrieve the locations adjacent to the given coordinates.

        :param x_coord: The abscissa of the coordinates.
        :type x_coord: int
        :param y_coord: The ordinate of the coordinates.
        :type y_coord: int
        :return: A list of neighbors locations.
        :rtype: list(existenz.location.Location)
        """
        return list(self.locations[index] for index in
                    self._neighbors(x_coord, y_coord))

    def _neighbors(self, x_coord, y_coord):
        """Calculate the storage indexes of the 8 neighbors of a coordinate.

        Coordinates wrap around the edges (modulo size), so the world is
        toroidal.

        :param x_coord: The abscissa of the target coordinate.
        :type x_coord: int
        :param y_coord: The ordinate of the target coordinate.
        :type y_coord: int
        :return: list
        """
        indexes = list()
        for x_inc in [-1, 0, 1]:
            for y_inc in [-1, 0, 1]:
                if x_inc == 0 and y_inc == 0:
                    # Skip the central location.
                    continue
                # (renamed from the original's 'y_ordiante' typo)
                x_wrapped = (x_coord + x_inc) % self.size
                y_wrapped = (y_coord + y_inc) % self.size
                index = (self.size * x_wrapped) + y_wrapped
                indexes.append(index)
        return indexes
| {
"repo_name": "neoinsanity/existenz",
"path": "existenz/globe.py",
"copies": "1",
"size": "2850",
"license": "apache-2.0",
"hash": 3554721693191029000,
"line_mean": 35.0759493671,
"line_max": 77,
"alpha_frac": 0.5722807018,
"autogenerated": false,
"ratio": 3.8358008075370122,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4908081509337012,
"avg_score": null,
"num_lines": null
} |
# A glob module for genomespace paths
# Used to handle wildcard searches in a manner compatible with
# standard file globbing
import fnmatch
import os
import re
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
# Matches GenomeSpace datamanager file URLs; group 1 captures the server
# prefix up to and including 'file/' (compared in is_same_genomespace_server).
GENOMESPACE_URL_REGEX = re.compile(
    r'(http[s]?://.*/datamanager/(v[0-9]+.[0-9]+/)?file/)(\w+)/(\w+)')
# Regex for shell wildcards. Identical to standard globs except for '?' which
# cannot be used in a url since it denotes the start of a query string
MAGIC_CHECK = re.compile('[*[]')
def is_genomespace_url(url):
    """Return True when ``url`` matches the GenomeSpace datamanager pattern."""
    match = GENOMESPACE_URL_REGEX.match(url)
    return match is not None
def is_same_genomespace_server(url1, url2):
    """Truthy when both urls point at the same datamanager server prefix.

    Mirrors short-circuit ``and`` semantics: a falsy match object/None is
    returned when either url does not match the datamanager pattern.
    """
    first = GENOMESPACE_URL_REGEX.match(url1)
    second = GENOMESPACE_URL_REGEX.match(url2)
    return first and second and first.group(1) == second.group(1)
def gs_path_split(genomespace_url):
    """Split a genomespace url into (dirname, basename, query_str).

    The query string (returned with its leading '?', or '' when absent) is
    stripped from the url before the path split.
    """
    query = urlparse(genomespace_url).query
    if query:
        query = "?" + query
        genomespace_url = genomespace_url.replace(query, "")
    head, tail = os.path.split(genomespace_url)
    return head, tail, query
def has_magic(s):
    """True when ``s`` contains a shell wildcard character ('*' or '[')."""
    return bool(MAGIC_CHECK.search(s))
def find_magic_match(s):
    """Match object for the first wildcard in ``s``, or None when absent."""
    return MAGIC_CHECK.search(s)
def gs_iglob(client, gs_path):
    """
    Returns an iterator which yields genomespace paths matching a given
    pattern.
    E.g.
    https://dm.genomespace.org/datamanager/v1.0/file/Home/folder1/*.txt
    would return all files in folder1 with a txt extension such as:
    https://dm.genomespace.org/datamanager/v1.0/file/Home/folder1/a.txt
    https://dm.genomespace.org/datamanager/v1.0/file/Home/folder1/b.txt
    Matches Python glob module characteristics except for '?' which is
    unsupported.
    """
    # Ignore query_str while globbing, but add it back before returning
    dirname, basename, query_str = gs_path_split(gs_path)
    if not is_genomespace_url(dirname):
        return
    if not has_magic(gs_path):
        # No wildcards anywhere: yield the literal path.  Note existence
        # is only verified for directory patterns (trailing slash).
        if basename:
            yield gs_path
        else:
            # Patterns ending with a slash should match only directories
            if client.isdir(dirname):
                yield gs_path
        return
    # Wildcards in the directory part: recurse to expand them first.
    if has_magic(dirname):
        dirs = gs_iglob(client, dirname)
    else:
        dirs = [dirname]
    # _glob1 matches a wildcard basename against a listing; _glob0 handles
    # a literal basename.
    if has_magic(basename):
        glob_in_dir = _glob1
    else:
        glob_in_dir = _glob0
    for dirname in dirs:
        for name in glob_in_dir(client, dirname, basename):
            yield dirname + "/" + name + query_str
# See python glob module implementation, which this is closely based on
def _glob1(client, dirname, pattern):
if client.isdir(dirname):
listing = client.list(dirname + "/")
names = [entry.name for entry in listing.contents]
return fnmatch.filter(names, pattern)
else:
return []
def _glob0(client, dirname, basename):
if basename == '':
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
if client.isdir(dirname):
return [basename]
else:
return [basename]
return []
| {
"repo_name": "gvlproject/python-genomespaceclient",
"path": "genomespaceclient/gs_glob.py",
"copies": "1",
"size": "3337",
"license": "mit",
"hash": -8555704988381880000,
"line_mean": 28.5309734513,
"line_max": 77,
"alpha_frac": 0.6559784237,
"autogenerated": false,
"ratio": 3.687292817679558,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4843271241379558,
"avg_score": null,
"num_lines": null
} |
""" Agnostic API for sailplay.ru """
__version__ = "0.2.0"
__project__ = "sailplay"
__author__ = "Kirill Klenov <horneds@gmail.com>"
__license__ = "BSD"
import logging
import requests as rs
from contextlib import contextmanager
from copy import deepcopy
logger = logging.getLogger('sailplay')
rs_logger = logging.getLogger('requests')
# Dirty code for annoying sailplay API
SAILPLAY_API_DEFAULT_VERSION = 'v2'
# Maps an API method prefix (first url segment) to the API version that
# serves it; anything not listed falls back to 'v2' in
# SailPlayClient.request.
SAILPLAY_API_SCHEME = dict(
    (method, version)
    for version, methods in {
        'v1': ('login', 'purchases', 'ecommerce'),
        'v2': ('gifts', 'users', 'events', 'points', 'partners', 'basket'),
    }.items()
    for method in methods
)
class SailPlayException(Exception):
    """Raised on HTTP failures and on API responses with status 'error'."""
    pass
class SailPlayAPI(object):
    """Attribute-chaining proxy over a SailPlayClient.

    Every attribute (or item) access appends a url path segment; calling
    the proxy joins the segments and issues a GET through the client,
    e.g. ``client.api.users.points.add(amount=5)`` requests
    ``users/points/add``.
    """
    def __init__(self, client):
        self.__client = client
        # Make sure the client holds an auth token before any call.
        if not self.__client.params.get('token'):
            self.__client.login()
        self.session = []
    def __getattr__(self, name):
        # Unknown attributes become path segments; chaining returns self.
        self.session.append(name)
        return self
    # Item access (api['users']) behaves exactly like attribute access.
    __getitem__ = __getattr__
    def __call__(self, *args, **data):
        """Join the accumulated segments into a url and GET it."""
        return self.__client.get('/'.join(self.session), data=data)
class SailPlayClient(object):
    """ SailPlay client. """
    api_url = 'https://sailplay.ru/api'
    error = SailPlayException

    def __init__(self, pin_code, store_department_id, store_department_key,
                 token=None, silence=False, loglevel='INFO'):
        """Store connection parameters.

        :param pin_code: store PIN code
        :param store_department_id: department id
        :param store_department_key: department secret key
        :param token: pre-obtained auth token (fetched via login() if empty)
        :param silence: when True, API-level 'error' responses don't raise
        :param loglevel: logging level applied per request
        """
        # Explicit dict instead of the original `locals()`: locals() also
        # captured `self`, a circular reference which made
        # deepcopy(self.params) in ctx() deep-copy the entire client.
        self.params = dict(
            pin_code=pin_code,
            store_department_id=store_department_id,
            store_department_key=store_department_key,
            token=token,
            silence=silence,
            loglevel=loglevel,
        )

    @property
    def token(self):
        """Current auth token ('' when not logged in)."""
        return self.params.get('token', '')

    @property
    def credentials(self):
        """Credentials dict sent with every request."""
        return dict(
            pin_code=self.params.get('pin_code'),
            store_department_id=self.params.get('store_department_id'),
            store_department_key=self.params.get('store_department_key'),
            token=self.params.get('token'),
        )

    def login(self):
        """ Get API token. """
        json = self.get('login')
        self.params['token'] = json.get('token')
        return json

    def request(self, method, url, data=None):
        """ Request sailplay API. """
        action = url.split('/')[0]
        # Pick the API version that serves this endpoint (default v2).
        version = SAILPLAY_API_SCHEME.get(action, 'v2')
        url = "%s/%s/%s/" % (
            self.api_url, version, url.strip('/'))
        params = dict(self.credentials)
        loglevel = self.params.get('loglevel', 'INFO')
        logger.setLevel(loglevel.upper())
        rs_logger.setLevel(loglevel.upper())
        if data and method == 'GET':
            params.update(data)
        try:
            response = rs.api.request(method, url, params=params, data=data)
            response.raise_for_status()
        except rs.HTTPError as exc:
            raise self.error(exc)
        json = response.json()
        if json['status'] == 'error' and not self.params['silence']:
            raise self.error(json['message'].encode('utf-8'))
        logger.debug(json)
        return json

    def get(self, *args, **kwargs):
        """ Proxy to method get. """
        return self.request('GET', *args, **kwargs)

    def post(self, *args, **kwargs):
        """ Proxy to method post. """
        return self.request('POST', *args, **kwargs)

    @property
    def api(self):
        """ Return a fresh SailPlayAPI proxy bound to this client. """
        return SailPlayAPI(client=self)

    @contextmanager
    def ctx(self, **params):
        """ Temporarily override params for the duration of a with-block. """
        _params = deepcopy(self.params)
        try:
            self.params.update(params)
            yield self
        finally:
            self.params = _params
# pylama:ignore=D
| {
"repo_name": "klen/sailplay",
"path": "sailplay.py",
"copies": "1",
"size": "3632",
"license": "bsd-3-clause",
"hash": -4927140060038735000,
"line_mean": 25.5109489051,
"line_max": 76,
"alpha_frac": 0.5699339207,
"autogenerated": false,
"ratio": 3.7137014314928427,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9783635352192843,
"avg_score": 0,
"num_lines": 137
} |
""" a goaway lock """
import random
import threading
import thread
import uuid
import logging
import goaway.globalvars as globalvars
import goaway.rpc as rpc
logger = logging.getLogger(__name__)
class Lock(object):
    """A GoAway Lock.
    All locks are centralized for full ordering of acquire and releases.
    Supports usage as a context manager.
    """
    def __init__(self, name):
        self.name = name  # All acquires happen on the same name
        logger.debug("lock init [%s] on process [%s]", self.name, globalvars.proc_uuid)

    def get_uuid(self):
        # Globally unique identifier of this (thread, process) tuple.
        return "{}:{}".format(globalvars.proc_uuid, str(thread.get_ident()))

    def acquire(self):
        """Block until the lock master grants the lock to this holder.

        NOTE(review): re-POSTs in a loop until the master answers "ok";
        presumably rpc.rpc blocks server-side, otherwise this spins --
        confirm.
        """
        data = {"uuid": self.get_uuid(),
                "name": self.name}
        while True:
            resj = rpc.rpc("POST", self._master_url("lock/acquire"), data)
            if resj["ok"] == "ok":
                return

    def release(self):
        """Send the release notice from a daemon thread; doesn't block."""
        # Named `worker`: the original local was called `thread`, shadowing
        # the imported `thread` module that get_uuid() relies on.
        worker = threading.Thread(target=self._release_sync)
        worker.daemon = True
        worker.start()

    def _release_sync(self):
        """Send the release notice to the lock master (blocking)."""
        data = {"uuid": self.get_uuid(),
                "name": self.name}
        # Response intentionally ignored (the original bound it to an
        # unused local).
        rpc.rpc("POST", self._master_url("lock/release"), data)

    def __enter__(self):
        self.acquire()

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        # Propagate exceptions.
        return False

    def _master_url(self, url_subpath):
        """Create a URL for contacting the data master."""
        # The lock master is the first server listed in the config.
        master_server = globalvars.config.servers[0]
        return "http://{}:{}/{}".format(master_server.host, master_server.port, url_subpath)
| {
"repo_name": "anpere/goaway",
"path": "goaway/datatypes/lock.py",
"copies": "1",
"size": "1870",
"license": "mit",
"hash": 7371649368180832000,
"line_mean": 30.1666666667,
"line_max": 92,
"alpha_frac": 0.5935828877,
"autogenerated": false,
"ratio": 3.9368421052631577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5030424992963157,
"avg_score": null,
"num_lines": null
} |
"A Google Calendar Parser"
from datetime import datetime, date, timedelta
from time import strptime, mktime
from xml.sax.saxutils import unescape
from urllib2 import urlopen
# From Requirements.txt
from pytz import timezone
from icalendar.cal import Calendar, Event
from BeautifulSoup import BeautifulStoneSoup, Tag
TIME_FORMATS = (
"%a %b %d, %Y %I:%M%p",
"%a %b %d, %Y %I%p",
"%a %b %d, %Y",
"%Y-%m-%dT%H:%M:%S"
)
def _parse_time(time_str, reference_date=None):
"""\
Parses a calendar time string, and outputs a datetime object of the specified time.
Only compatible with the time formats listed in the TIME_FORMATS tuple.
'reference_date' is another time-string, used when the original time_str doesn't contain any date information.
"""
time_struct = None
if len(time_str.split()) == 1:
if "." in time_str:
time_str = time_str.rsplit('.', 1)[0]
else:
assert reference_date, "Hour-only time strings need a reference date string."
time_str = " ".join(reference_date.split()[:4]) + " " + time_str
for time_format in TIME_FORMATS:
try:
time_struct = strptime(time_str, time_format)
except ValueError:
pass
if time_struct == None:
raise ValueError("Unsopported time string format: %s" % (time_str))
return datetime.fromtimestamp(mktime(time_struct))
def _fix_timezone(datetime_obj, time_zone):
"""\
Adjusts time relative to the calendar's timezone,
then removes the datetime object's timezone property.
"""
if type(datetime_obj) is datetime and datetime_obj.tzinfo is not None:
return datetime_obj.astimezone(time_zone).replace(tzinfo=None)
elif type(datetime_obj) is date:
return datetime(datetime_obj.year, datetime_obj.month, datetime_obj.day)
return datetime_obj
def _multi_replace(string, replace_dict):
"Replaces multiple items in a string, where replace_dict consists of {value_to_be_removed: replced_by, etc...}"
for key, value in replace_dict.iteritems():
string = string.replace(str(key), str(value))
return string
def to_unicode_or_bust(obj, encoding='utf-8'):
    """Decode a byte string to unicode with ``encoding``; pass anything else
    through unchanged.  (Python 2 only: relies on basestring/unicode.)"""
    if isinstance(obj, basestring):
        if not isinstance(obj, unicode):
            obj = unicode(obj, encoding)
    return obj
def _normalize(data_string, convert_whitespace=False):
    "Removes various markup artifacts and returns a normal python string."
    cleaned = unescape(to_unicode_or_bust(data_string))
    # Replace lingering HTML entities / escape artifacts.
    cleaned = _multi_replace(cleaned, {
        '&nbsp;': ' ', '&quot;': '"', '&brvbar;': '|', "&#39;": "'", "\\": ""
    })
    cleaned = cleaned.strip()
    if convert_whitespace:
        # Collapse every run of whitespace to a single space.
        return " ".join(cleaned.split())
    return cleaned
class CalendarEvent(dict):
    """\
    A dict subclass whose main event properties can be read and written both
    as keys and as attributes; 'event["name"]' is the same as 'event.name'.
    Only the properties listed in __slots__ take part in attribute access:
    "name", "description", "location", "start_time", "end_time", "all_day",
    "repeats", "repeat_freq", "repeat_day", "repeat_month", "repeat_until"
    Events compare with >, >=, <, <= by their "start_time" values.
    """
    __slots__ = ( "name", "description", "location", "start_time", "end_time", "all_day",
                  "repeats", "repeat_freq", "repeat_day", "repeat_month", "repeat_until" )

    def __getattr__(self, key):
        # Whitelisted names resolve to dictionary entries; everything else
        # falls back to normal attribute lookup.
        if key not in self.__slots__:
            return dict.__getattribute__(self, key)
        return self[key]

    def __setattr__(self, key, value):
        if key not in self.__slots__:
            raise AttributeError("dict attributes are not modifiable.")
        self[key] = value

    def __lt__(self, other):
        assert type(other) is CalendarEvent, "Both objects must be CalendarEvents to compare."
        return self["start_time"] < other["start_time"]

    def __le__(self, other):
        assert type(other) is CalendarEvent, "Both objects must be CalendarEvents to compare."
        return self["start_time"] <= other["start_time"]

    def __gt__(self, other):
        assert type(other) is CalendarEvent, "Both objects must be CalendarEvents to compare."
        return self["start_time"] > other["start_time"]

    def __ge__(self, other):
        assert type(other) is CalendarEvent, "Both objects must be CalendarEvents to compare."
        return self["start_time"] >= other["start_time"]
class CalendarParser(object):
    """\
    A practical calendar parser for Google Calendar's two output formats: XML, and iCal (.ics).
    Stores events as a list of dictionaries with self-describing attributes.
    Accepts url resources as well as local xml/ics files.
    Certain fields/properties are not available when parsing ics resources.
    """
    # TODO: Accept calendarIDs and support google's REST api
    def __init__(self, ics_url=None, xml_url=None, ics_file=None, xml_file=None):
        # Exactly one resource (url or local file, ics or xml) is expected.
        self.ics_file = ics_file
        self.ics_url = ics_url
        self.xml_file = xml_file
        self.xml_url = xml_url
        # Calendar-level metadata, filled in by parse_xml/parse_ics.
        self.time_zone = None
        self.calendar = None
        self.title = ""
        self.subtitle = ""
        self.author = ""
        self.email = ""
        self.last_updated = None
        self.date_published = None
        # Parsed CalendarEvent instances.
        self.events = []

    def __len__(self):
        return len(self.events)

    def __iter__(self):
        return self.events.__iter__()

    def __reversed__(self):
        return reversed(self.events)

    def __contains__(self, item):
        # String membership checks match event names case-insensitively.
        if type(item) is not str:
            return item in self.events
        for event in self.events:
            if event["name"].lower() == item.lower():
                return True
        return False

    def __getitem__(self, item):
        # String keys look up events by name (case-insensitive); one match
        # returns the event, several return a list.  Ints index the list.
        if type(item) is str:
            event_list = []
            for event in self.events:
                if event["name"].lower() == item.lower():
                    event_list.append(event)
            if len(event_list) == 0:
                raise LookupError("'%s' is not an event in this calendar." % (item))
            if len(event_list) == 1:
                return event_list[0]
            else:
                return event_list
        else:
            return self.events[item]

    def keys(self):
        "Returns the names of all the parsed events, which may be used as lookup-keys on the parser object."
        return [event["name"] for event in self.events]

    def sort_by_latest(self, sort_in_place=False):
        "Returns a list of the parsed events, where the newest events are listed first."
        sorted_events = sorted(self.events, reverse=True)
        if sort_in_place:
            self.events = sorted_events
        return sorted_events

    def sort_by_oldest(self, sort_in_place=False):
        "Returns a list of the parsed events, where the oldest events are listed first."
        sorted_events = sorted(self.events)
        if sort_in_place:
            self.events = sorted_events
        return sorted_events

    def fetch_calendar(self, force_xml=False, force_ics=False):
        "Fetches the calendar data from an XML/.ics resource in preparation for parsing."
        cal_data = None
        # Urls take priority over local files, xml over ics.
        if self.xml_url:
            cal_data = urlopen(self.xml_url)
        elif self.ics_url:
            cal_data = urlopen(self.ics_url)
        elif self.xml_file:
            cal_data = open(self.xml_file, "rb")
        elif self.ics_file:
            cal_data = open(self.ics_file, "rb")
        else:
            raise UnboundLocalError("No calendar url or file path has been set.")
        cal_str = cal_data.read()
        cal_data.close()
        if (self.xml_url or self.xml_file) and not force_ics:
            self.calendar = BeautifulStoneSoup(_normalize(cal_str, True))
        elif (self.ics_url or self.ics_file) and not force_xml:
            self.calendar = Calendar.from_ical(cal_str)
        return self.calendar

    def parse_xml(self, overwrite_events=True):
        "Returns a generator of Event dictionaries from an XML atom feed."
        # Fixed duplicated operand: the original tested
        # `self.xml_url or self.xml_url`, ignoring xml_file resources.
        assert self.xml_url or self.xml_file, "No xml resource has been set."
        # NOTE(review): the metadata extraction below scrapes the Atom feed
        # by positional indexes and is fragile against feed layout changes.
        self.calendar = self.fetch_calendar(force_xml=True).contents[1]
        metadata = self.calendar.contents[1:3]
        self.title = metadata[1].contents[0].contents[0]
        self.subtitle = metadata[1].contents[1].next
        self.author = metadata[1].contents[6].next.next.next
        self.email = metadata[1].contents[6].next.contents[1].next
        self.time_zone = timezone(metadata[1].contents[6].contents[5].attrs[0][1])
        self.last_updated = _parse_time(metadata[0].next)
        self.date_published = _parse_time(
            metadata[1].contents[6].contents[5].next.next.contents[1].next)
        raw_events = self.calendar.contents[3:]
        if overwrite_events:
            self.events = []
        for event in raw_events:
            event_dict = CalendarEvent()
            event_dict["name"] = _normalize(event.next.next)
            event_dict["repeats"] = False
            for content in event.contents[2]:
                if isinstance(content, Tag):
                    content = content.contents[0]
                if "Recurring Event" in content:
                    event_dict["repeats"] = True
                elif event_dict["repeats"]:
                    if "First start:" in content:
                        rep_info = content.split()[2:-1]
                        rep_date = rep_info[0].split('-')
                        # Not enough info to determine how often the event repeats...
                        #event_dict['repeat_month'] = rep_date[1] # "YEARLY"
                        #event_dict['repeat_day'] = rep_date[2] # "MONTHLY"
                        rep_date = map(int, rep_date)
                        if len(rep_info) == 2:
                            rep_time = map(int, rep_info[1].split(':'))
                            event_dict["start_time"] = datetime( *(rep_date + rep_time) )
                        else:
                            event_dict["start_time"] = datetime(*rep_date)
                    elif "Duration:" in content:
                        # NOTE(review): assumes "First start:" was seen
                        # earlier, otherwise start_time is unset here.
                        seconds = int(content.split()[-1])
                        event_dict["end_time"] = event_dict["start_time"] + timedelta(seconds=seconds)
                elif "When: " in content:
                    when = event.contents[1].next.replace("When: ", "", 1)
                    if len(when.split()) > 4:
                        # Remove the timezone
                        when = when.rsplit(" ", 1)[0]
                    when = when.split(" to ")
                    if len(when) == 2:
                        start, end = when
                        event_dict["end_time"] = _parse_time(end, start)
                    else:
                        start = when[0]
                    event_dict["start_time"] = _parse_time(start)
                    # Midnight starts with no explicit end are all-day events.
                    if not "end_time" in event_dict \
                       and event_dict["start_time"].hour == 0 \
                       and event_dict["start_time"].minute == 0:
                        event_dict["all_day"] = True
                        event_dict["end_time"] = event_dict["start_time"] + timedelta(days=1)
                    else:
                        event_dict["all_day"] = False
                elif "Where: " in content:
                    event_dict["location"] = _normalize(content).replace("Where: ", "")
                elif "Event Description: " in content:
                    event_dict["description"] = _normalize(content).replace("Event Description: ", "")
            if overwrite_events:
                self.events.append(event_dict)
            yield event_dict

    def parse_ics(self, overwrite_events=True):
        "Returns a generator of Event dictionaries from an iCal (.ics) file."
        # Fixed duplicated operand: the original tested
        # `self.ics_url or self.ics_url`, ignoring ics_file resources.
        assert self.ics_url or self.ics_file, "No ics resource has been set."
        # Returns an icalendar.Calendar object.
        self.fetch_calendar(force_ics=True)
        self.time_zone = timezone(str(self.calendar["x-wr-timezone"]))
        self.title = str(self.calendar["x-wr-calname"])
        if overwrite_events:
            self.events = []
        for event in self.calendar.walk():
            if isinstance(event, Event):
                event_dict = CalendarEvent()
                if "SUMMARY" in event:
                    event_dict["name"] = _normalize(event["summary"])
                if "DESCRIPTION" in event:
                    event_dict["description"] = _normalize(event["description"])
                if "LOCATION" in event and event["location"]:
                    event_dict["location"] = _normalize(event["location"])
                if "DTSTART" in event:
                    event_dict["start_time"] = _fix_timezone(event["dtstart"].dt, self.time_zone)
                if "DTEND" in event:
                    event_dict["end_time"] = _fix_timezone(event["dtend"].dt, self.time_zone)
                # A midnight start spanning exactly one day is all-day.
                if event_dict["start_time"].hour == 0 \
                   and event_dict["start_time"].minute == 0 \
                   and (event_dict["end_time"] - event_dict["start_time"]) == timedelta(days=1):
                    event_dict["all_day"] = True
                else:
                    event_dict["all_day"] = False
                event_dict["repeats"] = False
                if "RRULE" in event:
                    rep_dict = event["RRULE"]
                    event_dict["repeats"] = True
                    event_dict["repeat_freq"] = rep_dict["FREQ"][0]
                    if event_dict["repeat_freq"] == "YEARLY":
                        event_dict["repeat_day"] = event_dict["start_time"].day
                        event_dict["repeat_month"] = event_dict["start_time"].month
                    if "BYDAY" in rep_dict:
                        event_dict["repeat_day"] = rep_dict["BYDAY"][0]
                    elif "BYMONTHDAY" in rep_dict:
                        event_dict["repeat_day"] = rep_dict["BYMONTHDAY"][0]
                    if "BYMONTH" in rep_dict:
                        event_dict["repeat_month"] = rep_dict["BYMONTH"][0]
                    if "UNTIL" in rep_dict:
                        event_dict["repeat_until"] = _fix_timezone(rep_dict["UNTIL"][0], self.time_zone)
                if overwrite_events:
                    self.events.append(event_dict)
                yield event_dict

    def parse_calendar(self, force_list=False, use_xml=False, use_ics=False, overwrite_events=True):
        "Parses the calendar at the specified resource path. Returns a generator of CalendarEvents."
        generator = None
        if (self.ics_url or self.ics_file) and (use_ics or not use_xml):
            generator = self.parse_ics(overwrite_events)
        elif (self.xml_url or self.xml_file) and (use_xml or not use_ics):
            generator = self.parse_xml(overwrite_events)
        if force_list:
            return [event for event in generator]
        else:
            return generator
| {
"repo_name": "dominicmeroux/Reading-In-and-Analyzing-Calendar-Data-by-Interfacing-Between-MySQL-and-Python",
"path": "calendar_parser.py",
"copies": "1",
"size": "15492",
"license": "mit",
"hash": -2630800464869314000,
"line_mean": 37.8270676692,
"line_max": 115,
"alpha_frac": 0.5562225665,
"autogenerated": false,
"ratio": 4.112556410937085,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5168778977437085,
"avg_score": null,
"num_lines": null
} |
"""A google Music skill."""
from random import shuffle
from gmusicapi import Mobileclient
# Skill display name and example phrases shown to the user.
name = 'Google Music'
examples = 'search for jimmy hendrix, play library, shuffle library.'
# Shared gmusicapi client and login state for this skill module.
api = Mobileclient()
logged_in = False
def results_to_tracks(results, app):
    """Yield one track instance (built with app.track_class) per result dict."""
    for data in results:
        artist = data.get('artist', 'Unknown Artist')
        album = data.get('album', 'Unknown Album')
        title = data.get('title', 'Unknown Track')
        track = app.track_class(artist, album, title, play_track)
        # Keep the raw result and owning app on the track for play_track.
        track.data = data
        track.app = app
        yield track
def play_track(track):
    """Resolve a stream URL for the track's id and ask its app to play it."""
    # Same precedence as the original nested .get chain: the first id
    # field present in the result data wins.
    track_id = None
    for key in ('storeId', 'nid', 'trackId', 'id'):
        if key in track.data:
            track_id = track.data[key]
            break
    url = api.get_stream_url(track_id)
    track.app.play_from_url(url)
def on_activate(app):
    """The skill has been activated: log in to Google Music if needed.

    Returns True (meaning: do not activate the skill) when credentials are
    missing or the login fails.
    """
    global logged_in
    if logged_in is False:
        if 'google_email' in app.config and 'google_password' in app.config:
            # .get: android_id may be absent from an older config file; the
            # original app.config['android_id'] raised KeyError in that case.
            if app.config.get('android_id'):
                id = app.config['android_id']
            else:
                id = api.FROM_MAC_ADDRESS
            app.output('Connecting to google...')
            logged_in = api.login(
                app.config['google_email'],
                app.config['google_password'],
                id
            )
            if logged_in is True:
                app.output('Logged in successfully')
            else:
                app.output(
                    'Failed to login. Please ensure your credentials are \
correct in the configuration'
                )
        else:
            app.output('There are no google credentials saved in the \
configuration. Exit then edit google_email and google_password in \
the configuration file %s.' % app.config_filename)
            # Seed empty credential slots so the user can fill them in.
            app.config['google_email'] = ''
            app.config['google_password'] = ''
            app.config['android_id'] = 0
    if logged_in is False:
        return True  # Don't activate the skill.
# Maps trigger-phrase regexes to handler functions; populated below.
phrases = {}
def play_library(app, match):
    """Queue every song in the user's library; shuffle first when asked."""
    verb = match.groups()[0]
    app.output('Loading Google Music library...')
    songs = api.get_all_songs()
    app.output('%sing library.' % verb)
    if verb == 'shuffle':
        shuffle(songs)
    app.clear_tracks()
    for entry in results_to_tracks(songs, app):
        app.add_track(entry)
    app.set_stream_position(-1)
phrases['(shuffle|play) .*library$'] = play_library
def do_search(app, match):
    """Search the music library and queue the results."""
    verb, search = match.groups()
    # Strip only a leading 'for ' word.  The original tested
    # startswith('for'), which also mangled queries like 'forest fire'
    # into 'est fire'.
    if search.startswith('for '):
        search = search[4:].strip()
    app.output('Searching for %s' % search)
    results = api.search(search)['song_hits']
    results = [r['track'] for r in results]
    if results:
        app.clear_tracks()
        for track in results_to_tracks(
            results,
            app
        ):
            app.add_track(track)
        app.stop_stream()
        return 'Playing results for %s' % search
phrases['(find|search|hunt) ([^$]+)$'] = do_search
| {
"repo_name": "chrisnorman7/helper",
"path": "skills/google_music.py",
"copies": "1",
"size": "3471",
"license": "mpl-2.0",
"hash": 861068548204634400,
"line_mean": 27.1680672269,
"line_max": 79,
"alpha_frac": 0.5211754538,
"autogenerated": false,
"ratio": 4.151913875598086,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5173089329398086,
"avg_score": null,
"num_lines": null
} |
"""A GO term, A, can be represented as DAG_a = (A, T_a, E_a), aka a GoSubDag"""
__copyright__ = "Copyright (C) 2020-present, DV Klopfenstein. All rights reserved."
__author__ = "DV Klopfenstein"
## import timeit
## from goatools.godag.prttime import prt_hms
class DagA:
    """A GO term, A, can be represented as DAG_a = (A, T_a, E_a), aka a GoSubDag"""

    def __init__(self, go_a, ancestors, go2depth, w_e, godag):
        # go_a:      GO ID of term A
        # ancestors: iterable of GO IDs above A (the rest of DAG_a)
        # go2depth:  mapping GO ID -> depth, used to order the ancestors
        # w_e:       edge-type weights, keyed by relationship name ('is_a', ...)
        # godag:     mapping GO ID -> GO term object
        self.go_a = go_a
        self.ancestors = ancestors
        #tic = timeit.default_timer()
        # All GO IDs in DAG_a (A plus its ancestors)
        self.goids = self._init_goids()
        #prt_hms(tic, '\nDagA INIT GO IDs')
        # Mapping GO ID -> s-value (semantic contribution to A)
        self.go2svalue = self._init_go2svalue(go2depth, w_e, godag)
        #prt_hms(tic, 'DagA SVALUES')

    def get_sv(self):
        """Get the semantic value of GO term A"""
        # SV(A) is the sum of the s-values of all terms in DAG_a
        return sum(self.go2svalue.values())

    def get_svalues(self, goids):
        """Get svalues for given IDs"""
        # Raises KeyError for IDs outside DAG_a
        s_go2svalue = self.go2svalue
        return [s_go2svalue[go] for go in goids]

    def _init_go2svalue(self, go2depth, w_e, godag):
        """S-value: the contribution of GO term, t, to the semantics of GO term, A"""
        #tic = timeit.default_timer()
        # Term A always contributes exactly 1.0
        go2svalue = {self.go_a: 1.0}
        if not self.ancestors:
            return go2svalue
        terms_a = self.goids
        # Edge weights for every relationship other than 'is_a'
        w_r = {r: v for r, v in w_e.items() if r != 'is_a'}
        #prt_hms(tic, 'DagA edge weights wo/is_a')
        # Walk ancestors deepest-first so that each ancestor's children that
        # lie inside DAG_a already have their s-values computed.
        # NOTE(review): assumes every child of an ancestor that is in terms_a
        # has already received an s-value (otherwise go2svalue[...] raises
        # KeyError) -- verify for DAGs mixing multiple relationship types.
        for ancestor_id in self._get_sorted(go2depth):
            goterm = godag[ancestor_id]
            weight = w_e['is_a']
            svals = [weight*go2svalue[o.item_id] for o in goterm.children if o.item_id in terms_a]
            for rel, weight in w_r.items():
                if rel in goterm.relationship_rev:
                    for cobj in goterm.relationship_rev[rel]:
                        if cobj.item_id in terms_a:
                            svals.append(weight*go2svalue[cobj.item_id])
            # The s-value of t is the maximum weighted contribution of its
            # children inside DAG_a
            if svals:
                go2svalue[ancestor_id] = max(svals)
                ## print(ancestor_id, max(svals))
        return go2svalue

    def _get_sorted(self, go2depth):
        """Get the sorted ancestors"""
        #tic = timeit.default_timer()
        # Sort ancestors deepest-first (descending depth)
        go2dep = {go: go2depth[go] for go in self.ancestors}
        go_dep = sorted(go2dep.items(), key=lambda t: t[1], reverse=True)
        gos, _ = zip(*go_dep)
        #prt_hms(tic, 'DagA SORTED')
        return gos

    def _init_goids(self):
        """Return all GO IDs in GO_a's GODAG"""
        goids = set(self.ancestors)
        goids.add(self.go_a)
        return goids
# Copyright (C) 2020-present, DV Klopfenstein. All rights reserved."
| {
"repo_name": "tanghaibao/goatools",
"path": "goatools/semsim/termwise/dag_a.py",
"copies": "1",
"size": "2624",
"license": "bsd-2-clause",
"hash": -1373124626002087400,
"line_mean": 36.4857142857,
"line_max": 98,
"alpha_frac": 0.5697408537,
"autogenerated": false,
"ratio": 2.9450056116722783,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8989312083146284,
"avg_score": 0.005086876445199129,
"num_lines": 70
} |
"""A GQL-based viewer for the Google App Engine Datastore."""
import cgi
import logging
import os
import re
import sys
import time
import urllib
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
import ndb
# HTML page template. Filled via %-formatting with a dict combining
# os.environ (APPLICATION_ID, CURRENT_VERSION_ID) and the handler's
# 'query', 'error', 'next' and 'data' entries.
FORM = """\
<html>
<head>
<title>Data Viewer - %(APPLICATION_ID)s - %(CURRENT_VERSION_ID)s</title>
</head>
<body>
<form method=get action=/dataviewer>
<input type=text size=100 name=query value="%(query)s">
<input type=submit>
</form>
<p style="font-weight:bold">%(error)s</p>
%(next)s
%(data)s
</body>
</html>
"""
class DataViewer(webapp.RequestHandler):
    """Request handler: run a GQL query and render the results as a table.

    Python 2 / App Engine code (old-style 'except X, e', dict.iteritems).
    """

    @ndb.toplevel
    def get(self):
        # Use an Expando-based connection so arbitrary kinds can be queried
        conn = ndb.make_connection(default_model=ndb.Expando)
        ndb.set_context(ndb.make_context(conn=conn))
        params = dict(os.environ)
        params['error'] = ''
        params['data'] = ''
        query_string = self.request.get('query')
        page_size = int(self.request.get('page') or 10)
        start_cursor = self.request.get('cursor')
        params['query'] = query_string or 'SELECT *'
        params['next'] = ''
        if query_string:
            # 'prefix' tracks which stage failed, for the error message
            prefix = 'parsing'
            try:
                query = ndb.gql(query_string)
                prefix = 'binding'
                query.bind()
                prefix = 'execution'
                cursor = None
                if start_cursor:
                    try:
                        cursor = ndb.Cursor.from_websafe_string(start_cursor)
                    except Exception:
                        # A bad cursor just restarts from the beginning
                        pass
                results, cursor, more = query.fetch_page(page_size,
                                                         start_cursor=cursor)
            except Exception, err:
                params['error'] = '%s error: %s.%s: %s' % (prefix,
                                                           err.__class__.__module__,
                                                           err.__class__.__name__,
                                                           err)
            else:
                if not results:
                    params['error'] = 'No query results'
                else:
                    # Collect the union of property names across all results
                    columns = set()
                    rows = []
                    for result in results:
                        if isinstance(result, ndb.Key):
                            # Keys-only query: show just the key
                            rows.append({'__key__': repr(result)})
                        else:
                            row = {'__key__': repr(result._key)}
                            for name, prop in sorted(result._properties.iteritems()):
                                columns.add(name)
                                values = prop.__get__(result)
                                row[name] = repr(values)
                            rows.append(row)
                    # Render an HTML table; cgi.escape is deprecated in
                    # Python 3 (html.escape) but correct for this py2 code.
                    data = []
                    data.append('<table border=1>')
                    data.append('<thead>')
                    data.append('<tr>')
                    columns = ['__key__'] + sorted(columns)
                    for col in columns:
                        data.append(' <th>%s</th>' % cgi.escape(col))
                    data.append('</tr>')
                    data.append('</thead>')
                    data.append('<tbody>')
                    for row in rows:
                        data.append('<tr>')
                        for col in columns:
                            if col not in row:
                                data.append(' <td></td>')
                            else:
                                data.append(' <td>%s</td>' % cgi.escape(row[col]))
                        data.append('</tr>')
                    data.append('</tbody>')
                    data.append('</table>')
                    params['data'] = '\n '.join(data)
                    if more:
                        # NOTE(review): 'next' shadows the builtin; harmless
                        # here but rename if this code is ever touched again.
                        next = ('<a href=/dataviewer?%s>Next</a>' %
                                urllib.urlencode([('query', query_string),
                                                  ('cursor', cursor.to_websafe_string()),
                                                  ('page', page_size),
                                                  ]))
                        params['next'] = next
        self.response.out.write(FORM % params)
# URL routing table and the module-level WSGI application object.
urls = [
    ('/dataviewer', DataViewer),
]

app = webapp.WSGIApplication(urls)
def main():
    """CGI entry point: run the WSGI application."""
    util.run_wsgi_app(app)


if __name__ == '__main__':
    main()
| {
"repo_name": "bslatkin/8-bits",
"path": "appengine-ndb/demo/dataviewer.py",
"copies": "1",
"size": "3739",
"license": "apache-2.0",
"hash": -8574546823150788000,
"line_mean": 28.2109375,
"line_max": 77,
"alpha_frac": 0.4872960685,
"autogenerated": false,
"ratio": 4.05531453362256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5042610602122559,
"avg_score": null,
"num_lines": null
} |
# a grab-bag of simple optimization techniques
# while we rely on the C compiler to do the heavy lifting,
# it's reluctant to do anything to floating point arithmetic
# in case it changes the behavior of the program. We have
# a freer hand because
# a) this is for drawing pretty pictures, not calculating missile
# trajectories or anything
# b) we always ignore overflow, NaN, etc
import instructions
import graph
# Optimization flag bits, OR-able into the 'flags' argument of T.optimize().
Nothing = 0
Peephole = 1
ConstantPropagation = 2
class FlowGraph:
    """Builds a control flow graph from a sequence of instructions."""

    def __init__(self):
        pass

    def build(self, insns):
        """Create one graph node per instruction and record, for each node,
        which values the instruction defines and which it uses."""
        self.control = graph.T()
        self.define = {}
        self.use = {}
        for candidate in insns:
            if not isinstance(candidate, instructions.Insn):
                continue
            node = self.control.newNode()
            for written in candidate.dest():
                self.define.setdefault(node, []).append(written)
            for read in candidate.source():
                self.use.setdefault(node, []).append(read)
class T:
    """Holds overall optimization logic.

    Each pass takes a list of instruction objects (see the 'instructions'
    module) and returns a (possibly new) list of instructions.
    """

    def __init__(self):
        pass

    def peephole_binop(self, insn):
        """Simplify a binary op when one or both operands are constants.

        Returns a replacement instruction, or 'insn' unchanged when no
        simplification applies.
        """
        left = insn.src[0]
        right = insn.src[1]
        if isinstance(left, instructions.ConstArg):
            if isinstance(right, instructions.ConstArg):
                # both args constant, replace with new const
                return insn.const_eval()
            else:
                const_index = 0
                other_index = 1
        else:
            if isinstance(right, instructions.ConstArg):
                const_index = 1
                other_index = 0
            else:
                # neither are constant, we can't do anything
                return insn
        if insn.op == "*":
            # 1 * n => n
            if insn.src[const_index].is_one():
                return instructions.Move(
                    [insn.src[other_index]],
                    insn.dst)
            # 0 * n => 0 (safe because we deliberately ignore NaN/overflow)
            if insn.src[const_index].is_zero():
                return instructions.Move(
                    [insn.src[const_index]],
                    insn.dst)
        return insn

    def peephole_insn(self, insn):
        """Return an optimized version of this instruction, if possible,
        or the same instruction if no change is practical."""
        if isinstance(insn, instructions.Binop):
            return self.peephole_binop(insn)
        return insn

    def peephole(self, insns):
        """Perform some straightforward algebraic simplifications
        which don't require any global knowledge."""
        out_insns = []
        for insn in insns:
            result = self.peephole_insn(insn)
            if result:
                out_insns.append(result)
        return out_insns

    def constant_propagation(self, insns):
        """Propagate constants through the instruction stream.

        If we have:
           t = const
           a = t op b
        then we can replace that with:
           a = const op b
        using dataflow analysis.

        Not implemented yet. The previous version implicitly returned None,
        which would have destroyed the instruction list had this pass ever
        been invoked; until the dataflow analysis exists, return the
        instructions unchanged.
        """
        return insns

    def optimize(self, flags, insns):
        """Run the passes selected by the 'flags' bitmask over 'insns'."""
        if flags & Peephole:
            insns = self.peephole(insns)
        if flags & ConstantPropagation:
            # Currently a no-op pass; see constant_propagation().
            insns = self.constant_propagation(insns)
        return insns
| {
"repo_name": "ericchill/gnofract4d",
"path": "fract4d/optimize.py",
"copies": "1",
"size": "3195",
"license": "bsd-3-clause",
"hash": -9207362253584944000,
"line_mean": 27.7837837838,
"line_max": 72,
"alpha_frac": 0.5436619718,
"autogenerated": false,
"ratio": 4.271390374331551,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.531505234613155,
"avg_score": null,
"num_lines": null
} |
"""a graphical config manager for StaSh"""
import os
import ast
import threading
import console
import ui
from stash.system.shcommon import _STASH_CONFIG_FILES
import pythonista_add_action as paa
# StaSh injects itself into the globals of scripts it runs
_stash = globals()["_stash"]

# View orientations allowed when presenting dialogs
ORIENTATIONS = ("landscape", "landscape_left", "landscape_right")

# define option types (widget kind used to edit each config option)
TYPE_BOOL = 1
TYPE_INT = 2
TYPE_STR = 3
TYPE_FILE = 4  # NotImplemented
TYPE_COLOR = 5
TYPE_CHOICE = 6
TYPE_LABEL = 7
TYPE_COMMAND = 9

# Path of the config file this GUI edits
CONFIG_PATH = os.path.join(
    os.getenv("STASH_ROOT"),  # not using shcommons._STASH_ROOT here
    _STASH_CONFIG_FILES[0],
)
# define functions for commands
@ui.in_background
def visit_homepage():
    """Open the StaSh homepage in a modal web view."""
    main_view = cfg_view  # [global] the main view
    main_view.subview_open = True
    browser = ui.WebView()
    browser.present()
    browser.load_url("https://www.github.com/ywangd/stash/")
    browser.wait_modal()
    main_view.subview_open = False
@ui.in_background
def add_editor_action():
    """Add an entry to the editor's 'wrench' menu that runs launch_stash.py."""
    main_view = cfg_view  # [global] the main view
    main_view.ai.start()
    try:
        script_path = "/launch_stash.py"  # TODO: auto-detect
        paa.add_action(script_path, "monitor", "000000", "StaSh")
        paa.save_defaults()
    finally:
        # always stop the spinner, even when add_action fails
        main_view.ai.stop()
# define all options as a dict of:
# section -> list of dicts of
#    display_name: str
#    option_name: str (or None for LABEL/COMMAND rows)
#    type: int
# Extra keys per type: TYPE_CHOICE needs "choices", TYPE_LABEL needs
# "value", TYPE_COMMAND needs "command" (shell string or callable).
OPTIONS = {
    "system": [
        {
            "display_name": "Resource File",
            "option_name": "rcfile",
            "type": TYPE_STR,
        },
        {
            "display_name": "Show Traceback",
            "option_name": "py_traceback",
            "type": TYPE_BOOL,
        },
        {
            "display_name": "Enable Debugger",
            "option_name": "py_pdb",
            "type": TYPE_BOOL,
        },
        {
            # NOTE(review): the display name says "Encode Input as UTF-8"
            # but the option written is 'ipython_style_history_search' --
            # one of the two looks wrong; confirm against the StaSh config
            # defaults before changing either side.
            "display_name": "Encode Input as UTF-8",
            "option_name": "ipython_style_history_search",
            "type": TYPE_BOOL,
        },
        {
            "display_name": "Thread Type",
            "option_name": "thread_type",
            "type": TYPE_CHOICE,
            "choices": ("ctypes", "traced"),
        },
    ],
    "display": [
        {
            "display_name": "Font Size",
            "option_name": "TEXT_FONT_SIZE",
            "type": TYPE_INT,
        },
        {
            "display_name": "Button Font Size",
            "option_name": "BUTTON_FONT_SIZE",
            "type": TYPE_INT,
        },
        {
            "display_name": "Background Color",
            "option_name": "BACKGROUND_COLOR",
            "type": TYPE_COLOR,
        },
        {
            "display_name": "Text Color",
            "option_name": "TEXT_COLOR",
            "type": TYPE_COLOR,
        },
        {
            "display_name": "Tint Color",
            "option_name": "TINT_COLOR",
            "type": TYPE_COLOR,
        },
        {
            "display_name": "Indicator Style",
            "option_name": "INDICATOR_STYLE",
            "type": TYPE_CHOICE,
            "choices": (
                "default",
                "black",
                "white",
            ),
        },
        {
            "display_name": "Max History Length",
            "option_name": "HISTORY_MAX",
            "type": TYPE_INT,
        },
        {
            "display_name": "Max Buffer",
            "option_name": "BUFFER_MAX",
            "type": TYPE_INT,
        },
        {
            "display_name": "Max Autocompletion",
            "option_name": "AUTO_COMPLETION_MAX",
            "type": TYPE_INT,
        },
        {
            "display_name": "Virtual Keys",
            "option_name": "VK_SYMBOLS",
            "type": TYPE_STR,
        },
    ],
    "StaSh": [
        {
            "display_name": "Version",
            "option_name": None,
            "type": TYPE_LABEL,
            "value": _stash.__version__,
        },
        {
            "display_name": "Update",
            "option_name": None,
            "type": TYPE_COMMAND,
            "command": "selfupdate",
        },
        {
            "display_name": "Create Editor Shortcut",
            "option_name": None,
            "type": TYPE_COMMAND,
            "command": add_editor_action,
        },
        {
            "display_name": "Visit Homepage",
            "option_name": None,
            "type": TYPE_COMMAND,
            "command": "webviewer -f -m https://www.github.com/ywangd/stash/",
        },
    ],
}
# section order (display order of the table sections in the GUI)
SECTIONS = [
    "StaSh",
    "system",
    "display",
]
class ColorPicker(object):
    """
    This object will prompt the user for a color.
    Parts of this are copied from the pythonista examples.
    TODO: rewrite as a subclass of ui.View()
    """

    def __init__(self, default=(0.0, 0.0, 0.0)):
        """default: initial (r, g, b) color, each channel in 0.0 - 1.0."""
        self.r, self.g, self.b, = default
        self.view = ui.View()
        self.view.background_color = "#ffffff"
        # one slider per channel, tinted in its channel color
        self.rslider = ui.Slider()
        self.rslider.continuous = True
        self.rslider.value = default[0]
        self.rslider.tint_color = "#ff0000"
        self.gslider = ui.Slider()
        self.gslider.continuous = True
        self.gslider.value = default[1]
        self.gslider.tint_color = "#00ff00"
        self.bslider = ui.Slider()
        self.bslider.continuous = True
        self.bslider.value = default[2]
        self.bslider.tint_color = "#0000ff"
        # live preview swatch of the selected color
        self.preview = ui.View()
        self.preview.background_color = self.rgb
        self.preview.border_width = 1
        self.preview.border_color = "#000000"
        self.preview.corner_radius = 5
        self.rslider.action = self.gslider.action = self.bslider.action = self.slider_action
        self.colorlabel = ui.Label()
        self.colorlabel.text = self.hexcode
        self.colorlabel.alignment = ui.ALIGN_CENTER
        self.view.add_subview(self.rslider)
        self.view.add_subview(self.gslider)
        self.view.add_subview(self.bslider)
        self.view.add_subview(self.preview)
        self.view.add_subview(self.colorlabel)
        # layout: preview + label on the left half, sliders on the right
        w = self.view.width / 2.0
        self.preview.width = w - (w / 10.0)
        self.preview.x = w / 10.0
        hd = self.view.height / 10.0
        self.preview.height = (self.view.height / 3.0) * 2.0 - (hd * 2)
        self.preview.y = hd
        self.preview.flex = "BRWH"
        self.colorlabel.x = self.preview.x
        self.colorlabel.y = (hd * 2) + self.preview.height
        self.colorlabel.height = (self.view.height / 3.0) * 2.0 - (hd * 2)
        self.colorlabel.width = self.preview.width
        self.colorlabel.flex = "BRWH"
        self.rslider.x = self.gslider.x = self.bslider.x = w * 1.1
        self.rslider.width = self.gslider.width = self.bslider.width = w * 0.8
        self.rslider.flex = self.gslider.flex = self.bslider.flex = "LWHTB"
        h = self.view.height / 9.0
        self.rslider.y = h * 2
        self.gslider.y = h * 4
        self.bslider.y = h * 6
        self.rslider.height = self.gslider.height = self.bslider.height = h

    def slider_action(self, sender):
        """called when a slider was moved"""
        self.r = self.rslider.value
        self.g = self.gslider.value
        self.b = self.bslider.value
        self.preview.background_color = self.rgb
        self.colorlabel.text = self.hexcode

    @property
    def hexcode(self):
        """returns the selected color as a html-like hexcode"""
        # rgb_255 yields floats; "%X" requires integers (a float raises
        # TypeError on Python 3), so truncate each channel first.
        r, g, b = self.rgb_255
        return "#%.02X%.02X%.02X" % (int(r), int(g), int(b))

    @property
    def rgb(self):
        """
        returns the selected color as a tuple like (1.0, 1.0, 1.0)
        """
        return (self.r, self.g, self.b)

    @property
    def rgb_255(self):
        """
        returns the selected color as a rgb tuple like (255, 255, 255)
        (channels are floats, scaled from the 0.0 - 1.0 slider values)
        """
        r, g, b = self.rgb
        return (r * 255, g * 255, b * 255)

    def get_color(self):
        """
        shows the view, waits until it is closed and then returns the
        selected color.
        """
        self.view.present(
            "sheet",
            orientations=ORIENTATIONS,
        )
        self.view.wait_modal()
        return self.rgb
class ConfigView(ui.View):
    """
    The main GUI.

    A table view listing all OPTIONS grouped by SECTIONS; each row embeds
    the widget matching the option's TYPE_* constant. Changes are written
    straight back to the StaSh config file.
    """

    def __init__(self):
        ui.View.__init__(self)
        self.background_color = "#ffffff"
        self.table = ui.TableView()
        # this view serves as both delegate and data source for the table
        self.table.delegate = self.table.data_source = self
        self.table.flex = "WH"
        self.add_subview(self.table)
        # centered activity indicator shown while commands run
        self.ai = ui.ActivityIndicator()
        self.ai.style = ui.ACTIVITY_INDICATOR_STYLE_WHITE_LARGE
        self.ai.hides_when_stopped = True
        self.ai.x = self.width / 2.0 - (self.ai.width / 2.0)
        self.ai.y = self.height / 2.0 - (self.ai.height / 2.0)
        self.ai.flex = "LRTB"
        self.ai.background_color = "#000000"
        self.ai.alpha = 0.7
        self.ai.corner_radius = 5
        self.add_subview(self.ai)
        # True while a modal subview (webview, color picker, ...) is open
        self.subview_open = False
        # the textfield currently being edited, if any
        self.cur_tf = None
        self.hide_kb_button = ui.ButtonItem(
            "Hide Keyboard",
            action=self.hide_keyboard,
            enabled=False,
        )
        self.right_button_items = (self.hide_kb_button,)

    def show(self):
        """shows the view and starts a thread."""
        self.present(orientations=ORIENTATIONS)
        # launch a background thread
        # we can not use ui.in_background here
        # because some dialogs would not open anymoe
        thr = threading.Thread(target=self.show_messages)
        thr.daemon = True
        thr.start()

    def show_messages(self):
        """shows some warnings and tips."""
        console.alert(
            "Info",
            "If StaSh does not launch anymore after you changed the config, run the 'launch_stash.py' script with \n'--no-cfgfile'.",
            "Ok",
            hide_cancel_button=True,
        )
        # wait until the main view is dismissed (not just a subview)
        while True:
            self.wait_modal()
            if not self.subview_open:
                break
        console.alert(
            "Info",
            "Some changes may only be visible after restarting StaSh and/or Pythonista.",
            "Ok",
            hide_cancel_button=True,
        )

    # data source and delegate functions. see docs
    def tableview_number_of_sections(self, tv):
        return len(SECTIONS)

    def tableview_number_of_rows(self, tv, section):
        sn = SECTIONS[section]
        return len(OPTIONS[sn])

    def tableview_cell_for_row(self, tv, section, row):
        # Build a cell whose accessory widget matches the option type.
        sn = SECTIONS[section]
        info = OPTIONS[sn][row]
        otype = info["type"]
        if otype == TYPE_LABEL:
            # read-only row: show the value in the detail label
            cell = ui.TableViewCell("value1")
            cell.detail_text_label.text = str(info["value"])
        else:
            cell = ui.TableViewCell("default")
        cell.flex = ""
        if otype == TYPE_BOOL:
            switch = ui.Switch()
            switch.value = _stash.config.getboolean(
                sn, info["option_name"]
            )
            i = (sn, info["option_name"])
            # bind (section, option) via default args to avoid late binding
            callback = lambda s, self=self, i=i: self.switch_changed(s, i)
            switch.action = callback
            cell.content_view.add_subview(switch)
            switch.y = (cell.height / 2.0) - (switch.height / 2.0)
            switch.x = (cell.width - switch.width) - (cell.width / 20)
            switch.flex = "L"
        elif otype == TYPE_CHOICE:
            seg = ui.SegmentedControl()
            seg.segments = info["choices"]
            try:
                cur = _stash.config.get(sn, info["option_name"])
                curi = seg.segments.index(cur)
            except:
                # NOTE(review): bare except also hides config errors;
                # intent appears to be "no segment selected" on any failure
                curi = -1
            seg.selected_index = curi
            i = (sn, info["option_name"])
            callback = lambda s, self=self, i=i: self.choice_changed(s, i)
            seg.action = callback
            cell.content_view.add_subview(seg)
            seg.y = (cell.height / 2.0) - (seg.height / 2.0)
            seg.x = (cell.width - seg.width) - (cell.width / 20)
            seg.flex = "LW"
        elif otype == TYPE_COLOR:
            b = ui.Button()
            rawcolor = _stash.config.get(sn, info["option_name"])
            color = ast.literal_eval(rawcolor)
            rgb255color = color[0] * 255, color[1] * 255, color[2] * 255
            b.background_color = color
            b.title = "#%.02X%.02X%.02X" % rgb255color
            # black text on light colors, white on dark ones
            b.tint_color = ((0, 0, 0) if color[0] >= 0.5 else (1, 1, 1))
            i = (sn, info["option_name"])
            callback = lambda s, self=self, i=i: self.choose_color(s, i)
            b.action = callback
            cell.content_view.add_subview(b)
            b.width = (cell.width / 6.0)
            b.height = ((cell.height / 4.0) * 3.0)
            b.y = (cell.height / 2.0) - (b.height / 2.0)
            b.x = (cell.width - b.width) - (cell.width / 20)
            b.flex = "LW"
            b.border_color = "#000000"
            b.border_width = 1
        elif otype in (TYPE_STR, TYPE_INT):
            tf = ui.TextField()
            tf.alignment = ui.ALIGN_RIGHT
            tf.autocapitalization_type = ui.AUTOCAPITALIZE_NONE
            tf.autocorrection_type = False
            tf.clear_button_mode = "while_editing"
            tf.text = _stash.config.get(sn, info["option_name"])
            tf.delegate = self
            i = (sn, info["option_name"])
            callback = lambda s, self=self, i=i: self.str_entered(s, i)
            tf.action = callback
            if otype == TYPE_STR:
                tf.keyboard_type = ui.KEYBOARD_DEFAULT
            elif otype == TYPE_INT:
                tf.keyboard_type = ui.KEYBOARD_NUMBER_PAD
            tf.flex = "LW"
            cell.add_subview(tf)
            tf.width = (cell.width / 6.0)
            tf.height = ((cell.height / 4.0) * 3.0)
            tf.y = (cell.height / 2.0) - (tf.height / 2.0)
            tf.x = (cell.width - tf.width) - (cell.width / 20)
        elif otype == TYPE_FILE:
            # incomplete!
            b = ui.Button()
            fp = _stash.config.get(sn, info["option_name"])
            fn = fp.replace(os.path.dirname(fp), "", 1)
            b.title = fn
            i = (sn, info["option_name"])
            callback = lambda s, self=self, i=i, f=fp: self.choose_file(s, i, f)
            b.action = callback
            cell.content_view.add_subview(b)
            b.width = (cell.width / 6.0)
            b.height = ((cell.height / 4.0) * 3.0)
            b.y = (cell.height / 2.0) - (b.height / 2.0)
            b.x = (cell.width - b.width) - (cell.width / 20)
            b.flex = "LWH"
        elif otype == TYPE_COMMAND:
            # a full-width button that either runs a shell command string
            # or calls a python callable
            b = ui.Button()
            b.title = info["display_name"]
            cmd = info["command"]
            if isinstance(cmd, (str, unicode)):
                f = lambda c=cmd: _stash(c, add_to_history=False)
            else:
                f = lambda c=cmd: cmd()
            callback = lambda s, self=self, f=f: self.run_func(f)
            b.action = callback
            cell.content_view.add_subview(b)
            b.flex = "WH"
            b.frame = cell.frame
            cell.remove_subview(cell.text_label)
        if otype != TYPE_COMMAND:
            title = info["display_name"]
        else:
            # command buttons carry their own title
            title = ""
        cell.text_label.text = title
        return cell

    def tableview_title_for_header(self, tv, section):
        return SECTIONS[section].capitalize()

    def tableview_can_delete(self, tv, section, row):
        return False

    def tableview_can_move(self, tv, section, row):
        return False

    def tableview_did_select(self, tv, section, row):
        # deselect row
        tv.selected_row = (-1, -1)

    def textfield_did_begin_editing(self, tf):
        # remember the active textfield so hide_keyboard() can close it
        self.cur_tf = tf
        self.hide_kb_button.enabled = True

    def keyboard_frame_did_change(self, frame):
        """called when the keyboard appears/disappears."""
        h = frame[3]
        # shrink the table so the keyboard does not cover it
        self.table.height = self.height - h
        if h == 0:
            self.hide_kb_button.enabled = False

    def save(self):
        """saves the config."""
        with open(CONFIG_PATH, "w") as f:
            _stash.config.write(f)

    def hide_keyboard(self, sender):
        """hides the keyboard."""
        if self.cur_tf is None:
            return
        self.cur_tf.end_editing()
        self.cur_tf = None
        self.hide_kb_button.enabled = False

    # callbacks
    @ui.in_background
    def switch_changed(self, switch, name):
        """called when a switch was changed."""
        section, option = name
        v = ("1" if switch.value else "0")
        _stash.config.set(section, option, v)
        self.save()

    @ui.in_background
    def choice_changed(self, seg, name):
        """called when a segmentedcontroll was changed."""
        section, option = name
        v = seg.segments[seg.selected_index]
        _stash.config.set(section, option, v)
        self.save()

    @ui.in_background
    def choose_color(self, b, name):
        """called when the user wants to change a color."""
        section, option = name
        cur = b.background_color[:3]
        picker = ColorPicker(cur)
        self.subview_open = True
        rgb = picker.get_color()
        self.subview_open = False
        _stash.config.set(section, option, str(rgb))
        # refresh so the button shows the new color
        self.table.reload_data()
        self.save()

    @ui.in_background
    def str_entered(self, tf, name):
        """called when a textfield ended editing."""
        section, option = name
        text = tf.text
        _stash.config.set(section, option, text)
        self.save()

    @ui.in_background
    def run_func(self, f):
        """run a function while showing an ActivityIndicator()"""
        self.ai.start()
        self.subview_open = True  # a subview may have been opened
        try:
            f()
        finally:
            self.subview_open = False
            self.ai.stop()
if __name__ == "__main__":
    # main code
    # cfg_view is read as a module-level global by the callbacks above
    cfg_view = ConfigView()
    cfg_view.show()
"repo_name": "cclauss/stash",
"path": "bin/easy_config.py",
"copies": "1",
"size": "14853",
"license": "mit",
"hash": -2005808375103440000,
"line_mean": 25.1975308642,
"line_max": 124,
"alpha_frac": 0.6371776745,
"autogenerated": false,
"ratio": 2.679595886703951,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8303648550614098,
"avg_score": 0.10262500211797065,
"num_lines": 567
} |
"""A Graph of Nodes."""
from __future__ import absolute_import, print_function
import logging
import pickle
import warnings
from ascii_canvas import canvas, item
from .errors import CycleError
from .evaluator import LinearEvaluator, ThreadedEvaluator, \
LegacyMultiprocessingEvaluator
from .plug import InputPlug, OutputPlug, InputPlugGroup
from .utilities import deserialize_graph
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
log = logging.getLogger(__name__)
class Graph(object):
"""A graph of Nodes."""
    def __init__(self, name=None, nodes=None):
        """Initialize the list of Nodes, inputs and outputs."""
        # Fall back to the class name so subclasses get a sensible default
        self.name = name or self.__class__.__name__
        self.nodes = nodes or []
        # Promoted plugs, keyed by their promoted name (see add_plug)
        self.inputs = {}
        self.outputs = {}
    def __unicode__(self):
        """Display the Graph as its ASCII-art node representation."""
        return self.node_repr()
    def __str__(self):
        """Show all input and output Plugs."""
        # encode/decode round-trip keeps this working on both py2 and py3
        return self.__unicode__().encode('utf-8').decode()
    def __getitem__(self, key):
        """Grant access to Nodes via their name."""
        for node in self.nodes:
            if node.name == key:
                return node

        # Search through subgraphs if no node found on graph itself.
        # The key is split on the first and the last dot, so any middle
        # segments of a multi-dot key are effectively ignored.
        if "." in key:
            subgraph_name = key.split(".")[0]
            node_name = key.split(".")[-1]
            for node in self.all_nodes:
                if node.name == node_name and node.graph.name == subgraph_name:
                    return node

        raise KeyError(
            "Graph does not contain a Node named '{0}'. "
            "If the node is part of a subgraph of this graph, use this "
            "form to access the node: '{{subgraph.name}}.{{node.name}}', "
            "e.g. 'sub.node'".format(key))
@property
def all_nodes(self):
"""Expand the graph with all its subgraphs into a flat list of nodes.
Please note that in this expanded list, the node names are no longer
guaranteed to be unique!
Returns:
(list of INode): All nodes, including the nodes from subgraphs
"""
nodes = [n for n in self.nodes]
for subgraph in self.subgraphs.values():
nodes += subgraph.nodes
return list(set(nodes))
@property
def subgraphs(self):
"""All other graphs that the nodes of this graph are connected to.
Returns:
A dict in the form of ``{graph.name: graph}``
"""
subgraphs = {}
for node in self.nodes:
for downstream in node.downstream_nodes:
if downstream.graph is not self:
subgraphs[downstream.graph.name] = downstream.graph
for upstream in node.upstream_nodes:
if upstream.graph is not self:
subgraphs[upstream.graph.name] = upstream.graph
return subgraphs
@property
def evaluation_matrix(self):
"""Sort nodes into a 2D matrix based on their dependency.
Rows affect each other and have to be evaluated in sequence.
The Nodes on each row however can be evaluated in parallel as
they are independent of each other.
The amount of Nodes in each row can vary.
Returns:
(list of list of INode): Each sub list represents a row.
"""
levels = {}
for node in self.all_nodes:
self._sort_node(node, levels, level=0)
matrix = []
for level in sorted(list(set(levels.values()))):
row = []
for node in [n for n in levels if levels[n] == level]:
row.append(node)
row.sort(key=lambda key: key.name)
matrix.append(row)
return matrix
@property
def evaluation_sequence(self):
"""Sort Nodes into a sequential, flat execution order.
Returns:
(list of INode): A one dimensional representation of the
evaluation matrix.
"""
return [node for row in self.evaluation_matrix for node in row]
@property
def input_groups(self):
"""Return all inputs that are actually input groups."""
return {k: v for k, v in self.inputs.items() if isinstance(v, InputPlugGroup)}
def add_node(self, node):
"""Add given Node to the Graph.
Nodes on a Graph have to have unique names.
"""
if node not in self.nodes:
for existing_node in self.nodes:
if existing_node.name == node.name:
raise ValueError(
"Can not add Node of name '{0}', a Node with this "
"name already exists on this Graph. Node names on "
"a Graph have to be unique.".format(node.name))
self.nodes.append(node)
node.graph = self
else:
log.warning(
'Node "{0}" is already part of this Graph'.format(node.name))
def delete_node(self, node):
"""Disconnect all plugs and then delete the node object."""
if node in self.nodes:
for plug in node.all_inputs().values():
for connection in plug.connections:
plug.disconnect(connection)
for plug in node.all_outputs().values():
for connection in plug.connections:
plug.disconnect(connection)
del self.nodes[self.nodes.index(node)]
def add_plug(self, plug, name=None):
"""Promote the given plug this graph.
Args:
plug (flowpipe.plug.IPlug): The plug to promote to this graph
name (str): Optionally use the given name instead of the name of
the given plug
"""
if isinstance(plug, InputPlug):
if plug not in self.inputs.values():
self.inputs[name or plug.name] = plug
else:
key = list(self.inputs.keys())[
list(self.inputs.values()).index(plug)]
raise ValueError(
"The given plug '{0}' has already been promoted to this "
"Graph und the key '{1}'".format(plug.name, key))
elif isinstance(plug, OutputPlug):
if plug not in self.outputs.values():
self.outputs[name or plug.name] = plug
else:
key = list(self.outputs.keys())[
list(self.outputs.values()).index(plug)]
raise ValueError(
"The given plug {0} has already been promoted to this "
"Graph und the key '{1}'".format(plug.name, key))
else:
raise TypeError(
"Plugs of type '{0}' can not be promoted directly to a Graph. "
"Only plugs of type '{1}' or '{2}' can be promoted.".format(
type(plug), InputPlug, OutputPlug))
    def accepts_connection(self, output_plug, input_plug):
        """Raise exception if new connection would violate integrity of graph.

        Args:
            output_plug (flowpipe.plug.OutputPlug): The output plug
            input_plug (flowpipe.plug.InputPlug): The input plug
        Raises:
            CycleError and ValueError
        Returns:
            True if the connection is accepted
        """
        out_node = output_plug.node
        in_node = input_plug.node

        # Plugs can't be connected to other plugs on their own node
        if in_node is out_node:
            raise CycleError(
                'Can\'t connect plugs that are part of the same node.')

        # Connecting an output to a node that is upstream of it would
        # create a cycle
        if out_node in in_node.downstream_nodes:
            raise CycleError(
                'Can\'t connect OutputPlugs to plugs of an upstream node.')

        # Names of subgraphs have to be unique
        if (
                in_node.graph.name in self.subgraphs and
                in_node.graph not in self.subgraphs.values()):
            raise ValueError(
                "This node is part of graph '{0}', but a different "
                "graph with the same name is already part of this "
                "graph. Subgraph names on a Graph have to "
                "be unique".format(in_node.graph.name))
        return True
def evaluate(self, mode="linear", skip_clean=False,
submission_delay=0.1, max_workers=None, data_persistence=True,
evaluator=None):
"""Evaluate all Nodes in the graph.
Sorts the nodes in the graph into a resolution order and evaluates the
nodes. Evaluation can be parallelized by utilizing the dependencies
between the nodes - see the "mode" keyword for the options.
Note that no checks are in place whether the node execution is actually
thread-safe or fit for multiprocessing. It is assumed to be given if
the respective mode is selected.
Some keyword arguments do not affect all evaluation modes.
Args:
mode (str): The evaluation mode. Possible modes are
* linear : Iterates over all nodes in a single thread
* threading : Evaluates each node in a new thread
* multiprocessing : Evaluates each node in a new process
skip_clean (bool): Whether to skip nodes that are 'clean' (as
tracked by the 'is_dirty' attribute on the node), i.e. whose
inputs have not changed since their output was computed
submission_delay (float): The delay in seconds between loops
issuing new threads/processes if nodes are ready to process.
max_workers (int): The maximum number of parallel threads to spawn.
None defaults to your pythons ThreadPoolExecutor default.
data_persistence (bool): If false, the data on plugs that have
connections gets cleared (set to None). This reduces the
reference count of objects.
evaluator (flowpipe.evaluators.Evaluator): The evaluator to use.
For the basic evaluation modes will be picked by 'mode'.
"""
log.info('Evaluating Graph "{0}"'.format(self.name))
# map mode keywords to evaluation functions and their arguments
eval_modes = {
"linear": (LinearEvaluator, {}),
"threading": (ThreadedEvaluator, {"max_workers": max_workers}),
"multiprocessing": (LegacyMultiprocessingEvaluator,
{"submission_delay": submission_delay})
}
if mode and evaluator:
raise ValueError("Both 'mode' and 'evaluator' arguments passed.")
elif mode:
try:
eval_cls, eval_args = eval_modes[mode]
except KeyError:
raise ValueError("Unkown mode: {0}".format(mode))
evaluator = eval_cls(**eval_args)
evaluator.evaluate(graph=self, skip_clean=skip_clean)
if not data_persistence:
for node in self.nodes:
for input_plug in node.all_inputs().values():
if input_plug.connections:
input_plug.value = None
for output_plug in node.all_outputs().values():
if output_plug.connections:
output_plug.value = None
    def to_pickle(self):
        """Serialize the graph into a pickle."""
        return pickle.dumps(self)
    def to_json(self):
        """Serialize the graph into a json."""
        return self._serialize()
    def serialize(self):  # pragma: no cover
        """Serialize the graph in its grid form.

        Deprecated: use Graph.to_json or Graph.to_pickle instead.
        """
        warnings.warn('Graph.serialize is deprecated. Instead, use one of '
                      'Graph.to_json or Graph.to_pickle',
                      DeprecationWarning)
        return self._serialize()
    def _serialize(self, with_subgraphs=True):
        """Serialize the graph in its grid form.

        Args:
            with_subgraphs (bool): Set to false to avoid infinite recursion
        """
        data = OrderedDict(
            module=self.__module__,
            cls=self.__class__.__name__,
            name=self.name)
        data['nodes'] = [node.to_json() for node in self.nodes]
        if with_subgraphs:
            # sorted by name for a deterministic serialization
            data['subgraphs'] = [
                graph._serialize(with_subgraphs=False)
                for graph in sorted(
                    self.subgraphs.values(), key=lambda g: g.name)]
        return data
@staticmethod
def from_pickle(data):
"""De-serialize from the given pickle data."""
return pickle.loads(data)
    @staticmethod
    def from_json(data):
        """De-serialize from the given json data.

        Args:
            data (dict): A serialization as produced by Graph.to_json.

        Returns:
            The graph reconstructed by deserialize_graph.
        """
        return deserialize_graph(data)
@staticmethod
def deserialize(data): # pragma: no cover
"""De-serialize from the given json data."""
warnings.warn('Graph.deserialize is deprecated. Instead, use one of '
'Graph.from_json or Graph.from_pickle',
DeprecationWarning)
return deserialize_graph(data)
def _sort_node(self, node, parent, level):
"""Sort the node into the correct level."""
if node in parent.keys():
if level > parent[node]:
parent[node] = level
else:
parent[node] = level
for downstream_node in node.downstream_nodes:
self._sort_node(downstream_node, parent, level=level + 1)
    def node_repr(self):
        """Format to visualize the Graph.

        Renders the graph as ASCII art: one column per evaluation level,
        boxed nodes, connection lines between plugs, a framed title bar and
        an optional input-groups header.
        """
        canvas_ = canvas.Canvas()
        x = 0
        evaluation_matrix = self.evaluation_matrix
        # Lay the nodes out column by column; x advances by the widest item
        # in the current column (plus padding).
        for row in evaluation_matrix:
            y = 0
            x_diff = 0
            for node in row:
                item_ = item.Item(str(node), [x, y])
                node.item = item_
                x_diff = (item_.bbox[2] - item_.bbox[0] + 4 if
                          item_.bbox[2] - item_.bbox[0] + 4 > x_diff else x_diff)
                y += item_.bbox[3] - item_.bbox[1]
                canvas_.add_item(item_)
            x += x_diff
        # Include the input groups if any have been set
        y_off = 2
        locked_items = []
        if self.input_groups:
            for input_group in self.input_groups.values():
                y_off += 1
                i = item.Item("o {0}".format(input_group.name), [0, y_off])
                canvas_.add_item(i)
                locked_items.append(i)
                for p in input_group.plugs:
                    y_off += 1
                    i = item.Item("`-{0}.{1}".format(p.node.name, p.name), [2, y_off])
                    canvas_.add_item(i)
                    locked_items.append(i)
        # Move all items down by Y
        # (locked items belong to the header area and keep their place).
        for i in canvas_.items:
            if i not in locked_items:
                i.position[0] += 2
                i.position[1] += y_off + 1 + int(bool(self.input_groups))
        canvas_.add_item(item.Rectangle(x, canvas_.bbox[3] + 1, [0, 0]), 0)
        # Crop the name of the graph if it is too long
        name = self.name
        if len(name) > x - 2:
            name = name[:x - 2]
        canvas_.add_item(
            item.Item("{name:^{x}}".format(name=name, x=x), [0, 1]), 0)
        canvas_.add_item(item.Rectangle(x, 3, [0, 0]), 0)
        if self.input_groups:
            canvas_.add_item(item.Rectangle(x, y_off + 2, [0, 0]), 0)
        # Draw one line per connection, from the output plug's row on the
        # source node to the matching input plug's row on the target node.
        for node in self.all_nodes:
            for i, plug in enumerate(node._sort_plugs(node.all_outputs())):
                for connection in node._sort_plugs(
                        node.all_outputs())[plug].connections:
                    dnode = connection.node
                    start = [node.item.position[0] + node.item.bbox[2],
                             node.item.position[1] + 3 + len(node.all_inputs()) + i]
                    end = [dnode.item.position[0],
                           dnode.item.position[1] + 3 +
                           list(dnode._sort_plugs(
                               dnode.all_inputs()).values()).index(
                               connection)]
                    canvas_.add_item(item.Line(start, end), 0)
        return canvas_.render()
def list_repr(self):
"""List representation of the graph showing Nodes and connections."""
pretty = []
pretty.append(self.name)
if self.input_groups:
pretty.append("[Input Groups]")
for name in sorted(self.input_groups.keys()):
input_group = self.input_groups[name]
pretty.append(" [g] {0}:".format(name))
for p in input_group.plugs:
pretty.append(" {0}.{1}".format(p.node.name, p.name))
for node in self.evaluation_sequence:
pretty.append(node.list_repr())
return '\n '.join(pretty)
# Module-level default graph, returned by get_default_graph() and replaced
# via set_default_graph() / reset_default_graph().
default_graph = Graph(name='default')
def get_default_graph():
    """Return the graph currently registered as the module-wide default."""
    return default_graph
def set_default_graph(graph):
    """Install the given graph as the module-wide default.

    Raises:
        TypeError: If the given object is not a Graph instance.
    """
    global default_graph
    if not isinstance(graph, Graph):
        raise TypeError("Can only set 'Graph' instances as default graph!")
    default_graph = graph
def reset_default_graph():
    """Replace the default graph with a brand-new, empty graph."""
    fresh = Graph(name="default")
    set_default_graph(fresh)
| {
"repo_name": "PaulSchweizer/flowpipe",
"path": "flowpipe/graph.py",
"copies": "1",
"size": "17481",
"license": "mit",
"hash": 6751571978559594000,
"line_mean": 36.5128755365,
"line_max": 86,
"alpha_frac": 0.5556318288,
"autogenerated": false,
"ratio": 4.394419306184012,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5450051134984012,
"avg_score": null,
"num_lines": null
} |
# NodeBox example script (Python 2): interactive WordNet browser.
# The word whose senses and relations are displayed first.
query = "bird"
# ximport() is NodeBox's sketch-folder import helper; fall back to the
# package __init__ when this script runs from inside the library itself.
try:
    graph = ximport("graph")
except ImportError:
    graph = ximport("__init__")
    reload(graph)
# NodeBox English linguistics library (WordNet access).
import en
from random import shuffle
#### WORDNET GRAPH ###################################################################################
class wordnetgraph(graph.graph):
    """ Browse WordNet in a graph.
    The wordnetgraph class is based on the standard graph class.
    I've added some functionality to fetch data from WordNet and browse through it.
    When you click on a node, the wordnetgraph.click() method is fired.
    This will check if the clicked node is a noun in WordNet, and if so,
    reload the graph's nodes and connections with that noun at the root.
    The main methods, get_senses() and get_relations(),
    are called when the graph reloads.
    They retrieve data from WordNet and format it as nodes.
    The expand() method is called when you click on "has-specific" or "has-parts".
    A helper class, senses, draws the interactive word sense selection buttons.
    """
    # NOTE(review): Python 2 / NodeBox code (integer division, NodeBox
    # globals such as _ctx, mousedown). Not portable to Python 3 as-is.
    def __init__(self, iterations=2000, distance=1.3):
        # iterations/distance tune the force-directed layout of the base class.
        graph.graph.__init__(self, iterations, distance)
        self.styles = graph.create().styles
        # Route node clicks to our handler and enable hover popups.
        self.events.click = self.click
        self.events.popup = True
        # Display a maximum of 20 nodes.
        self.max = 20
        # A row of buttons to select the current word sense.
        self.senses = senses(self, 20, 20)
    def is_expandable(self, id):
        """ Some of the branches are expandable:
        if you click on has-parts or has-specific, a detailed view will load.
        """
        if id in ["has-parts", "has-specific"]:
            return True
        else:
            return False
    def is_clickable(self, node):
        """ Every node that is a noun is clickable (except the root).
        """
        # NOTE(review): 'and' binds tighter than 'or', so this reads as
        # noun(node) or (expandable(node) and node != root). The docstring
        # suggests the root exclusion may have been intended for both
        # branches -- confirm before changing.
        if en.is_noun(str(node.id.lower())) \
        or self.is_expandable(node.id) and node != self.root:
            return True
        else:
            return False
    def get_senses(self, word, top=6):
        """ The graph displays the different senses of a noun,
        e.g. light -> lighter, luminosity, sparkle, ...
        """
        word = str(word)
        if self.is_expandable(word): return []
        # If there are 4 word senses and each of it a list of words,
        # take the first word from each list, then take the second etc.
        words = []
        for i in range(2):
            for sense in en.noun.senses(word):
                if len(sense) > i \
                and sense[i] != word \
                and sense[i] not in words:
                    words.append(sense[i])
        return words[:top]
    def get_relations(self, word, previous=None):
        """ The graph displays semantic relations for a noun,
        e.g. light -> has-specific -> candlelight.
        """
        word = str(word)
        if self.is_expandable(word):
            return self.expand(word, previous)
        words = []
        lexname = en.noun.lexname(word)
        if lexname != "":
            words.append((lexname, "category "))
        # Each entry: (max items to keep, WordNet query function, edge label).
        relations = [
            (6, en.noun.holonym , "has-parts"),
            (2, en.noun.meronym , "is-part-of"),
            (2, en.noun.antonym , "is-opposite-of"),
            (3, en.noun.hypernym , "is-a"),
            (2, en.verb.senses , "is-action"),
            (6, en.noun.hyponym , "has-specific"),
        ]
        # Get related words from WordNet.
        # Exclude long words and take the top of the list.
        for top, f, relation in relations:
            r = []
            # Some query functions reject the sense keyword; retry without it.
            try: rng = f(word, sense=self.senses.current)
            except:
                try: rng = f(word)
                except:
                    continue
            for w in rng:
                if w[0] != word \
                and w[0] not in r \
                and len(w[0]) < 20:
                    r.append((w[0], relation))
            words.extend(r[:top])
        return words
    def expand(self, relation, previous=None):
        """ Zoom in to the hyponym or holonym branch.
        """
        if relation == "has-specific" : f = en.noun.hyponym
        if relation == "has-parts" : f = en.noun.holonym
        root = str(self.root.id.lower())
        unique = []
        if previous: previous = str(previous)
        for w in f(previous, sense=self.senses.current):
            if w[0] not in unique: unique.append(w[0])
        shuffle(unique)
        words = []
        i = 0
        for w in unique:
            # Organise connected nodes in branches of 4 nodes each.
            # Nodes that have the root id in their own id,
            # form a branch on their own.
            label = " "
            if w.find(root) < 0:
                # Python 2 integer division: every 4 words share one label.
                label = (i+4)/4*" "
                i += 1
            words.append((w, label))
        return words
    def click(self, node):
        """ If the node is indeed clickable, load it.
        """
        if self.is_clickable(node):
            p = self.root.id
            # Use the previous back node instead of "has specific".
            if self.is_expandable(p): p = self.nodes[-1].id
            self.load(node.id, previous=p)
    def load(self, word, previous=None):
        """ Rebuild the graph with the given word at the root.
        """
        self.clear()
        word = str(word)
        # Add the root (the clicked node) with the ROOT style.
        self.add_node(word, root=True, style="root")
        # Add the word senses to the root in the LIGHT style.
        for w in self.get_senses(word):
            self.add_node(w, style=self.styles.light.name)
            self.add_edge(word, w, 0.5)
            if len(self) > self.max: break
        # Add relation branches to the root in the DARK style.
        for w, r in self.get_relations(word, previous):
            self.add_node(r, style="dark")
            self.add_edge(w, r, 1.0)
            self.add_edge(word, r)
            if len(self) > self.max: break
        # Provide a back link to the previous word.
        if previous and previous != self.root.id:
            n = self.add_node(previous, 10)
            if len(n.links) == 0: self.add_edge(word, n.id)
            n.style = "back"
        # Indicate the word corresponding to the current sense.
        if self.senses.count() > 0:
            for w in en.noun.senses(word)[self.senses.current]:
                n = self.node(w)
                if n and n != self.root:
                    n.style = "marked"
    def draw(self, *args, **kwargs):
        """ Additional drawing for sense selection buttons.
        """
        graph.graph.draw(self, *args, **kwargs)
        self.senses.draw()
### WORD SENSE SELECTION #############################################################################
class senses:
    """ A row of word sense selection buttons.
    """
    def __init__(self, graph, x, y):
        # graph: the owning wordnetgraph; x, y: top-left of the button row.
        self.graph = graph
        self.word = ""
        self.x = x
        self.y = y
        # Index of the selected sense and of the button currently pressed.
        self.current = 0
        self.pressed = None
    def count(self):
        """ The number of senses for the current word.
        The current word is synched to the graph's root node.
        """
        if self.word != self.graph.root.id:
            # Root changed: reset the selection and recount the senses.
            self.word = str(self.graph.root.id)
            self.current = 0
            self._count = 0
            try: self._count = len(en.noun.senses(self.word))
            except:
                pass
        return self._count
    def draw(self):
        # Draw one numbered square per word sense; the selected one is
        # filled with the background colour so it reads as "active".
        s = self.graph.styles.default
        x, y, f = self.x, self.y, s.fontsize
        _ctx.reset()
        _ctx.nostroke()
        _ctx.fontsize(f)
        for i in range(self.count()):
            clr = s.fill
            if i == self.current:
                clr = self.graph.styles.default.background
            _ctx.fill(clr)
            p = _ctx.rect(x, y, f*2, f*2)
            _ctx.fill(s.text)
            _ctx.align(CENTER)
            _ctx.text(str(i+1), x-f, y+f*1.5, width=f*4)
            x += f * 2.2
            self.log_pressed(p, i)
            self.log_clicked(p, i)
    def log_pressed(self, path, i):
        """ Update senses.pressed to the last button pressed.
        """
        if mousedown \
        and self.graph.events.dragged == None \
        and path.contains(MOUSEX, MOUSEY):
            self.pressed = i
    def log_clicked(self, path, i):
        """ Update senses.current to the last button clicked.
        """
        # A click is a press followed by a release over the same button.
        if not mousedown and self.pressed == i:
            self.pressed = None
            if path.contains(MOUSEX, MOUSEY):
                self.current = i
                self.graph.load(self.graph.root.id)
######################################################################################################
# Build the browser and load the initial query word.
g = wordnetgraph(distance=1.2)
g.load(query)
# NodeBox canvas setup: 550x550 pixels at 30 frames per second.
size(550, 550)
speed(30)
def draw():
    # Called by NodeBox once per frame; renders the animated graph.
    g.styles.textwidth = 120
    g.draw(
        directed=True,
        weighted=True,
        traffic=True
    )
| {
"repo_name": "gt-ros-pkg/rcommander-core",
"path": "nodebox_qt/src/graph/graph_example4.py",
"copies": "3",
"size": "9645",
"license": "bsd-3-clause",
"hash": 2524314237547145700,
"line_mean": 29.8178913738,
"line_max": 102,
"alpha_frac": 0.4961119751,
"autogenerated": false,
"ratio": 4.02377972465582,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.034652056137159756,
"num_lines": 313
} |
# NodeBox example script (Python 2): interactive WordNet browser.
# The word whose senses and relations are displayed first.
query = "bird"
# ximport() is NodeBox's sketch-folder import helper; fall back to the
# package __init__ when this script runs from inside the library itself.
try:
    graph = ximport("graph")
except ImportError:
    graph = ximport("__init__")
    reload(graph)
# NodeBox English linguistics library (WordNet access).
import en
from random import shuffle
#### WORDNET GRAPH ###################################################################################
class wordnetgraph(graph.graph):
    """ Browse WordNet in a graph.
    The wordnetgraph class is based on the standard graph class.
    I've added some functionality to fetch data from WordNet and browse through it.
    When you click on a node, the wordnetgraph.click() method is fired.
    This will check if the clicked node is a noun in WordNet, and if so,
    reload the graph's nodes and connections with that noun at the root.
    The main methods, get_senses() and get_relations(),
    are called when the graph reloads.
    They retrieve data from WordNet and format it as nodes.
    The expand() method is called when you click on "has-specific" or "has-parts".
    A helper class, senses, draws the interactive word sense selection buttons.
    """
    # NOTE(review): Python 2 / NodeBox code (integer division, NodeBox
    # globals such as _ctx, mousedown). Not portable to Python 3 as-is.
    def __init__(self, iterations=2000, distance=1.3):
        # iterations/distance tune the force-directed layout of the base class.
        graph.graph.__init__(self, iterations, distance)
        self.styles = graph.create().styles
        # Route node clicks to our handler and enable hover popups.
        self.events.click = self.click
        self.events.popup = True
        # Display a maximum of 20 nodes.
        self.max = 20
        # A row of buttons to select the current word sense.
        self.senses = senses(self, 20, 20)
    def is_expandable(self, id):
        """ Some of the branches are expandable:
        if you click on has-parts or has-specific, a detailed view will load.
        """
        if id in ["has-parts", "has-specific"]:
            return True
        else:
            return False
    def is_clickable(self, node):
        """ Every node that is a noun is clickable (except the root).
        """
        # NOTE(review): 'and' binds tighter than 'or', so this reads as
        # noun(node) or (expandable(node) and node != root). The docstring
        # suggests the root exclusion may have been intended for both
        # branches -- confirm before changing.
        if en.is_noun(str(node.id.lower())) \
        or self.is_expandable(node.id) and node != self.root:
            return True
        else:
            return False
    def get_senses(self, word, top=6):
        """ The graph displays the different senses of a noun,
        e.g. light -> lighter, luminosity, sparkle, ...
        """
        word = str(word)
        if self.is_expandable(word): return []
        # If there are 4 word senses and each of it a list of words,
        # take the first word from each list, then take the second etc.
        words = []
        for i in range(2):
            for sense in en.noun.senses(word):
                if len(sense) > i \
                and sense[i] != word \
                and sense[i] not in words:
                    words.append(sense[i])
        return words[:top]
    def get_relations(self, word, previous=None):
        """ The graph displays semantic relations for a noun,
        e.g. light -> has-specific -> candlelight.
        """
        word = str(word)
        if self.is_expandable(word):
            return self.expand(word, previous)
        words = []
        lexname = en.noun.lexname(word)
        if lexname != "":
            words.append((lexname, "category "))
        # Each entry: (max items to keep, WordNet query function, edge label).
        relations = [
            (6, en.noun.holonym , "has-parts"),
            (2, en.noun.meronym , "is-part-of"),
            (2, en.noun.antonym , "is-opposite-of"),
            (3, en.noun.hypernym , "is-a"),
            (2, en.verb.senses , "is-action"),
            (6, en.noun.hyponym , "has-specific"),
        ]
        # Get related words from WordNet.
        # Exclude long words and take the top of the list.
        for top, f, relation in relations:
            r = []
            # Some query functions reject the sense keyword; retry without it.
            try: rng = f(word, sense=self.senses.current)
            except:
                try: rng = f(word)
                except:
                    continue
            for w in rng:
                if w[0] != word \
                and w[0] not in r \
                and len(w[0]) < 20:
                    r.append((w[0], relation))
            words.extend(r[:top])
        return words
    def expand(self, relation, previous=None):
        """ Zoom in to the hyponym or holonym branch.
        """
        if relation == "has-specific" : f = en.noun.hyponym
        if relation == "has-parts" : f = en.noun.holonym
        root = str(self.root.id.lower())
        unique = []
        if previous: previous = str(previous)
        for w in f(previous, sense=self.senses.current):
            if w[0] not in unique: unique.append(w[0])
        shuffle(unique)
        words = []
        i = 0
        for w in unique:
            # Organise connected nodes in branches of 4 nodes each.
            # Nodes that have the root id in their own id,
            # form a branch on their own.
            label = " "
            if w.find(root) < 0:
                # Python 2 integer division: every 4 words share one label.
                label = (i+4)/4*" "
                i += 1
            words.append((w, label))
        return words
    def click(self, node):
        """ If the node is indeed clickable, load it.
        """
        if self.is_clickable(node):
            p = self.root.id
            # Use the previous back node instead of "has specific".
            if self.is_expandable(p): p = self.nodes[-1].id
            self.load(node.id, previous=p)
    def load(self, word, previous=None):
        """ Rebuild the graph with the given word at the root.
        """
        self.clear()
        word = str(word)
        # Add the root (the clicked node) with the ROOT style.
        self.add_node(word, root=True, style="root")
        # Add the word senses to the root in the LIGHT style.
        for w in self.get_senses(word):
            self.add_node(w, style=self.styles.light.name)
            self.add_edge(word, w, 0.5)
            if len(self) > self.max: break
        # Add relation branches to the root in the DARK style.
        for w, r in self.get_relations(word, previous):
            self.add_node(r, style="dark")
            self.add_edge(w, r, 1.0)
            self.add_edge(word, r)
            if len(self) > self.max: break
        # Provide a back link to the previous word.
        if previous and previous != self.root.id:
            n = self.add_node(previous, 10)
            if len(n.links) == 0: self.add_edge(word, n.id)
            n.style = "back"
        # Indicate the word corresponding to the current sense.
        if self.senses.count() > 0:
            for w in en.noun.senses(word)[self.senses.current]:
                n = self.node(w)
                if n and n != self.root:
                    n.style = "marked"
    def draw(self, *args, **kwargs):
        """ Additional drawing for sense selection buttons.
        """
        graph.graph.draw(self, *args, **kwargs)
        self.senses.draw()
### WORD SENSE SELECTION #############################################################################
class senses:
    """ A row of word sense selection buttons.
    """
    def __init__(self, graph, x, y):
        # graph: the owning wordnetgraph; x, y: top-left of the button row.
        self.graph = graph
        self.word = ""
        self.x = x
        self.y = y
        # Index of the selected sense and of the button currently pressed.
        self.current = 0
        self.pressed = None
    def count(self):
        """ The number of senses for the current word.
        The current word is synched to the graph's root node.
        """
        if self.word != self.graph.root.id:
            # Root changed: reset the selection and recount the senses.
            self.word = str(self.graph.root.id)
            self.current = 0
            self._count = 0
            try: self._count = len(en.noun.senses(self.word))
            except:
                pass
        return self._count
    def draw(self):
        # Draw one numbered square per word sense; the selected one is
        # filled with the background colour so it reads as "active".
        s = self.graph.styles.default
        x, y, f = self.x, self.y, s.fontsize
        _ctx.reset()
        _ctx.nostroke()
        _ctx.fontsize(f)
        for i in range(self.count()):
            clr = s.fill
            if i == self.current:
                clr = self.graph.styles.default.background
            _ctx.fill(clr)
            p = _ctx.rect(x, y, f*2, f*2)
            _ctx.fill(s.text)
            _ctx.align(CENTER)
            _ctx.text(str(i+1), x-f, y+f*1.5, width=f*4)
            x += f * 2.2
            self.log_pressed(p, i)
            self.log_clicked(p, i)
    def log_pressed(self, path, i):
        """ Update senses.pressed to the last button pressed.
        """
        if mousedown \
        and self.graph.events.dragged == None \
        and path.contains(MOUSEX, MOUSEY):
            self.pressed = i
    def log_clicked(self, path, i):
        """ Update senses.current to the last button clicked.
        """
        # A click is a press followed by a release over the same button.
        if not mousedown and self.pressed == i:
            self.pressed = None
            if path.contains(MOUSEX, MOUSEY):
                self.current = i
                self.graph.load(self.graph.root.id)
######################################################################################################
# Build the browser and load the initial query word.
g = wordnetgraph(distance=1.2)
g.load(query)
# NodeBox canvas setup: 550x550 pixels at 30 frames per second.
size(550, 550)
speed(30)
def draw():
    # Called by NodeBox once per frame; renders the animated graph.
    g.styles.textwidth = 120
    g.draw(
        directed=True,
        weighted=True,
        traffic=True
    )
| {
"repo_name": "zaqwes8811/micro-apps",
"path": "extern/GPL_libs/graph/graph_example4.py",
"copies": "2",
"size": "9957",
"license": "mit",
"hash": -5565381609396478000,
"line_mean": 29.8178913738,
"line_max": 102,
"alpha_frac": 0.4805664357,
"autogenerated": false,
"ratio": 4.122981366459627,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5603547802159627,
"avg_score": null,
"num_lines": null
} |
# a graph type used for dataflow analysis.
# Not a very general implementation
# based on the description in Appel's book
from bisect import bisect, insort
class T:
    """Directed graph used for dataflow analysis (after Appel's description).

    Nodes are plain integers handed out sequentially; edges are stored as
    sorted successor/predecessor adjacency lists so membership can be
    checked with bisection.
    """

    def __init__(self):
        self._nodes = []
        self._succ = {}
        self._pred = {}
        self.nextNode = 0

    def _getNextNode(self):
        # Hand out node ids 0, 1, 2, ...
        n = self.nextNode
        self.nextNode += 1
        return n

    def succ(self, node):
        "return a sorted list of successor nodes"
        return self._succ[node]

    def pred(self, node):
        "return a sorted list of predecessor nodes"
        return self._pred[node]

    def adj(self, node):
        "return a list of adjacent nodes"
        pass

    def eq(self, a, b):
        "return true if a and b are the same node"
        pass

    def newNode(self):
        "return a new unconnected node"
        n = self._getNextNode()
        # consider using array.array or a bitset for these
        self._succ[n] = []
        self._pred[n] = []
        return n

    def newEdge(self, a, b):
        "make a new edge from a to b"
        s = self._succ[a]
        i = bisect(s, b)
        if i == 0 or s[i-1] != b:
            s.insert(i, b)
        s = self._pred[b]
        i = bisect(s, a)
        # Bug fix: the original tested s[-1] != a (the *last* element), so a
        # duplicate of a was inserted whenever a was already present in the
        # predecessor list but not at its end. Mirror the successor logic
        # and test the element just before the insertion point instead.
        if i == 0 or s[i-1] != a:
            s.insert(i, a)

    def delEdge(self, a, b):
        "remove the edge between a and b"
| {
"repo_name": "ericchill/gnofract4d",
"path": "fract4d/graph.py",
"copies": "1",
"size": "1393",
"license": "bsd-3-clause",
"hash": -2366310744052419600,
"line_mean": 23.0172413793,
"line_max": 58,
"alpha_frac": 0.5204594401,
"autogenerated": false,
"ratio": 3.599483204134367,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4619942644234367,
"avg_score": null,
"num_lines": null
} |
"""A grid plane component.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005-2006, Enthought, Inc.
# License: BSD Style.
# Enthought library imports.
from traits.api import Instance, Enum, Int, Range
from traitsui.api import View, Group, Item
from tvtk.api import tvtk
from apptools.persistence import state_pickler
# Local imports.
from mayavi.core.component import Component
from mayavi.core.common import error
def _get_extent(inp):
"""Get the extents from the given input.
"""
d = inp.dimensions
return [0, d[0]-1, 0, d[1]-1, 0, d[2]-1]
######################################################################
# `GridPlane` class.
######################################################################
class GridPlane(Component):
    """Mayavi component that extracts a single plane, normal to a chosen
    axis, from a structured dataset (structured grid, image data /
    structured points, or rectilinear grid).
    """
    # The version of this class. Used for persistence.
    __version__ = 0
    # The TVTK object that extracts the grid plane. This is created
    # dynamically based on the input data type.
    plane = Instance(tvtk.Object)
    # The axis which is normal to the plane chosen.
    axis = Enum('x', 'y', 'z',
                desc='specifies the axis normal to the grid plane')
    # The position of the grid plane.
    position = Range(value=0, low='_low', high='_high',
                     enter_set=True, auto_set=False)
    ########################################
    # Private traits.
    # Determines the lower limit of the position trait and is always 0.
    _low = Int(0)
    # Determines the upper limit of the position trait. The value is
    # dynamically set depending on the input data and state of the
    # axis trait. The default is some large value to avoid errors in
    # cases where the user may set the position before adding the
    # object to the mayavi tree.
    _high = Int(10000)
    ########################################
    # View related traits.
    # The View for this object.
    view = View(Group(Item(name='axis'),
                      Item(name='position', enabled_when='_high > 0'))
                )
    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        d = super(GridPlane, self).__get_pure_state__()
        # These traits are dynamically created.
        for name in ('plane', '_low', '_high'):
            d.pop(name, None)
        return d
    def __set_pure_state__(self, state):
        state_pickler.set_state(self, state)
        # Re-apply the position so the extents match the restored state.
        self._position_changed(self.position)
    ######################################################################
    # `Component` interface
    ######################################################################
    def setup_pipeline(self):
        """Override this method so that it *creates* its tvtk
        pipeline.
        This method is invoked when the object is initialized via
        `__init__`. Note that at the time this method is called, the
        tvtk data pipeline will *not* yet be setup. So upstream data
        will not be available. The idea is that you simply create the
        basic objects and setup those parts of the pipeline not
        dependent on upstream sources and filters.
        """
        pass
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.
        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        if len(self.inputs) == 0:
            return
        input = self.inputs[0].outputs[0]
        plane = None
        # Pick the geometry filter that matches the concrete dataset type.
        if input.is_a('vtkStructuredGrid'):
            plane = tvtk.StructuredGridGeometryFilter()
        elif input.is_a('vtkStructuredPoints') or input.is_a('vtkImageData'):
            plane = tvtk.ImageDataGeometryFilter ()
        elif input.is_a('vtkRectilinearGrid'):
            plane = tvtk.RectilinearGridGeometryFilter ()
        else:
            msg = "The GridPlane component does not support the %s dataset."\
                  %(input.class_name)
            error(msg)
            raise TypeError(msg)
        self.configure_connection(plane, self.inputs[0])
        self.plane = plane
        self.plane.update()
        self.outputs = [plane.output]
        self._update_limits()
        self._update_extents()
        # If the data is 2D make sure that we default to the
        # appropriate axis.
        extents = list(_get_extent(input))
        diff = [y-x for x, y in zip(extents[::2], extents[1::2])]
        if diff.count(0) > 0:
            self.axis = ['x', 'y', 'z'][diff.index(0)]
    def update_data(self):
        """Override this method to do what is necessary when upstream
        data changes.
        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        self._update_limits()
        self._update_extents()
        # Propagate the data_changed event.
        self.data_changed = True
    def has_output_port(self):
        """ The filter has an output port."""
        return True
    def get_output_object(self):
        """ Returns the output port."""
        return self.plane.output_port
    ######################################################################
    # Non-public methods.
    ######################################################################
    def _get_axis_index(self):
        # Map the axis name to the index used into the extents list.
        return {'x':0, 'y':1, 'z':2}[self.axis]
    def _update_extents(self):
        # Collapse the extent along the normal axis onto the current
        # position, leaving a single plane of cells.
        inp = self.plane.input
        extents = list(_get_extent(inp))
        pos = self.position
        axis = self._get_axis_index()
        extents[2*axis] = pos
        extents[2*axis+1] = pos
        try:
            self.plane.set_extent(extents)
        except AttributeError:
            # Some tvtk filters expose the extent as a trait, not a setter.
            self.plane.extent = extents
    def _update_limits(self):
        # Refresh the slider's upper bound from the data and return the
        # position clamped to that bound (without assigning it here --
        # callers decide whether to trigger the position listener).
        extents = _get_extent(self.plane.input)
        axis = self._get_axis_index()
        pos = min(self.position, extents[2*axis+1])
        self._high = extents[2*axis+1]
        return pos
    def _axis_changed(self, val):
        if len(self.inputs) == 0:
            return
        pos = self._update_limits()
        if self.position == pos:
            self._update_extents()
            self.data_changed = True
        else:
            # Assigning position fires _position_changed, which updates the
            # extents and emits data_changed.
            self.position = pos
    def _position_changed(self, val):
        if len(self.inputs) == 0:
            return
        self._update_extents()
        self.data_changed = True
| {
"repo_name": "dmsurti/mayavi",
"path": "mayavi/components/grid_plane.py",
"copies": "1",
"size": "6555",
"license": "bsd-3-clause",
"hash": 2782541675271855600,
"line_mean": 32.9637305699,
"line_max": 77,
"alpha_frac": 0.5363844394,
"autogenerated": false,
"ratio": 4.295543905635649,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5331928345035648,
"avg_score": null,
"num_lines": null
} |
""" A Group for toggling the visibility of a task's dock panes. """
# Enthought library imports.
from pyface.action.api import Action, ActionItem, Group
from traits.api import cached_property, Instance, List, on_trait_change, \
Property, Unicode
# Local imports.
from pyface.tasks.i_dock_pane import IDockPane
class DockPaneToggleAction(Action):
    """ An Action for toggling the visibility of a dock pane.
    """
    #### 'DockPaneToggleAction' interface #####################################
    # The dock pane controlled by this action.
    dock_pane = Instance(IDockPane)
    #### 'Action' interface ###################################################
    # Menu label and tooltip mirror the pane's name (see _get_name below).
    name = Property(Unicode, depends_on='dock_pane.name')
    style = 'toggle'
    tooltip = Property(Unicode, depends_on='name')
    ###########################################################################
    # 'Action' interface.
    ###########################################################################
    def destroy(self):
        super(DockPaneToggleAction, self).destroy()
        # Make sure that we are not listening to changes to the pane anymore.
        # In traits style, we will set the basic object to None and have the
        # listener check that if it is still there.
        self.dock_pane = None
    def perform(self, event=None):
        # Flip the pane's visibility when the menu item is activated.
        if self.dock_pane:
            self.dock_pane.visible = not self.dock_pane.visible
    ###########################################################################
    # Protected interface.
    ###########################################################################
    def _get_name(self):
        if self.dock_pane is None:
            return 'UNDEFINED'
        return self.dock_pane.name
    def _get_tooltip(self):
        return u'Toggles the visibility of the %s pane.' % self.name
    @on_trait_change('dock_pane.visible')
    def _update_checked(self):
        # Keep the menu check mark in sync with the pane's visibility.
        if self.dock_pane:
            self.checked = self.dock_pane.visible
    @on_trait_change('dock_pane.closable')
    def _update_visible(self):
        # Hide the menu entry entirely for panes that cannot be closed.
        if self.dock_pane:
            self.visible = self.dock_pane.closable
class DockPaneToggleGroup(Group):
    """ A Group for toggling the visibility of a task's dock panes.
    """
    #### 'Group' interface ####################################################
    id = 'DockPaneToggleGroup'
    items = List
    #### 'DockPaneToggleGroup' interface ######################################
    # The task whose dock panes this group lists; resolved through the
    # owning menu manager's controller.
    task = Property(depends_on='parent.controller')
    @cached_property
    def _get_task(self):
        manager = self.get_manager()
        if manager is None or manager.controller is None:
            return None
        return manager.controller.task
    # The task's dock panes, read from the window's per-task state.
    dock_panes = Property(depends_on='task.window._states.dock_panes')
    @cached_property
    def _get_dock_panes(self):
        if self.task is None or self.task.window is None:
            return []
        task_state = self.task.window._get_state(self.task)
        return task_state.dock_panes
    def get_manager(self):
        # FIXME: Is there no better way to get a reference to the menu manager?
        # Walk up the parent chain until we leave the Group hierarchy.
        manager = self
        while isinstance(manager, Group):
            manager = manager.parent
        return manager
    #### Private interface ####################################################
    @on_trait_change('dock_panes[]')
    def _dock_panes_updated(self):
        """Recreate the group items when dock panes have been added/removed.
        """
        # Remove the previous group items.
        self.destroy()
        items = []
        for dock_pane in self.dock_panes:
            action = DockPaneToggleAction(dock_pane=dock_pane)
            items.append(ActionItem(action=action))
        # Present the panes alphabetically by name.
        items.sort(key=lambda item: item.action.name)
        self.items = items
        # Inform the parent menu manager.
        manager = self.get_manager()
        manager.changed = True
| {
"repo_name": "pankajp/pyface",
"path": "pyface/tasks/action/dock_pane_toggle_group.py",
"copies": "5",
"size": "3899",
"license": "bsd-3-clause",
"hash": -6759156949744195000,
"line_mean": 30.192,
"line_max": 79,
"alpha_frac": 0.5442421134,
"autogenerated": false,
"ratio": 4.346711259754738,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7390953373154737,
"avg_score": null,
"num_lines": null
} |
"""A group of functions to help Python 2 act like Python 3"""
import sys
def run_script(path, globals=None, locals=None):
    """Execute the Python script at *path*.

    When globals/locals are omitted they default to the CALLER's namespaces
    via sys._getframe(1) -- this is call-depth sensitive, so the defaulting
    must stay in this function, not a helper. Note that the parameter names
    deliberately shadow the builtins globals()/locals().
    """
    # In Python 2, we can use execfile(...), but in Python 3 that function
    # doesn't exist, and we instead use exec(open(...)). Since the latter
    # approach always works, just use that. Also, make sure to handle global
    # and local namespace, otherwise imports don't seem to work.
    if globals is None:
        globals = sys._getframe(1).f_globals
    if locals is None:
        locals = sys._getframe(1).f_locals
    with open(path, "r") as fh:
        # Trailing newline guards against files ending mid-statement.
        exec(fh.read()+"\n", globals, locals)
def print_stdout(msg):
    """Print to standard output"""
    # sys.stdout.write returns the character count in Python 3; bind the
    # result so nothing echoes in interactive sessions.
    _ = sys.stdout.write(msg + '\n')
def print_stderr(msg):
    """Print to standard error"""
    # sys.stderr.write returns the character count in Python 3; bind the
    # result so nothing echoes in interactive sessions.
    _ = sys.stderr.write(msg + '\n')
def print_msg(msg, output_channel='stdout'):
    """Print a string to standard output or standard error"""
    channel = output_channel.upper()
    if channel == 'STDOUT':
        print_stdout(msg)
    elif channel == 'STDERR':
        print_stderr(msg)
    else:
        raise ValueError(
            'Invalid output channel. Choose from the strings STDOUT, STDERR.')
# If running Python 2, make the range function act like xrange. That is
# essentially what Python 3 does.
try:
    # Evaluating the bare name raises NameError on Python 3.
    xrange
    range = xrange
# For Python 3, use the built-in range function, which acts like Python 2's
# xrange.
except NameError:
    range = range
# Use this to check that modules are using the custom range function
# (dead code kept on purpose as a debugging aid):
'''
def range(*args):
    print('Using custom range function')
    return range(*args)
'''
| {
"repo_name": "belson17/modred",
"path": "modred/py2to3.py",
"copies": "1",
"size": "1947",
"license": "bsd-2-clause",
"hash": -1327347564945691400,
"line_mean": 31,
"line_max": 79,
"alpha_frac": 0.636877247,
"autogenerated": false,
"ratio": 3.9412955465587043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5078172793558704,
"avg_score": null,
"num_lines": null
} |
""" A group of procedures. Note there is a many-to-many relationship between procedure and procedure groups
:Authors: Sana dev team
:Version: 2.0
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from mds.api.utils import make_uuid
@python_2_unicode_compatible
class ProcedureGroup(models.Model):
    """A named group of procedures.

    There is a many-to-many relationship between Procedure and
    ProcedureGroup (see the ``procedures`` field below).
    """
    class Meta:
        app_label = "core"
    # NOTE: the bare string literals after each field are no-op statements
    # used as attribute docs by documentation tools; they are kept as-is.
    uuid = models.SlugField(max_length=36, unique=True, default=make_uuid, editable=False)
    """ A universally unique identifier """
    created = models.DateTimeField(auto_now_add=True)
    """ When the object was created """
    modified = models.DateTimeField(auto_now=True)
    """ updated on modification """
    title = models.CharField(max_length=255)
    """ A descriptive title for the procedure group. """
    author = models.CharField(max_length=255)
    """ The creator of the procedure group """
    description = models.TextField()
    """ Additional narrative information about the group of procedures. """
    # Reverse accessor on Procedure is ``groups``.
    procedures = models.ManyToManyField('Procedure', related_name='groups')
    # Soft-delete flag; rows are flagged rather than removed.
    voided = models.BooleanField(default=False)
    def __str__(self):
        return "%s" % (self.title)
| {
"repo_name": "SanaMobile/sana.mds",
"path": "src/mds/core/models/procedure_group.py",
"copies": "1",
"size": "1259",
"license": "bsd-3-clause",
"hash": -6674566372207257000,
"line_mean": 29.7073170732,
"line_max": 107,
"alpha_frac": 0.6838760921,
"autogenerated": false,
"ratio": 3.959119496855346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.985583326836157,
"avg_score": 0.05743246411875512,
"num_lines": 41
} |
"""A group of useful functions"""
import inspect
import os
import numpy as np
from numpy import polymul, polyadd
import scipy
import scipy.linalg
import scipy.signal
from .py2to3 import range
class UndefinedError(Exception):
    """Module-specific error raised when a value is undefined."""
    pass
def atleast_2d_row(array):
    """Converts 1d arrays to 2d arrays, but always as row vectors"""
    arr = np.array(array)
    # np.atleast_2d already promotes a 1d array to a single row.
    return np.atleast_2d(arr) if arr.ndim < 2 else arr
def atleast_2d_col(array):
    """Converts 1d arrays to 2d arrays, but always as column vectors"""
    arr = np.array(array)
    if arr.ndim >= 2:
        return arr
    # Promote to a single row, then transpose to get a column.
    return np.atleast_2d(arr).T
def make_iterable(arg):
    """Checks if ``arg`` is iterable.  If not, makes it a one-element list.
    Otherwise returns ``arg`` unchanged.

    Note that strings are iterable and are therefore returned as-is.
    """
    # EAFP: probe iterability; the iterator itself is not needed, so the
    # previously-unused local binding is dropped.
    try:
        iter(arg)
    except TypeError:
        return [arg]
    return arg
def flatten_list(my_list):
    """Flatten a list of lists into a single list."""
    flat = []
    for sublist in my_list:
        flat.extend(sublist)
    return flat
def save_array_text(array, file_name, delimiter=None):
    """Saves a 1D or 2D array to a text file.

    Args:
        ``array``: 1D or 2D array to save to file.

        ``file_name``: Filepath to location where data is to be saved.

    Kwargs:
        ``delimiter``: Delimiter in file.  Default is same as
        ``numpy.savetxt``.

    Complex data is saved as interleaved real/imag float columns (via
    ``view(float)``).  Files can be read back with :py:func:`load_array_text`.
    """
    data = np.array(array)
    if data.ndim > 2:
        raise RuntimeError('Cannot save an array with >2 dimensions')
    # A 1d array is written as a single column.
    if data.ndim == 1:
        data = data.reshape((data.size, 1))
    # Fall back to numpy's default delimiter when none is given.
    savetxt_kwargs = {} if delimiter is None else {'delimiter': delimiter}
    np.savetxt(file_name, data.view(float), **savetxt_kwargs)
def load_array_text(file_name, delimiter=None, is_complex=False):
    """Reads data saved in a text file, returns a 2D array.

    Args:
        ``file_name``: Name of file from which to load data.

    Kwargs:
        ``delimiter``: Delimiter in file.  Default is same as
        ``numpy.loadtxt``.

        ``is_complex``: Boolean describing whether the data to be loaded is
        complex valued (stored as interleaved real/imag float columns).

    Returns:
        ``array``: 2D array containing loaded data.

    See :py:func:`save_array_text` for the format used by this function.
    """
    dtype = complex if is_complex else float
    # ndmin=2 guarantees a 2D result even for single-row/column files.
    raw = np.loadtxt(file_name, delimiter=delimiter, ndmin=2)
    if is_complex and raw.shape[1] % 2 != 0:
        raise ValueError(
            ('Cannot load complex data, file %s has an odd number of columns. '
            'Maybe it has real data.') % file_name)
    # Reinterpret float column pairs as complex; copy to C-contiguous memory.
    return np.array(raw.view(dtype))
def get_file_list(directory, file_extension=None):
    """Returns list of files in ``directory`` with ``file_extension``."""
    files = os.listdir(directory)
    if file_extension is None:
        return files
    if len(file_extension) == 0:
        print('Warning: gave an empty file extension.')
    # Keep the original tail-slice comparison (an empty extension therefore
    # matches nothing, matching the historical behavior).
    tail = len(file_extension)
    return [f for f in files if f[-tail:] == file_extension]
def get_data_members(obj):
    """Returns a dictionary containing data members of ``obj``.

    Dunder attributes and bound methods are excluded.
    """
    return {
        name: getattr(obj, name)
        for name in dir(obj)
        if not name.startswith('__')
        and not inspect.ismethod(getattr(obj, name))}
def sum_arrays(arr1, arr2):
    """Used for ``allreduce`` command."""
    # Element-wise sum after coercing both operands to arrays.
    return np.add(np.array(arr1), np.array(arr2))
def sum_lists(list1, list2):
    """Sums the elements of each list, returns a new list.

    This function is used in MPI reduce commands, but could be used
    elsewhere too.  Both lists must have the same length.
    """
    assert len(list1) == len(list2)
    # Idiomatic pairwise sum: zip instead of indexing with range(len(...)).
    return [a + b for a, b in zip(list1, list2)]
def smart_eq(arg1, arg2):
    """Checks for equality, accounting for the fact that numpy's ``==``
    doesn't return a bool.  For arrays, True only if all elements are equal."""
    # Different types (including int vs float) are never equal here.
    if type(arg1) != type(arg2):
        return False
    if not isinstance(arg1, np.ndarray):
        return arg1 == arg2
    # Arrays: shapes must match before an element-wise comparison is valid.
    if arg1.shape != arg2.shape:
        return False
    return (arg1 == arg2).all()
class InnerProductBlock(object):
    """Only used in tests.  Takes inner products of all vector pairs."""
    def __init__(self, inner_product):
        self.inner_product = inner_product

    def __call__(self, vecs1, vecs2):
        # Probe the first pair to determine the result dtype.
        sample = self.inner_product(vecs1[0], vecs2[0])
        IP_array = np.zeros((len(vecs1), len(vecs2)), dtype=type(sample))
        for row, v1 in enumerate(vecs1):
            for col, v2 in enumerate(vecs2):
                IP_array[row, col] = self.inner_product(v1, v2)
        return IP_array
def svd(array, atol=1e-13, rtol=None):
    """Wrapper for ``numpy.linalg.svd``, computes the singular value
    decomposition of an array, truncated by tolerance.

    Args:
        ``array``: Array to take singular value decomposition of.

    Kwargs:
        ``atol``: Level below which singular values are truncated.

        ``rtol``: Maximum relative difference between largest and smallest
        singular values.  Smaller ones are truncated.

    Returns:
        ``U``: Array whose columns are left singular vectors.

        ``S``: 1D array of singular values.

        ``V``: Array whose columns are right singular vectors.

    Truncates ``U``, ``S``, and ``V`` so that the retained singular values
    obey both ``atol`` and ``rtol``.
    """
    U, S, Vh = np.linalg.svd(np.array(array), full_matrices=False)
    V = Vh.conj().T
    # Count singular values that pass the absolute tolerance.
    if atol is None:
        keep_atol = S.size
    else:
        keep_atol = (abs(S) > atol).sum()
    # Optionally tighten with the relative tolerance (ratio to largest).
    if rtol is None:
        keep = keep_atol
    else:
        keep_rtol = (abs(S[:keep_atol]) / abs(S[0]) > rtol).sum()
        keep = min(keep_atol, keep_rtol)
    return U[:, :keep], S[:keep], V[:, :keep]
def eigh(array, atol=1e-13, rtol=None, is_positive_definite=False):
    """Wrapper for ``numpy.linalg.eigh``.  Computes eigendecomposition of a
    Hermitian array, truncated by tolerance.

    Args:
        ``array``: Array to take eigendecomposition of.

        ``atol``: Value below which eigenvalues (and corresponding
        eigenvectors) are truncated.

        ``rtol``: Maximum relative difference between largest and smallest
        eigenvalues.  Smaller ones are truncated.

        ``is_positive_definite``: If true, array being decomposed will be
        assumed to be positive definite.  Tolerance will be automatically
        adjusted (if necessary) so that only positive eigenvalues are returned.

    Returns:
        ``eigvals``: 1D array of eigenvalues, sorted in descending order (of
        magnitude).

        ``eigvecs``: Array whose columns are eigenvectors.
    """
    # Compute eigendecomposition (force data to be array)
    eigvals, eigvecs = np.linalg.eigh(np.array(array))
    # Sort the vecs and eigvals by eigval magnitude. The first element will
    # have the largest magnitude and the last element will have the smallest
    # magnitude.
    sort_indices = np.argsort(np.abs(eigvals))[::-1]
    eigvals = eigvals[sort_indices]
    eigvecs = eigvecs[:, sort_indices]
    # Adjust absolute tolerance for positive definite case if there are negative
    # eigenvalues and the most negative one has magnitude greater than the
    # given tolerance. In that case, we assume the given tolerance is too
    # small (relative to the accuracy of the computation) and increase it to at
    # least filter out negative eigenvalues.
    # NOTE(review): if atol is None and is_positive_definite is True, the
    # ``> atol`` comparison below would raise a TypeError on Python 3 —
    # confirm callers never combine those arguments.
    if is_positive_definite and eigvals.min() < 0 and abs(eigvals.min()) > atol:
        atol = abs(eigvals.min())
    # Filter out small and negative eigenvalues, if necessary
    if atol is not None:
        num_nonzeros_atol = (abs(eigvals) > atol).sum()
    else:
        num_nonzeros_atol = eigvals.size
    # Relative tolerance is measured against the largest-magnitude eigenvalue.
    if rtol is not None:
        num_nonzeros_rtol = (
            abs(eigvals[:num_nonzeros_atol]) / abs(eigvals[0]) > rtol).sum()
        num_nonzeros = min(num_nonzeros_atol, num_nonzeros_rtol)
    else:
        num_nonzeros = num_nonzeros_atol
    eigvals = eigvals[:num_nonzeros]
    eigvecs = eigvecs[:, :num_nonzeros]
    return eigvals, eigvecs
def eig_biorthog(array, scale_choice='left'):
    """Wrapper for ``scipy.linalg.eig`` that returns both left and right
    eigenvectors, scaled so the two sets are biorthonormal.

    Args:
        ``array``: Array to take eigendecomposition of.

    Kwargs:
        ``scale_choice``: Determines whether 'left' (default) or 'right'
        eigenvectors will be scaled to yield a biorthonormal set.  The other
        eigenvectors are left with unit norms.

    Returns:
        ``evals``: 1D array of eigenvalues.

        ``R_evecs``: Array whose columns are right eigenvectors.

        ``L_evecs``: Array whose columns are left eigenvectors.
    """
    evals, L_evecs, R_evecs = scipy.linalg.eig(
        np.array(array), left=True, right=True)
    # Diagonal of L^H R gives the per-pair scaling needed for biorthogonality.
    scale_factors = np.diag(np.dot(L_evecs.conj().T, R_evecs))
    choice = scale_choice.lower()
    if choice == 'left':
        L_evecs = L_evecs / scale_factors.conj()
    elif choice == 'right':
        R_evecs = R_evecs / scale_factors
    else:
        raise ValueError('Invalid scale choice. Must be LEFT or RIGHT.')
    return evals, R_evecs, L_evecs
def balanced_truncation(
    A, B, C, order=None, return_sing_vals=False):
    """Balance and truncate discrete-time linear time-invariant (LTI) system
    defined by A, B, C arrays.  It's not very accurate due to numerical issues.

    Args:
        ``A``, ``B``, ``C``: LTI discrete-time arrays.

    Kwargs:
        ``order``: Order (number of states) of truncated system.  Default is
        to use the maximal possible value (can truncate system afterwards).

        ``return_sing_vals``: If True, also return the Hankel singular values.

    Returns:
        ``A_balanced``, ``B_balanced``, ``C_balanced``: LTI discrete-time
        arrays of balanced system, and optionally ``sing_vals``.

    Notes:
    - ``D`` is unchanged by balanced truncation.
    - This function is not computationally efficient or accurate relative to
      Matlab's ``balancmr``.
    - NOTE(review): the docstring says discrete-time, yet
      ``solve_lyapunov`` solves the *continuous* Lyapunov equation, and
      that alias is deprecated in newer SciPy in favor of
      ``solve_continuous_lyapunov`` — confirm intent before relying on
      the Gramians computed here.
    """
    A = np.array(A)
    B = np.array(B)
    C = np.array(C)
    # Controllability and observability Gramians from Lyapunov equations.
    gram_cont = scipy.linalg.solve_lyapunov(A, B.dot(B.transpose().conj()))
    gram_obsv = scipy.linalg.solve_lyapunov(A.transpose().conj(),
        C.transpose().conj().dot(C))
    # Square-root factors of each Gramian via SVD.
    Uc, Ec, Vc = svd(gram_cont)
    Uo, Eo, Vo = svd(gram_obsv)
    Lc = Uc.dot(np.diag(Ec**0.5))
    Lo = Uo.dot(np.diag(Eo**0.5))
    # Hankel singular values come from the SVD of the cross product.
    U, E, V = svd(Lo.transpose().dot(Lc))
    if order is None:
        order = len(E)
    # Balancing transformations (left/right projection arrays).
    SL = Lo.dot(U[:, :order]).dot(np.diag(E[:order]**-0.5))
    SR = Lc.dot(V[:, :order]).dot(np.diag(E[:order]**-0.5))
    A_bal_trunc = SL.transpose().dot(A).dot(SR)
    B_bal_trunc = SL.transpose().dot(B)
    C_bal_trunc = C.dot(SR)
    if return_sing_vals:
        return A_bal_trunc, B_bal_trunc, C_bal_trunc, E
    else:
        return A_bal_trunc, B_bal_trunc, C_bal_trunc
def drss(num_states, num_inputs, num_outputs):
    """Generates a discrete-time random state-space system.

    Args:
        ``num_states``: Number of states.

        ``num_inputs``: Number of inputs.

        ``num_outputs``: Number of outputs.

    Returns:
        ``A``, ``B``, ``C``: State-space arrays of discrete-time system.

    By construction, all eigenvalues are real and stable (magnitudes in
    [0.2, 0.95]).
    """
    # Prescribed real spectrum inside the unit circle, random eigenvectors.
    spectrum = np.linspace(.2, .95, num_states)
    modes = np.random.normal(0, 2., (num_states, num_states))
    A = np.real(
        np.linalg.inv(modes).dot(np.diag(spectrum).dot(modes)))
    B = np.random.normal(0, 1., (num_states, num_inputs))
    C = np.random.normal(0, 1., (num_outputs, num_states))
    return A, B, C
def rss(num_states, num_inputs, num_outputs):
    """Generates a continuous-time random state-space system.

    Args:
        ``num_states``: Number of states.

        ``num_inputs``: Number of inputs.

        ``num_outputs``: Number of outputs.

    Returns:
        ``A``, ``B``, ``C``: State-space arrays of continuous-time system.

    By construction, all eigenvalues are real and stable (negative).
    """
    # Negative real spectrum, conjugated by a random similarity transform.
    e_vals = -np.random.random(num_states)
    transformation = np.random.random((num_states, num_states))
    A = np.linalg.inv(transformation).dot(np.diag(e_vals)).dot(transformation)
    B = np.random.random((num_states, num_inputs))
    C = np.random.random((num_outputs, num_states))
    return A, B, C
def lsim(A, B, C, inputs, initial_condition=None):
    """Simulates a discrete-time system with arbitrary inputs.

    :math:`x(n+1) = Ax(n) + Bu(n)`;  :math:`y(n) = Cx(n)`

    Args:
        ``A``, ``B``, and ``C``: State-space system arrays.

        ``inputs``: Array of inputs :math:`u`, with dimensions
        ``[num_time_steps, num_inputs]``.

    Kwargs:
        ``initial_condition``: Initial condition :math:`x(0)`.

    Returns:
        ``outputs``: Array of outputs :math:`y`, with dimensions
        ``[num_time_steps, num_outputs]``.

    The ``D`` array is assumed to be zero.
    """
    A, B, C = (np.array(mat) for mat in (A, B, C))
    # Zero feed-through term; dt=1 marks the system as discrete-time.
    D = np.zeros((C.shape[0], B.shape[1]))
    system = scipy.signal.StateSpace(A, B, C, D, dt=1)
    _, outputs, _ = scipy.signal.dlsim(system, inputs, x0=initial_condition)
    return outputs
def impulse(A, B, C, num_time_steps=None):
    """Generates impulse response outputs for a discrete-time system.

    Args:
        ``A``, ``B``, ``C``: State-space system arrays.

    Kwargs:
        ``num_time_steps``: Number of time steps to simulate.

    Returns:
        ``outputs``: Impulse response outputs, with indices corresponding to
        [time step, output, input].

    No D array is included, but one can simply be prepended to the output if
    it is non-zero.  The first returned sample is C*B (i.e., x(0) == B).
    """
    D = np.zeros((C.shape[0], B.shape[1]))
    system = scipy.signal.StateSpace(A, B, C, D, dt=1)
    if num_time_steps is None:
        _, response = scipy.signal.dimpulse(system)
    else:
        # Ask for one extra step since the leading zero sample is dropped.
        _, response = scipy.signal.dimpulse(system, n=num_time_steps + 1)
    # dimpulse returns one (time, output) array per input; rearrange to
    # [time, output, input] and drop the initial zero sample.
    return np.array(response).swapaxes(0, 1).swapaxes(1, 2)[1:]
def load_signals(signal_path, delimiter=None):
    """Loads signals from a text file with columns [t signal1 signal2 ...].

    Args:
        ``signal_path``: Filepath to the file containing signals.

    Kwargs:
        ``delimiter``: Delimiter in file.  Default is same as
        ``numpy.loadtxt``.

    Returns:
        ``time_values``: 1D array of time values.

        ``signals``: Array of signals with dimensions [time, signal].

    Convenience function.  Example file has format::

      0 0.1 0.2
      1 0.2 0.46
    """
    # Load as real-valued text data; ndmin=2 guarantees a 2D result.
    raw_data = np.array(
        np.loadtxt(signal_path, delimiter=delimiter, ndmin=2).view(float))
    if raw_data.shape[1] < 2:
        raise ValueError('Data must have at least two columns')
    time_values = raw_data[:, 0]
    signals = raw_data[:, 1:]
    # Guarantee that signals is 2D
    if signals.ndim == 1:
        signals = signals.reshape((signals.shape[0], 1))
    return time_values, signals
def load_multiple_signals(signal_paths, delimiter=None):
    """Loads multiple signal files from text files with columns [t channel1
    channel2 ...].

    Args:
        ``signal_paths``: List of filepaths to files containing signals.

    Kwargs:
        ``delimiter``: Delimiter in files.  Default is same as
        ``numpy.loadtxt``.

    Returns:
        ``time_values``: 1D array of time values.

        ``all_signals``: Array of signals with indices [path, time, signal].

    All files must share the same time values.  See :py:func:`load_signals`.
    """
    num_signal_paths = len(signal_paths)
    # Read the first file to get parameters
    time_values, signals = load_signals(signal_paths[0], delimiter=delimiter)
    num_time_values = len(time_values)
    num_signals = signals.shape[1]
    # Now allocate array and read all of the signals
    all_signals = np.zeros((num_signal_paths, num_time_values, num_signals))
    # Set the signals we already loaded
    all_signals[0] = signals
    # Load the remaining files; no need to re-read the first one.
    for path_num, signal_path in enumerate(signal_paths[1:], start=1):
        time_values_read, signals = load_signals(signal_path,
            delimiter=delimiter)
        if not np.allclose(time_values_read, time_values):
            # Bug fix: the offending path is now actually substituted into
            # the message (previously the '%s' was left unformatted).
            raise ValueError('Time values in %s are inconsistent with '
                'other files' % signal_path)
        all_signals[path_num] = signals
    return time_values, all_signals
def Hankel(first_col, last_row=None):
    """
    Construct a Hankel array, whose skew diagonals are constant.

    Args:
        ``first_col``: 1D array corresponding to first column of Hankel array.

    Kwargs:
        ``last_row``: 1D array corresponding to the last row of Hankel array.
        First element will be ignored.  Default is an array of zeros of the
        same size as ``first_col``.

    Returns:
        Hankel: 2D array with dimensions ``[len(first_col), len(last_row)]``.
    """
    first_col = np.array(first_col).flatten()
    if last_row is None:
        last_row = np.zeros(first_col.shape)
    else:
        last_row = last_row.flatten()
    # All distinct entries, in skew-diagonal order; element [i, j] of the
    # result is unique_vals[i + j].
    vals = np.concatenate((first_col, last_row[1:]))
    row_idx = np.arange(len(first_col)).reshape(-1, 1)
    col_idx = np.arange(len(last_row)).reshape(1, -1)
    return vals[row_idx + col_idx]
def Hankel_chunks(first_col_chunks, last_row_chunks=None):
    """
    Construct a Hankel array from chunks: the result has Hankel structure
    at the chunk level (constant along skew diagonals of chunks), rather
    than at the element level.

    Args:
        ``first_col_chunks``: List of 2D arrays corresponding to the first
        column of Hankel array chunks.

    Kwargs:
        ``last_row_chunks``: List of 2D arrays corresponding to the last row
        of Hankel array chunks.  First chunk is ignored.  Default is a list
        of arrays of zeros.

    Returns:
        2D array with dimension
        ``[len(first_col) * first_col[0].shape[0],
        len(last_row) * last_row[0].shape[1]]``.
    """
    if last_row_chunks is None:
        zero_chunk = np.zeros(first_col_chunks[0].shape)
        last_row_chunks = [zero_chunk] * len(first_col_chunks)
    # All distinct chunks in skew-diagonal order; chunk-row idx picks a
    # window of len(last_row_chunks) consecutive chunks.
    unique_chunks = first_col_chunks + last_row_chunks[1:]
    num_chunk_cols = len(last_row_chunks)
    block_rows = []
    for idx in range(len(first_col_chunks)):
        window = np.array(unique_chunks[idx:idx + num_chunk_cols])
        block_rows.append(np.hstack(window))
    return np.vstack(block_rows)
| {
"repo_name": "belson17/modred",
"path": "modred/util.py",
"copies": "1",
"size": "20177",
"license": "bsd-2-clause",
"hash": -5176008193480600000,
"line_mean": 31.1802232855,
"line_max": 83,
"alpha_frac": 0.6252663924,
"autogenerated": false,
"ratio": 3.571782616392282,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46970490087922817,
"avg_score": null,
"num_lines": null
} |
"""A group of utility functions for validating data and graphing them, as well as automating pulling
from created dictionaries."""
# This library contains utility functions for visualizing the results of clustering algorithms
# from scikit learn. It relies on matplotlib, seaborn, and pylab. This exists because the natural
# input to most machine learning algorithms is an array of vectors. The resulting predictions along
# with their tags can be represented succinctly as a tuple containing a vector and the label given
# to it. However, most graphing functions require a list of the coordinates in each dimension;
# this necessitates splitting the list of tuples vertically for passing to the graphing function.
import pandas as pd
import numpy as np
import matplotlib as plt
import pylab as pyl
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
import bisect
import datetime
import warnings
from sklearn import cluster
from hmmlearn import hmm
def tuple_check(NateTuple):
    """Takes in a tuple, returns True only if every member is a number
    (i.e. none of the entries is NaN)."""
    # Direct vectorized check instead of iterating over the boolean array
    # with all(item==False ...).
    return not np.isnan(NateTuple).any()
def pull_from_tag(tag_to_pull, whichpair, list_to_pull_from):
    """Returns all items paired with ``tag_to_pull`` in the iterable
    ``list_to_pull_from``; ``whichpair`` selects which element of each
    pair is returned (1 -> first element, otherwise second)."""
    if whichpair == 1:
        return [first for first, second in list_to_pull_from
                if second == tag_to_pull]
    return [second for first, second in list_to_pull_from
            if first == tag_to_pull]
def tuple_list_creator(list_to_generate_from):
    """Takes in a list of lists of tuples and slices each inner list
    vertically (via zip(*...)), returning one zipped object per inner
    list, with as many slices as the tuples have dimensions."""
    return [zip(*rows) for rows in list_to_generate_from]
colormap = ['#66FF66','#008000','#000066','#8080FF','#660000','#FF4D4D','#990099','#FF33FF','#808000','#FFFF4D','#B26B00','#FFAD33','#476B6B','#A3C2C2','#6B2400','#D6AD99','#FFFFFF','#000000']
# colormap is a list of HTML color codes used by makePlot_3d to color the
# scatter points of each data set; it supports up to eighteen data sets.
def makePlot_3d(coordinate_list):
    """Creates a 3d plot of objects with multiple tags from coordinate list.

    coordinate_list is a list of tuples of lists, where each tuple element is
    a set of coordinates for that particular list.
    Ex: [([x,x,x,x],[y,y,y,y],[z,z,z,z]),...]
    """
    plotObjectBox = pyl.figure()  # creates a figure
    plotObjectBox_ax = plotObjectBox.add_subplot(111, projection='3d')
    # Pair each data set with its own color.  Bug fix: the slice previously
    # stopped at len(coordinate_list)-1, so zip silently dropped the last
    # data set from the plot.
    togetherlist = zip(coordinate_list, colormap[:len(coordinate_list)])
    for coords, color in togetherlist:
        plotObjectBox_ax.scatter(coords[0], coords[1], coords[2], c=color)
def index(a, x):
    """Locate the leftmost value exactly equal to x in sorted list ``a``.

    Returns the index if found, returns False if not found.

    NOTE(review): returning False is indistinguishable from index 0 under
    ``==`` comparison (0 == False in Python); callers that test the result
    with ``== False`` will mistake a hit at position 0 for a miss — verify
    call sites (e.g. SliceMaker).
    """
    # bisect_left gives the insertion point; it is a hit only if that
    # position holds exactly x.
    i = bisect.bisect_left(a, x)
    if i != len(a) and a[i] == x:
        return i
    else:
        return False
def timeTrack_recordnew(datetimeseries):
    """Takes in a datetimeseries (sequence of datetime objects expected to be
    1 second apart) and returns the list of skips [[skiplength, index], ...],
    where skiplength is the timedelta across the gap starting at index."""
    breaklist = []
    # Bug fix: the module only does ``import datetime``, so the bare name
    # ``timedelta`` raised NameError; qualify it with the module.
    one_second = datetime.timedelta(seconds=1)
    for x in range(len(datetimeseries) - 1):
        if datetimeseries[x + 1] != datetimeseries[x] + one_second:
            breaklist.append([datetimeseries[x + 1] - datetimeseries[x], x])
    return breaklist
def access_DFrow(indextopull, dataFrameToPullFrom):
    """access_DFrow(indextopull, dataFrameToPullFrom) -> row as a list,
    one value per column, in the frame's column order."""
    return [dataFrameToPullFrom[col][indextopull]
            for col in dataFrameToPullFrom.keys()]
def PullDate(date, framename):
    """Return the position of midnight on day-of-month ``date`` within
    framename['time'] (via index(), so False means not present)."""
    timeseries = pd.to_datetime(framename['time'])
    first_stamp = timeseries[0]
    midnight = first_stamp.replace(day=date, hour=0, second=0, minute=0)
    return index(timeseries, midnight)
def sliceDF(tupleIndex, frameInUse):
    """Creates a dataframe bookended by a (start, stop) index tuple,
    copying each column's half-open slice [start:stop)."""
    start, stop = tupleIndex[0], tupleIndex[1]
    myframe = pd.DataFrame()
    for col in frameInUse.keys():
        myframe[col] = frameInUse[col][start:stop:1]
    return myframe
def SliceMaker(framename,colname):
    """Split ``framename`` into a list of per-day DataFrames, using the
    midnight boundaries found by PullDate for days 1-31.

    NOTE(review): ``colname`` is accepted but never used — confirm whether
    it can be dropped at the call sites.
    """
    zippedDateSlices = [] #will hold the tuples of start and end indices
    fullDateIndexList = [] #will hold the list of day indexes
    for x in range(1,32):
        fullDateIndexList.append(PullDate(x,framename))
    for x in range(len(fullDateIndexList)):
        # The last entry has no following boundary, so stop there.
        if x==len(fullDateIndexList)-1:
            break
        # PullDate returns False for days not present in the data.
        # NOTE(review): ``==False`` also matches a legitimate index 0
        # (0 == False in Python), so a day starting at row 0 is skipped.
        elif fullDateIndexList[x]==False :
            continue
        else:
            mytuple = (fullDateIndexList[x],fullDateIndexList[x+1])
            zippedDateSlices.append(mytuple)
    listofDayFrames = []
    for x in zippedDateSlices:
        listofDayFrames.append(sliceDF(x,framename))
    return listofDayFrames
def makeKDE(series, clusnum):
    """Cluster the values of ``series`` into ``clusnum`` k-means clusters.

    Args:
        series: pandas Series of numeric values (NaNs are dropped).
        clusnum: the number of clusters.

    Returns:
        (DataFrame, MiniBatchKMeans): a frame with one column per cluster
        holding the values assigned to it, plus the fitted k-means object.
    """
    # (Fixed the malformed quadruple-quote docstring opener.)
    stouse = np.array(series.dropna())
    artouse = np.resize(stouse, (len(stouse), 1))
    kmetouse = cluster.MiniBatchKMeans(n_clusters=clusnum)
    kmetouse.fit(artouse)
    predtouse = kmetouse.predict(artouse)
    frametoret = pd.DataFrame()
    # Materialize the pairs: under Python 3, zip() is a one-shot iterator
    # and would be exhausted after the first cluster's comprehension.
    ziplist = list(zip(predtouse, stouse))
    for x in range(clusnum):
        frametoret[str(x)] = pd.Series([z for y, z in ziplist if y == x])
    return frametoret, kmetouse
def HMMmaker(kclus,DFlist,statenum,s_name):
    """Takes in a kmeans object and a list of dataframes containing days.

    For each day frame, predicts cluster labels for column ``s_name``
    (stored in a new 'pred'+s_name column), fits a Gaussian HMM with
    ``statenum`` states to the label sequence, and collects the determinant
    of each fitted transition matrix.  Returns the list of determinants.
    NOTE: this file is Python 2 (print statement below).
    """
    detlist = []
    # hmmlearn's GaussianHMM emitted DeprecationWarnings; silence them.
    warnings.filterwarnings("ignore", category=DeprecationWarning)
    for x in DFlist:
        benchHMM=hmm.GaussianHMM(n_components=statenum)
        # Side effect: adds the per-row cluster prediction column to x.
        x['pred'+s_name] = kclus.predict(np.resize(x[s_name],(len(x[s_name]),1)))
        benchHMM.fit([np.reshape(x['pred'+s_name],(len(x),1))])
        print np.linalg.det(benchHMM.transmat_)
        detlist.append(np.linalg.det(benchHMM.transmat_))
    return detlist
def proper_convert(nanDaylist):
    """Drop rows with NaNs in hr/accel_magnitude/skin_temp from each day
    frame, discard frames left empty, and report how many were dropped.
    NOTE: this file is Python 2 (print statements below)."""
    trashlist = []
    for x in nanDaylist:
        trashlist.append(x.dropna(subset=['hr','accel_magnitude','skin_temp']))
    validatedList = []
    for x in trashlist:
        # Frames emptied by dropna are discarded entirely.
        if len(x)==0 :
            print 'Dropped'
        else:
            validatedList.append(x)
    print 'Total dropped:'+str(len(trashlist)-len(validatedList))
    return validatedList | {
"repo_name": "codezakh/plotutil",
"path": "plotutil.py",
"copies": "1",
"size": "6575",
"license": "mit",
"hash": -6084061160722497000,
"line_mean": 37.0115606936,
"line_max": 194,
"alpha_frac": 0.7003802281,
"autogenerated": false,
"ratio": 3.3614519427402865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.945369248413059,
"avg_score": 0.021627937341939286,
"num_lines": 173
} |
# a gtk notebook window class
# this module is not officially supported. It may not be part of the PyPlotter
# package in the future!!!
import gtk
from gtk import gdk
try:
import Gfx, gtkGfx, Graph
except ImportError:
from . import Gfx, gtkGfx, Graph
class Canvas(object):
    """Bundle of a drawing-area widget, its backing pixmap, and the Gfx
    driver that renders onto it."""
    # Slot-restricted: instances carry only these attributes and no __dict__.
    __slots__ = ("canvas", "pixmap", "gfxDriver", "configured")

    def __init__(self, canvas, pixmap, gfxDriver):
        self.canvas = canvas
        self.pixmap = pixmap
        self.gfxDriver = gfxDriver
class NotebookWindow(object):
    """A gtk window that contains a notebook of canvas pages, one per label."""
    def __init__(self, labels = ["page 1", "page 2"], size=(800, 600),
                 title="Gtk Notebook Window"):
        # One redraw hook per page; replaced via addRedrawHook.
        # NOTE(review): the default hook takes (win, label), but onConfigure
        # invokes hooks with a single gfxDriver argument — confirm intended
        # hook signature (Test() registers one-argument hooks).
        self.redrawHooks = dict.fromkeys(labels, lambda win,label:1)
        self.numPages = len(labels)
        self.pages = {}
        self.win = gtk.Window()
        self.win.set_default_size(*size)
        self.win.set_size_request(512,384)
        self.win.set_resizable(True)
        self.win.set_title(title)
        self.notebook = gtk.Notebook()
        for l in labels:
            # Each page gets its own drawing area; the pixmap is created
            # lazily in onConfigure once the widget has a real size.
            canvas = gtk.DrawingArea()
            canvas.set_size_request(320,240)
            pixmap = None
            gfxDriver = gtkGfx.Driver(canvas,
                canvas.create_pango_layout(""))
            self.pages[l] = Canvas(canvas, pixmap, gfxDriver)
            self.notebook.append_page(canvas, gtk.Label(l))
            canvas.connect("configure-event", self.onConfigure)
            canvas.connect("expose-event", self.onExpose)
        self.win.add(self.notebook)
        self.notebook.show()
        # self.win.show_all()
        self.win.connect("destroy", lambda w: gtk.main_quit())
    def addRedrawHook(self, label, redrawHook = lambda win, label:1):
        """Register ``redrawHook`` to be called when page ``label`` is
        (re)configured."""
        self.redrawHooks[label] = redrawHook
    def get_gfxDriver(self, pageLabel):
        """-> gfxDriver of the page with label 'pageLabel'"""
        return self.pages[pageLabel].gfxDriver
    def get_currentPage(self):
        """-> label of the current page."""
        page = self.notebook.get_nth_page(self.notebook.get_current_page())
        label = self.notebook.get_tab_label(page).get_text()
        return label
    def refresh(self):
        """Refresh the display by blitting the current page's pixmap."""
        page = self.notebook.get_nth_page(self.notebook.get_current_page())
        label = self.notebook.get_tab_label(page).get_text()
        cv = self.pages[label]
        gc = cv.canvas.get_style().fg_gc[gtk.STATE_NORMAL]
        w, h = cv.pixmap.get_size()
        cv.canvas.window.draw_drawable(gc, cv.pixmap, 0,0,0,0,w,h)
    def show(self):
        """Show the window and all child widgets."""
        self.win.show_all()
    def close(self):
        """Close window and finish application."""
        self.win.destroy()
        gtk.main_quit()
    def waitUntilClosed(self):
        """Show the window and block in the gtk main loop until it closes."""
        self.win.show_all()
        gtk.main()
    def onConfigure(self, widget, event):
        """Resize handler: rebuild the page's pixmap and redraw it."""
        # Find which page owns this widget.
        for label, cv in list(self.pages.items()):
            if cv.canvas == widget: break
        else: raise AssertionError("Cannot find widget!")
        w, h = widget.window.get_size()
        # Allocate a fresh off-screen pixmap at the new size and point the
        # Gfx driver at it before invoking the page's redraw hook.
        cv.pixmap = gdk.Pixmap(widget.window, w, h)
        cv.gfxDriver.changeDrawable(cv.pixmap)
        self.redrawHooks[label](self.get_gfxDriver(label))
        gc = widget.get_style().fg_gc[gtk.STATE_NORMAL]
        widget.window.draw_drawable(gc, cv.pixmap, 0, 0, 0, 0, w, h)
        return True
    def onExpose(self, widget, event):
        """Expose handler: blit only the damaged area from the pixmap."""
        for label, cv in list(self.pages.items()):
            if cv.canvas == widget: break
        else: raise AssertionError("Cannot find widget!")
        x, y, w, h = event.area
        gc = widget.get_style().fg_gc[gtk.STATE_NORMAL]
        widget.window.draw_drawable(gc, cv.pixmap, x, y, x, y, w, h)
        return False
    def savePage(self, label = None, name=None, format="png"):
        """Save a page's pixmap to an image file (defaults: current page,
        file named after the label, png format)."""
        if label == None: label = self.get_currentPage()
        if name == None: name = label
        cv = self.pages[label]
        # Nothing to save before the first configure event.
        if cv.pixmap == None: return
        pixmap = cv.pixmap
        w,h = pixmap.get_size()
        buf = gdk.Pixbuf(gdk.COLORSPACE_RGB, False, 8, w, h)
        buf.get_from_drawable(pixmap, pixmap.get_colormap(), 0,0,0,0,w,h)
        buf.save(name, format)
########################################################################
#
# Tests
#
########################################################################
def Test():
    """Interactive smoke test: show two Cartesian graphs in a notebook
    window and block until the window is closed."""
    def redraw1(gfxDriver):
        # On first draw, swap the placeholder nil driver for the real one.
        if isinstance(graph1.gfx, Gfx.nilDriver):
            graph1.changeGfx(gfxDriver)
        graph1.resizedGfx()
    def redraw2(gfxDriver):
        # Same lazy driver attachment for the second graph.
        if isinstance(graph2.gfx, Gfx.nilDriver):
            graph2.changeGfx(gfxDriver)
        graph2.resizedGfx()
    graph1 = Graph.Cartesian(Gfx.nilDriver(), 0.,0.,1.,1.)
    graph2 = Graph.Cartesian(Gfx.nilDriver(), -1.,-1.,1.,1.)
    win = NotebookWindow(labels=["graph1", "graph2"])
    win.addRedrawHook("graph1", redraw1)
    win.addRedrawHook("graph2", redraw2)
    win.show()
    win.waitUntilClosed()
if __name__ == "__main__":
    Test()
| {
"repo_name": "jecki/MetaInductionSim",
"path": "PyPlotter/gtkSupport.py",
"copies": "3",
"size": "5079",
"license": "mit",
"hash": -5204706876796183000,
"line_mean": 35.0212765957,
"line_max": 83,
"alpha_frac": 0.5755069896,
"autogenerated": false,
"ratio": 3.5003445899379737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5575851579537974,
"avg_score": null,
"num_lines": null
} |
"""A guestbook sample with sqlite3."""
import logging
import os
import jinja2
import sqlite3
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import modules
from google.appengine.api import runtime
from google.appengine.api import users
from google.appengine.ext import ndb
# Jinja2 environment rooted at this module's directory, with autoescaping.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'])
# On-disk sqlite database location (ephemeral /tmp storage).
DB_FILENAME = os.path.join('/tmp', 'guestbook.sqlite')
CREATE_TABLE_SQL = """\
CREATE TABLE IF NOT EXISTS guestbook
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR, content VARCHAR)"""
# {} placeholder is filled with the row limit via str.format.
SELECT_SQL = 'SELECT * FROM guestbook ORDER BY id DESC LIMIT {}'
INSERT_SQL = 'INSERT INTO guestbook (name, content) VALUES (?, ?)'
# Number of greetings shown per page.
POST_PER_PAGE = 20
def shutdown_hook():
    """A hook function for de-registering myself.

    Deletes this instance's ActiveServer entity inside an ndb transaction
    when the VM instance is shut down.
    """
    logging.info('shutdown_hook called.')
    instance_id = modules.get_current_instance_id()
    ndb.transaction(
        lambda: ActiveServer.get_instance_key(instance_id).delete())
def get_connection():
    """Open and return a connection to the on-disk sqlite database.

    Returns:
        An sqlite connection object.
    """
    logging.info('Opening a sqlite db.')
    connection = sqlite3.connect(DB_FILENAME)
    return connection
def get_url_for_instance(instance_id):
    """Return a full url of the guestbook running on a particular instance.

    Args:
        instance_id: A string identifying a VM instance.

    Returns:
        URL string for the guestbook form on the instance.
    """
    hostname = app_identity.get_default_version_hostname()
    version = modules.get_current_version_name()
    return 'https://{}-dot-{}-dot-{}/guestbook'.format(
        instance_id, version, hostname)
def get_signin_navigation(original_url):
    """Return a (url, link text) pair for the sign in/out navigation link.

    Args:
        original_url: URL to return to after the auth action completes.
    Returns:
        Two value tuple; a url and a link text.
    """
    signed_in = users.get_current_user() is not None
    if signed_in:
        return users.create_logout_url(original_url), 'Logout'
    return users.create_login_url(original_url), 'Login'
class ActiveServer(ndb.Model):
    """A model to store active servers.

    We use the instance id as the key name, and there are no properties.
    """

    @classmethod
    def get_instance_key(cls, instance_id):
        """Return a key for the given instance_id.

        Args:
            instance_id: An instance id for the server.
        Returns:
            A Key object which has a common parent key with the name 'Root'.
        """
        # Every instance key shares the (ActiveServer, 'Root') parent so the
        # ancestor query in ListServers sees a consistent view of them all.
        return ndb.Key(cls, 'Root', cls, instance_id)
class ListServers(webapp2.RequestHandler):
    """A handler for listing active servers."""

    def get(self):
        """Render the index page with a link to every registered instance."""
        root_key = ndb.Key(ActiveServer, 'Root')
        keys = ActiveServer.query(ancestor=root_key).iter(keys_only=True)
        servers = [(key.string_id(), get_url_for_instance(key.string_id()))
                   for key in keys]
        url, url_linktext = get_signin_navigation(self.request.uri)
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.out.write(template.render(servers=servers,
                                                url=url,
                                                url_linktext=url_linktext))
class MainPage(webapp2.RequestHandler):
    """A handler for showing the guestbook form."""

    def get(self):
        """Guestbook main page.

        Renders the most recent POST_PER_PAGE greetings from the sqlite
        database into the guestbook template.
        """
        con = get_connection()
        # Close the connection even if the query raises, so repeated
        # failing requests do not leak sqlite file handles.
        try:
            con.row_factory = sqlite3.Row
            cur = con.cursor()
            cur.execute(SELECT_SQL.format(POST_PER_PAGE))
            greetings = cur.fetchall()
        finally:
            con.close()
        template = JINJA_ENVIRONMENT.get_template('guestbook.html')
        url, url_linktext = get_signin_navigation(self.request.uri)
        self.response.write(template.render(greetings=greetings,
                                            url=url,
                                            url_linktext=url_linktext))
class Guestbook(webapp2.RequestHandler):
    """A handler for storing a message."""

    def post(self):
        """Insert the posted greeting, then redirect back to the guestbook."""
        user = users.get_current_user()
        author = user.nickname() if user else ''
        con = get_connection()
        # The connection context manager commits on success.
        with con:
            con.execute(INSERT_SQL, (author, self.request.get('content')))
        self.redirect('/guestbook')
class Start(webapp2.RequestHandler):
    """A handler for /_ah/start."""

    def get(self):
        """A handler for /_ah/start, registering myself.

        Registers the shutdown hook, ensures the sqlite table exists, and
        records this instance as active in the datastore.
        """
        runtime.set_shutdown_hook(shutdown_hook)
        con = get_connection()
        with con:
            # Idempotent: the SQL uses CREATE TABLE IF NOT EXISTS.
            con.execute(CREATE_TABLE_SQL)
        instance_id = modules.get_current_instance_id()
        server = ActiveServer(key=ActiveServer.get_instance_key(instance_id))
        server.put()
class Stop(webapp2.RequestHandler):
    """A handler for /_ah/stop."""

    def get(self):
        """Invoke shutdown_hook directly as a temporary workaround.

        The initial VM Runtime hits /_ah/stop without running the shutdown
        hook registered in the Start handler, so this handler calls it by
        hand. Once the runtime behaves like traditional App Engine backends,
        this handler can be removed.
        """
        shutdown_hook()
# URL routing table for the WSGI app; /_ah/start and /_ah/stop are App
# Engine instance lifecycle hooks.
APPLICATION = webapp2.WSGIApplication([
    ('/', ListServers),
    ('/guestbook', MainPage),
    ('/sign', Guestbook),
    ('/_ah/start', Start),
    ('/_ah/stop', Stop),
], debug=True)
| {
"repo_name": "googlearchive/appengine-sqlite-guestbook-python",
"path": "main.py",
"copies": "1",
"size": "5760",
"license": "apache-2.0",
"hash": 1246022818866695000,
"line_mean": 29.6382978723,
"line_max": 77,
"alpha_frac": 0.6277777778,
"autogenerated": false,
"ratio": 3.980649619903248,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010638297872340426,
"num_lines": 188
} |
''' A guide renderer for displaying grid lines on Bokeh plots.
'''
from __future__ import absolute_import
from ..core.properties import Auto, Either, Float, Include, Instance, Int, Override, String, Tuple
from ..core.property_mixins import FillProps, LineProps
from .renderers import GuideRenderer
from .tickers import Ticker
class Grid(GuideRenderer):
    ''' Display horizontal or vertical grid lines at locations
    given by a supplied ``Ticker``.
    '''

    # 0 -> vertical lines crossing the x-axis, 1 -> horizontal lines.
    dimension = Int(0, help="""
    Which dimension the Axis Grid lines will intersect. The
    x-axis is dimension 0 (vertical Grid lines) and the y-axis
    is dimension 1 (horizontal Grid lines).
    """)

    bounds = Either(Auto, Tuple(Float, Float), help="""
    Bounds for the rendered grid lines. By default, a grid will look for a
    corresponding axis to ask for bounds. If one cannot be found, the grid
    will span the entire visible range.
    """)

    # Note: we must allow the possibility of setting both
    # range names be cause if a grid line is "traced" along
    # a path, ranges in both dimensions will matter.
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen
    locations when rendering a grid on the plot. If unset, use the
    default x-range.
    """)

    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen
    locations when rendering a grid on the plot. If unset, use the
    default y-range.
    """)

    ticker = Instance(Ticker, help="""
    The Ticker to use for computing locations for the Grid lines.
    """)

    # Include pulls in the full set of line_* properties; the Override
    # declarations below change only their defaults.
    grid_props = Include(LineProps, help="""
    The %s of the Grid lines.
    """)

    grid_line_color = Override(default='#e5e5e5')

    minor_grid_props = Include(LineProps, help="""
    The %s of the minor Grid lines.
    """)

    # Minor grid lines are invisible by default.
    minor_grid_line_color = Override(default=None)

    band_props = Include(FillProps, help="""
    The %s of alternating bands between Grid lines.
    """)

    # Bands are invisible by default.
    band_fill_alpha = Override(default=0)
    band_fill_color = Override(default=None)

    # Grids render beneath glyphs by default.
    level = Override(default="underlay")
| {
"repo_name": "dennisobrien/bokeh",
"path": "bokeh/models/grids.py",
"copies": "5",
"size": "2149",
"license": "bsd-3-clause",
"hash": 5343223882948424000,
"line_mean": 30.1449275362,
"line_max": 98,
"alpha_frac": 0.6761284318,
"autogenerated": false,
"ratio": 4.062381852551985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006831601034499585,
"num_lines": 69
} |
""" A guide renderer for displaying grid lines on Bokeh plots.
"""
from __future__ import absolute_import
from ..core.properties import Int, String, Float, Auto, Instance, Tuple, Either, Include, Override
from ..core.property_mixins import FillProps, LineProps
from .renderers import GuideRenderer
from .tickers import Ticker
class Grid(GuideRenderer):
    """ Display horizontal or vertical grid lines at locations
    given by a supplied ``Ticker``.
    """

    # 0 -> vertical lines crossing the x-axis, 1 -> horizontal lines.
    dimension = Int(0, help="""
    Which dimension the Axis Grid lines will intersect. The
    x-axis is dimension 0 (vertical Grid lines) and the y-axis
    is dimension 1 (horizontal Grid lines).
    """)

    bounds = Either(Auto, Tuple(Float, Float), help="""
    Bounds for the rendered grid lines. If unset, the grid
    lines will span the entire plot in the given dimension.
    """)

    # Note: we must allow the possibility of setting both
    # range names be cause if a grid line is "traced" along
    # a path, ranges in both dimensions will matter.
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen
    locations when rendering a grid on the plot. If unset, use the
    default x-range.
    """)

    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen
    locations when rendering a grid on the plot. If unset, use the
    default y-range.
    """)

    ticker = Instance(Ticker, help="""
    The Ticker to use for computing locations for the Grid lines.
    """)

    # Include pulls in the full set of line_* properties; the Override
    # declarations below change only their defaults.
    grid_props = Include(LineProps, help="""
    The %s of the Grid lines.
    """)

    grid_line_color = Override(default='#e5e5e5')

    minor_grid_props = Include(LineProps, help="""
    The %s of the minor Grid lines.
    """)

    # Minor grid lines are invisible by default.
    minor_grid_line_color = Override(default=None)

    band_props = Include(FillProps, help="""
    The %s of alternating bands between Grid lines.
    """)

    # Bands are invisible by default.
    band_fill_alpha = Override(default=0)
    band_fill_color = Override(default=None)

    # Grids render beneath glyphs by default.
    level = Override(default="underlay")
| {
"repo_name": "phobson/bokeh",
"path": "bokeh/models/grids.py",
"copies": "3",
"size": "2078",
"license": "bsd-3-clause",
"hash": 9076661190979161000,
"line_mean": 29.5588235294,
"line_max": 98,
"alpha_frac": 0.6742059673,
"autogenerated": false,
"ratio": 4.050682261208577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6224888228508577,
"avg_score": null,
"num_lines": null
} |
""" A guide renderer for displaying grid lines on Bokeh plots.
"""
from __future__ import absolute_import
from ..properties import Int, String, Float, Auto, Instance, Tuple, Either, Include
from ..mixins import FillProps, LineProps
from .renderers import GuideRenderer
from .tickers import Ticker
class Grid(GuideRenderer):
    """ Display horizontal or vertical grid lines at locations
    given by a supplied ``Ticker``.
    """

    # 0 -> vertical lines crossing the x-axis, 1 -> horizontal lines.
    dimension = Int(0, help="""
    Which dimension the Axis Grid lines will intersect. The
    x-axis is dimension 0 (vertical Grid lines) and the y-axis
    is dimension 1 (horizontal Grid lines).
    """)

    bounds = Either(Auto, Tuple(Float, Float), help="""
    Bounds for the rendered grid lines. If unset, the grid
    lines will span the entire plot in the given dimension.
    """)

    # Note: we must allow the possibility of setting both
    # range names be cause if a grid line is "traced" along
    # a path, ranges in both dimensions will matter.
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen
    locations when rendering a grid on the plot. If unset, use the
    default x-range.
    """)

    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen
    locations when rendering a grid on the plot. If unset, use the
    default y-range.
    """)

    ticker = Instance(Ticker, help="""
    The Ticker to use for computing locations for the Grid lines.
    """)

    # Include pulls in the full set of line_*/fill_* properties for
    # styling the grid lines and the bands between them.
    grid_props = Include(LineProps, help="""
    The %s of the Grid lines.
    """)

    band_props = Include(FillProps, help="""
    The %s of alternating bands between Grid lines.
    """)
| {
"repo_name": "lukebarnard1/bokeh",
"path": "bokeh/models/grids.py",
"copies": "4",
"size": "1719",
"license": "bsd-3-clause",
"hash": 8437811642343330000,
"line_mean": 30.2545454545,
"line_max": 83,
"alpha_frac": 0.6748109366,
"autogenerated": false,
"ratio": 4.192682926829268,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01907166907166907,
"num_lines": 55
} |
""" A guide renderer for displaying grid lines on Bokeh plots.
"""
from __future__ import absolute_import
from ..properties import Int, String, Float, Auto, Instance, Tuple, Either, Include, Override
from ..mixins import FillProps, LineProps
from .renderers import GuideRenderer
from .tickers import Ticker
class Grid(GuideRenderer):
    """ Display horizontal or vertical grid lines at locations
    given by a supplied ``Ticker``.
    """

    # 0 -> vertical lines crossing the x-axis, 1 -> horizontal lines.
    dimension = Int(0, help="""
    Which dimension the Axis Grid lines will intersect. The
    x-axis is dimension 0 (vertical Grid lines) and the y-axis
    is dimension 1 (horizontal Grid lines).
    """)

    bounds = Either(Auto, Tuple(Float, Float), help="""
    Bounds for the rendered grid lines. If unset, the grid
    lines will span the entire plot in the given dimension.
    """)

    # Note: we must allow the possibility of setting both
    # range names be cause if a grid line is "traced" along
    # a path, ranges in both dimensions will matter.
    x_range_name = String('default', help="""
    A particular (named) x-range to use for computing screen
    locations when rendering a grid on the plot. If unset, use the
    default x-range.
    """)

    y_range_name = String('default', help="""
    A particular (named) y-range to use for computing screen
    locations when rendering a grid on the plot. If unset, use the
    default y-range.
    """)

    ticker = Instance(Ticker, help="""
    The Ticker to use for computing locations for the Grid lines.
    """)

    # Include pulls in the full set of line_* properties; the Override
    # declarations below change only their defaults.
    grid_props = Include(LineProps, help="""
    The %s of the Grid lines.
    """)

    grid_line_color = Override(default='#cccccc')

    minor_grid_props = Include(LineProps, help="""
    The %s of the minor Grid lines.
    """)

    # Minor grid lines are invisible by default.
    minor_grid_line_color = Override(default=None)

    band_props = Include(FillProps, help="""
    The %s of alternating bands between Grid lines.
    """)

    # Bands are invisible by default.
    band_fill_alpha = Override(default=0)
    band_fill_color = Override(default=None)

    # Grids render beneath glyphs by default.
    level = Override(default="underlay")
| {
"repo_name": "htygithub/bokeh",
"path": "bokeh/models/grids.py",
"copies": "2",
"size": "2059",
"license": "bsd-3-clause",
"hash": 3633125932498140000,
"line_mean": 29.2794117647,
"line_max": 93,
"alpha_frac": 0.6726566294,
"autogenerated": false,
"ratio": 4.093439363817097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5766095993217097,
"avg_score": null,
"num_lines": null
} |
"""A GUI, implemented with PyQt4
BeamAnalyzer v0.4.0
Copyright 2014 Evan Murawski
License: MIT
"""
__about = 'BeamAnalyzer v0.4.0\n\nCopyright 2014 Evan Murawski\nLicense: MIT'
__version = 'v0.4.0'
from PyQt4 import QtCore, QtGui
from frontend.guistructure import Ui_Beam
from frontend.forcemomentprompt import Ui_Force_Moment_Dialog
from frontend.distforceprompt import Ui_Dist_Force_Dialog
from backend.interactions import Force, Interaction, InteractionLocationError, Moment, Dist_Force
from backend.beam import Beam
import backend.solver as solver
from backend.solver import SolverError
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
import matplotlib.pyplot as plt
import backend.shearmomentgenerator as shearmomentgenerator
from backend.shearmomentgenerator import Shear_Moment_Error
import numpy as np
from frontend.mainwindow import Ui_MainWindow
from frontend.settingsdialog import Ui_Dialog_settings
import sys
def update_tree(beam):
    """Rebuild the tree widget so it reflects the beam's interactions."""
    ui.treeWidget.clear()
    rows = [QtGui.QTreeWidgetItem(interaction.to_list())
            for interaction in beam.interactions]
    for row in rows:
        ui.treeWidget.addTopLevelItem(row)
def unknown_state_change(lineEdit, label, ui, ok):
    """Toggle visibility of the magnitude edit/label pair, then refresh OK."""
    edit_visible = lineEdit.isVisible()
    lineEdit.setVisible(not edit_visible)
    label.setVisible(not label.isVisible())
    adjust_ok_buttons_state(ui, ok)
def force_moment_dialog_input_acceptable(ui):
    """Return True when every visible field of the force/moment dialog
    contains acceptable input (the magnitude field only counts while
    it is visible)."""
    location_ok = ui.lineEdit.hasAcceptableInput()
    if not ui.lineEdit_2.isVisible():
        return location_ok
    return location_ok and ui.lineEdit_2.hasAcceptableInput()
def adjust_ok_buttons_state(ui, ok):
    """Enable the OK button exactly when the dialog input validates."""
    acceptable = force_moment_dialog_input_acceptable(ui)
    ok.setEnabled(bool(acceptable))
def dist_force_dialog_input_acceptable(ui):
    """Return True when the start, end and magnitude fields all validate."""
    edits = (ui.lineEdit_start, ui.lineEdit_end, ui.lineEdit_magnitude)
    return all(edit.hasAcceptableInput() for edit in edits)
def adjust_ok_buttons_state_dist(ui, ok, end_validator):
    """Adjust the state of the ok buttons for the dist force dialog.

    Also re-anchors the end validator's minimum to the current start value,
    so the end of the distributed force cannot precede its start.
    """
    if dist_force_dialog_input_acceptable(ui):
        ok.setEnabled(True)
    else:
        ok.setEnabled(False)
    # NOTE(review): when the start field is empty the validator minimum falls
    # back to beam.length, which only allows end == beam.length; a fallback
    # of 0 looks more likely to be intended -- confirm.
    end_validator.setRange(float(ui.lineEdit_start.text()) if ui.lineEdit_start.text() else beam.length, beam.length, 5)
def interaction_prompt(is_force):
    """Create a force moment dialog if is_force, else a moment dialog.

    Shows a modal dialog asking for a location and (unless the "unknown"
    checkbox is ticked) a magnitude; on acceptance the resulting Force or
    Moment is added to the global beam and the tree view is refreshed.
    """
    #Create the dialog
    dialog = QtGui.QDialog()
    dialog_ui = Ui_Force_Moment_Dialog()
    dialog_ui.setupUi(dialog)
    #Set the name
    if is_force:
        dialog.setWindowTitle("New Force")
    else:
        dialog.setWindowTitle("New Moment")
    #Initially, hide the ok button
    ok = dialog_ui.buttonBox.button(QtGui.QDialogButtonBox.Ok)
    ok.setEnabled(False)
    #Setup input validators
    # Location must lie on the beam [0, beam.length]; magnitude is any
    # signed value with up to 5 decimals.
    location_validator = QtGui.QDoubleValidator()
    location_validator.setRange(0, beam.length, 5)
    magnitude_validator = QtGui.QDoubleValidator()
    magnitude_validator.setDecimals(5)
    #Apply the input validators
    dialog_ui.lineEdit.setValidator(location_validator)
    dialog_ui.lineEdit_2.setValidator(magnitude_validator)
    #Adjust the visibility of the ok button if the input is changed
    dialog_ui.lineEdit.textChanged.connect(lambda: adjust_ok_buttons_state(dialog_ui, ok))
    dialog_ui.lineEdit_2.textChanged.connect(lambda: adjust_ok_buttons_state(dialog_ui, ok))
    #Update the visibility of the input boxes if the checkbox state is changed
    dialog_ui.checkBox.stateChanged.connect(lambda: unknown_state_change(dialog_ui.lineEdit_2,
        dialog_ui.label_magnitude, dialog_ui, ok))
    #Initially, cursor in first line edit box
    dialog_ui.lineEdit.setFocus()
    #Show the dialog (modal; blocks until closed)
    dialog.exec_()
    #If ok is pressed, create the new force / moment
    if dialog.result():
        if is_force:
            # Checked box = unknown magnitude: magnitude 0 plus a False flag
            # (presumably a 'known' marker -- see backend Interaction types).
            if dialog_ui.checkBox.checkState():
                interaction = Force(float(dialog_ui.lineEdit.text()), 0, False)
            else:
                interaction = Force(float(dialog_ui.lineEdit.text()), float(dialog_ui.lineEdit_2.text()))
        else:
            if dialog_ui.checkBox.checkState():
                interaction = Moment(float(dialog_ui.lineEdit.text()), 0, False)
            else:
                interaction = Moment(float(dialog_ui.lineEdit.text()), float(dialog_ui.lineEdit_2.text()))
        #Add the interaction to the beam.
        beam.add_interaction(interaction)
        update_tree(beam)
def add_force_clicked():
    """Handle a click on the Add Force button by opening the force prompt."""
    interaction_prompt(True)
def add_moment_clicked():
    """Handle a click on the Add Moment button by opening the moment prompt."""
    interaction_prompt(False)
def add_distforce_clicked():
    """Prompt for a new distributed force and, if accepted, add it to the beam."""
    #Create the dialog
    dialog = QtGui.QDialog()
    dialog_ui = Ui_Dist_Force_Dialog()
    dialog_ui.setupUi(dialog)
    #Initially, hide the ok button
    ok = dialog_ui.buttonBox.button(QtGui.QDialogButtonBox.Ok)
    ok.setEnabled(False)
    #Setup input validators
    # Start and end positions must lie on the beam; magnitude is unbounded.
    start_validator = QtGui.QDoubleValidator()
    start_validator.setRange(0, beam.length, 5)
    end_validator = QtGui.QDoubleValidator()
    end_validator.setRange(0, beam.length, 5)
    magnitude_validator = QtGui.QDoubleValidator()
    magnitude_validator.setDecimals(5)
    #Apply the input validators
    dialog_ui.lineEdit_start.setValidator(start_validator)
    dialog_ui.lineEdit_end.setValidator(end_validator)
    dialog_ui.lineEdit_magnitude.setValidator(magnitude_validator)
    #Adjust the visibility of the ok button if the input is changed
    # (the handler also re-anchors end_validator's minimum to the start value)
    dialog_ui.lineEdit_start.textChanged.connect(lambda: adjust_ok_buttons_state_dist(dialog_ui, ok, end_validator))
    dialog_ui.lineEdit_end.textChanged.connect(lambda: adjust_ok_buttons_state_dist(dialog_ui, ok, end_validator))
    dialog_ui.lineEdit_magnitude.textChanged.connect(lambda: adjust_ok_buttons_state_dist(dialog_ui, ok, end_validator))
    #Set the focus
    dialog_ui.lineEdit_start.setFocus()
    #Show the dialog (modal; blocks until closed)
    dialog.exec_()
    #If ok is pressed, create the new distributed force
    if dialog.result():
        # NOTE(review): arguments are passed as (start, magnitude, end) --
        # confirm this order against the backend Dist_Force signature.
        interaction = Dist_Force(float(dialog_ui.lineEdit_start.text()), float(dialog_ui.lineEdit_magnitude.text()),
            float(dialog_ui.lineEdit_end.text()))
        beam.add_interaction(interaction)
        update_tree(beam)
def solve_clicked():
    """Solve the beam for its unknowns and refresh the interaction tree."""
    try:
        solver.solve(beam)
    except SolverError as e:
        QtGui.QMessageBox.warning(window,"Error", str(e))
    else:
        update_tree(beam)
def plot_clicked():
    """Generate and display the shear and moment plots for the current beam."""
    if len(beam.interactions) < 1:
        QtGui.QMessageBox.warning(window,"Error", "There is nothing to plot.")
        return
    #Clear the plot
    plt.clf()
    #Generate the shear and moment points, using generate_numerical
    try:
        shear_moment = shearmomentgenerator.generate_numerical(beam, step_size)
    except Shear_Moment_Error as e:
        QtGui.QMessageBox.warning(window,"Error", str(e))
        return
    #Plot the points
    # One (shear, moment) sample every step_size along the beam.
    x = np.arange(0, beam.length, step_size)
    shear = [y[0] for y in shear_moment]
    moment = [y[1] for y in shear_moment]
    shear_plot = figure.add_subplot(211)
    shear_plot.plot(x, shear)
    plt.title('Shear')
    moment_plot = figure.add_subplot(212)
    moment_plot.plot(x, moment)
    plt.title('Moment')
    #apply a buffer around the plot for easier viewing
    # plot_margin is a fraction of the data range padded above and below.
    shear_plot.axis([min(x), max(x), min(shear) - plot_margin * (max(shear)-min(shear)), max(shear) +
        plot_margin * (max(shear)-min(shear))])
    moment_plot.axis([min(x), max(x), min(moment) - plot_margin * (max(moment)-min(moment)), max(moment) +
        plot_margin * (max(moment)-min(moment))])
    #update the canvas
    canvas.draw()
def clear_clicked():
    """Reset the beam to an empty one of the same length; blank tree and plot."""
    global beam
    beam = Beam(beam.length)
    plt.clf()
    canvas.draw()
    update_tree(beam)
def new_clicked():
    """Prompt for a new beam length. If input is ok, clear the beam, tree, and plot.

    Create a new beam of the given length."""
    global beam
    length, ok = QtGui.QInputDialog.getDouble(window, "Beam Length",
        "Enter the length of the beam:", 0, 0, sys.float_info.max, 5)
    # Cancelling the dialog leaves the current beam untouched.
    if ok:
        beam = Beam(length)
        update_tree(beam)
        plt.clf()
        canvas.draw()
def quit_clicked():
    """Shut down the Qt application."""
    app.quit()
def settings_clicked():
    """Create a settings dialog, containing the option to change the step size.

    If a valid new step size is entered, update the step size."""
    global step_size
    #Create the dialog
    dialog = QtGui.QDialog()
    dialog_ui = Ui_Dialog_settings()
    dialog_ui.setupUi(dialog)
    #Populate the line edit with the current value
    dialog_ui.lineEdit_step.setText(str(step_size))
    #set up the validator and apply it
    # Step must be positive (>= 1e-6), at most 1000, with 6 decimals.
    step_validator = QtGui.QDoubleValidator()
    step_validator.setRange(0.000001, 1000, 6)
    dialog_ui.lineEdit_step.setValidator(step_validator)
    dialog.exec_()
    #Update the step size if necessary
    if dialog.result():
        # Guard against an empty or zero entry before converting.
        if dialog_ui.lineEdit_step.text() and float(dialog_ui.lineEdit_step.text()) != 0:
            step_size = float(dialog_ui.lineEdit_step.text())
def about_clicked():
    """Pop up the About box with version and license information."""
    QtGui.QMessageBox.about(window, "About BeamAnalyzer", __about)
def clear_selected_clicked():
    """Remove the selected interactions; refresh tree and plot if any removed."""
    selected = ui.treeWidget.selectedItems()
    removed_any = False
    for selected_item in selected:
        index = ui.treeWidget.indexOfTopLevelItem(selected_item)
        if index == -1:
            continue
        beam.interactions.pop(index)
        removed_any = True
    if removed_any:
        update_tree(beam)
        plt.clf()
        canvas.draw()
def make_first_beam():
    """Prompt for the initial beam length; exit the app if the user declines."""
    length, accepted = QtGui.QInputDialog.getDouble(window, "Beam Length",
        "Enter the length of the beam:", 0, 0, sys.float_info.max, 5)
    if not accepted:
        sys.exit()
    return Beam(length)
def make_links():
    """Wire the UI buttons and menu actions to their handler functions."""
    button_handlers = (
        (ui.pushButton_force, add_force_clicked),
        (ui.pushButton_moment, add_moment_clicked),
        (ui.pushButton_distforce, add_distforce_clicked),
        (ui.pushButton_solve, solve_clicked),
        (ui.pushButton_plot, plot_clicked),
        (ui.pushButton_clear, clear_clicked),
        (ui.pushButton_new, new_clicked),
        (ui.pushButton_clearselected, clear_selected_clicked),
    )
    for button, handler in button_handlers:
        button.clicked.connect(handler)
    main_window_ui.actionQuit.triggered.connect(quit_clicked)
    main_window_ui.actionAbout.triggered.connect(about_clicked)
    main_window_ui.actionSettings.triggered.connect(settings_clicked)
if __name__ == '__main__':
    #Global vars
    step_size = 0.01  # beam sampling interval for shear/moment curves
    plot_margin = 0.15  # fractional padding around plotted data ranges
    #Setup UI window
    app = QtGui.QApplication(sys.argv)
    window = QtGui.QWidget()
    ui = Ui_Beam()
    ui.setupUi(window)
    #Setup main window, embedding the beam widget as its central widget
    main_window = QtGui.QMainWindow()
    main_window_ui = Ui_MainWindow()
    main_window_ui.setupUi(main_window)
    main_window.setCentralWidget(window)
    #setup matplotlib canvas and toolbar inside the Qt layout
    figure = plt.figure()
    canvas = FigureCanvas(figure)
    toolbar = NavigationToolbar(canvas, window)
    ui.verticalLayout_3.addWidget(toolbar)
    ui.verticalLayout_3.addWidget(canvas)
    #Show the window, maximized
    main_window.showMaximized()
    #setup links
    make_links()
    #setup beam (prompts the user; cancelling exits the application)
    beam = make_first_beam()
    #Exit shell when window exits
sys.exit(app.exec_()) | {
"repo_name": "EvanMurawski/BeamAnalyzer",
"path": "beamanalyzer/guiinterface.py",
"copies": "1",
"size": "12446",
"license": "mit",
"hash": 5177508034015693000,
"line_mean": 30.1175,
"line_max": 120,
"alpha_frac": 0.6832717339,
"autogenerated": false,
"ratio": 3.7319340329835082,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.987422643409103,
"avg_score": 0.008195866558495624,
"num_lines": 400
} |
# A Gui interface allowing the binary illiterate to figure out the ip address the Arduino has been assigned.
import os
import re
from PySide.QtCore import QFile, QMetaObject, QSignalMapper, Slot, QRegExp
from PySide.QtGui import QDialog, QPushButton, QRegExpValidator
from PySide.QtUiTools import QUiLoader
class IPHelper(QDialog):
    """Dialog mapping trellis button bit patterns to an IPv4 address.

    Buttons 0-7 act as the bits of one octet; buttons 12-15 select which
    of the four octet text fields is currently being edited.
    """

    def __init__(self, parent=None):
        super(IPHelper, self).__init__(parent)
        # Load the Qt Designer layout onto this instance (loadUi is the
        # module-level helper defined below).
        f = QFile(os.path.join(os.path.split(__file__)[0], 'iphelper.ui'))
        loadUi(f, self)
        f.close()
        # Final dotted-quad string; filled in by accept().
        self.ipAddress = None
        # create validators: each octet field takes at most three digits
        validator = QRegExpValidator(QRegExp('\d{,3}'))
        self.uiFirstTetTXT.setValidator(validator)
        self.uiSecondTetTXT.setValidator(validator)
        self.uiThirdTetTXT.setValidator(validator)
        self.uiFourthTetTXT.setValidator(validator)
        # build a map of the buttons, indexed by the number embedded in
        # each uiTrellis<N>BTN object name
        self.buttons = [None]*16
        self.signalMapper = QSignalMapper(self)
        self.signalMapper.mapped.connect(self.tetMap)
        for button in self.findChildren(QPushButton):
            match = re.findall(r'^uiTrellis(\d{,2})BTN$', button.objectName())
            if match:
                i = int(match[0])
                self.buttons[i] = button
                # Buttons 12-15 select the active octet and are routed
                # through the signal mapper to tetMap.
                if i >= 12:
                    self.signalMapper.setMapping(button, i)
                    button.clicked.connect(self.signalMapper.map)
        # Start with the fourth octet selected.
        self.tetMap(12)

    @Slot()
    def accept(self):
        """Assemble the dotted-quad address from the four fields, then close."""
        self.ipAddress = '{}.{}.{}.{}'.format(self.uiFirstTetTXT.text(), self.uiSecondTetTXT.text(), self.uiThirdTetTXT.text(), self.uiFourthTetTXT.text())
        super(IPHelper, self).accept()

    @Slot(int)
    def tetMap(self, index):
        """Activate octet-selector button `index` (12..15) and mirror that
        octet's current value onto bit buttons 0-7."""
        button = self.buttons[index]
        if not button.isChecked():
            return
        # Uncheck the other three octet-selector buttons.
        for i in range(12, 16):
            b = self.buttons[i]
            if b != button:
                b.setChecked(False)
        # update the buttons to match the current value of the text
        for edit in (self.uiFirstTetTXT, self.uiSecondTetTXT, self.uiThirdTetTXT, self.uiFourthTetTXT):
            edit.setProperty('active', False)
        # Selector 12 maps to the fourth octet, 15 to the first.
        if index == 12:
            val = int(self.uiFourthTetTXT.text())
            self.uiFourthTetTXT.setProperty('active', True)
        elif index == 13:
            val = int(self.uiThirdTetTXT.text())
            self.uiThirdTetTXT.setProperty('active', True)
        elif index == 14:
            val = int(self.uiSecondTetTXT.text())
            self.uiSecondTetTXT.setProperty('active', True)
        elif index == 15:
            val = int(self.uiFirstTetTXT.text())
            self.uiFirstTetTXT.setProperty('active', True)
        # Check bit button i iff bit i is set in the octet value; signals
        # are blocked so the programmatic setChecked emits nothing.
        for i in range(8):
            b = self.buttons[i]
            b.blockSignals(True)
            b.setChecked(2**i & val)
            b.blockSignals(False)
        # force a refresh of the styleSheet
        self.setStyleSheet(self.styleSheet())

    @Slot()
    def buttonPressed(self):
        """Recompute the active octet's value from bit buttons 0-7 and
        write it into the corresponding octet text field."""
        total = 0
        for i in range(8):
            if self.buttons[i].isChecked():
                total += 2**i
        # NOTE: `unicode` means this module targets Python 2.
        total = unicode(total)
        if self.uiTrellis12BTN.isChecked():
            self.uiFourthTetTXT.setText(total)
        elif self.uiTrellis13BTN.isChecked():
            self.uiThirdTetTXT.setText(total)
        elif self.uiTrellis14BTN.isChecked():
            self.uiSecondTetTXT.setText(total)
        elif self.uiTrellis15BTN.isChecked():
            self.uiFirstTetTXT.setText(total)
# Code to load a ui file like using PyQt4
# https://www.mail-archive.com/pyside@lists.openbossa.org/msg01401.html
class MyQUiLoader(QUiLoader):
    """A QUiLoader that loads a .ui hierarchy onto an existing base instance,
    mimicking PyQt4's uic.loadUi for PySide."""

    def __init__(self, baseinstance):
        super(MyQUiLoader, self).__init__()
        self.baseinstance = baseinstance

    def createWidget(self, className, parent=None, name=""):
        widget = super(MyQUiLoader, self).createWidget(className, parent, name)
        if parent is None:
            # The top-level widget is replaced by the supplied base instance.
            return self.baseinstance
        # Child widgets are attached to the base instance by object name.
        setattr(self.baseinstance, name, widget)
        return widget
def loadUi(uifile, baseinstance=None):
    """Load a Qt Designer .ui file, populating baseinstance when given."""
    loaded = MyQUiLoader(baseinstance).load(uifile)
    QMetaObject.connectSlotsByName(loaded)
    return loaded
| {
"repo_name": "MHendricks/Motionbuilder-Remote",
"path": "iphelper.py",
"copies": "1",
"size": "3612",
"license": "mit",
"hash": -5365688135667160000,
"line_mean": 31.5405405405,
"line_max": 149,
"alpha_frac": 0.7203765227,
"autogenerated": false,
"ratio": 2.9082125603864735,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41285890830864735,
"avg_score": null,
"num_lines": null
} |
'''A GUI program to analyze data from the geiger counter experiment'''
# The GUI package for python
import Tkinter as tk
import tkFileDialog as tkfd
import tkMessageBox as tkmb
import tkSimpleDialog as tksd
# Our analysis tools
import analyzeData as ad
import PlotData as pd
class App:
'''The driver class for the program, which contains all of the methods and
GUI elements for the program'''
def __init__(self, master):
    '''Creates a new App window from scratch'''
    #save the root TK frame
    self.root = master
    #create a frame for all of the buttons
    frame = tk.Frame(self.root)
    frame.pack()
    #Quit button
    self.quitButton = tk.Button(frame, text="Quit", fg="red", command=frame.quit)
    self.quitButton.pack(side=tk.LEFT)
    #label to display the name of the currently loaded file
    self.fileName = tk.Label(frame, text="No File")
    self.fileName.pack(side=tk.BOTTOM)
    #frame to hold the analysis buttons
    buttonFrame = tk.Frame(frame)
    #add all of the buttons for analysis
    # NOTE(review): the buttons are added to `frame`, not `buttonFrame`;
    # the empty buttonFrame is packed but never used -- confirm intent.
    self.addMenuButtons(frame)
    buttonFrame.pack(side=tk.LEFT)
    #the DataSet that is being analyzed (None until a file is imported)
    self.dataSet = None
    #label for .wav file import progress (created lazily on first import)
    self.progressLabel = None
def addMenuButtons(self, master):
    '''Add the standard set of analysis buttons to the given master frame.'''
    # (label, callback) pairs, in display order.
    actions = [
        ("Calibrate .wav Importer", self.showThresholdGraph),
        ("Import .wav File", self.importWavFile),
        ("Import Saved Data", self.openBinary),
        ("Save Data to Disk", self.saveBinary),
        ("Export Data in Plaintext", self.export),
        ("Plot Count Rate vs. Time", self.getCountRate),
        ("Plot Count Rate Histogram", self.getCountRateHist),
        ("Plot Intervals Histogram", self.getIntervals),
        ("Get Total Counts", self.getTotalCounts),
    ]
    for label_text, callback in actions:
        button = tk.Button(master, text=label_text, command=callback)
        button.pack(anchor=tk.S)
# to do: update labels
def importWavFile(self):
    '''Get the data from a .wav file. Prompts the user with a file dialog.
    Prompts the user for a threshold value. This is the value of the .wav
    data that will trigger a count (higher = stricter, fewer counts detected).'''
    #prompt for file name; empty string means the user cancelled
    filename = tkfd.askopenfilename(title="Pick your .wav file", initialdir=".", parent=self.root)
    if not filename:
        return
    print filename
    #grab the data rate (points per second) and a list of data
    rate, wavData = ad.DataSet.readWaveFile(filename)
    #grab threshold from user
    labelText = "Enter the threshold for detecting a count. If you don't know what this means, read the lab manual!"
    threshold = tksd.askinteger("Threshold", labelText, parent=self.root, minvalue=0, initialvalue=15000)
    if not threshold:
        return
    #list of the time of each count
    times = []
    # label for showing import progress
    if self.progressLabel: # if it already exists
        pass
    else:
        self.progressLabel = tk.Label(self.root)
        self.progressLabel.pack(side=tk.BOTTOM)
    #boolean for if the current data is during a count or not during a count
    # 0 means no count, 1 means count
    aboveThreshold = 0
    # Print the total number of data points as reference to the user
    print "Total number of data points: ", len(wavData)
    #loop through data and grab count times
    for i, level in enumerate(wavData):
        #update the progress label every second (i % rate == 0)
        if i % rate == 0:
            self.updateFileProgress(self.progressLabel, i, len(wavData))
            self.root.update_idletasks()
            print "gui reached ", i, "data points"
        #check if the current data level is above the threshold
        # (fromWaveData presumably appends detected count times to `times`
        # and returns the updated in-count state flag -- see analyzeData)
        aboveThreshold = ad.DataSet.fromWaveData(i, level, aboveThreshold, times, rate, threshold)
    #when the loop is finished, update the label one last time
    self.progressLabel.configure(text="Import Progress: 100%")
    print "numCounts = " + str(len(times))
    #length of file (in seconds) is used in class ad.DataSet
    fileLength = float(len(wavData))/float(rate)
    # create DataSet
    self.dataSet = ad.DataSet(times, rate, fileLength)
    print "imported"
    #display the name of the current file in a label
    self.updateFileLabel(filename)
def updateFileProgress(self, label, value, maximum):
'''Updates the label with the expression "Progress: <percent complete>%",
where <percent complete> is the first two characters of value/maximum'''
text = "Progress: " + str(value/float(maximum) * 100)[0:2] + "%"
label.configure(text=text)
def getCountRate(self):
    '''Plots the count rate as a function of time. The rates are calculated
    for each "bin" (i.e. independent sample)'''
    # Warn and bail out early when no data set has been imported yet.
    if not self.dataSet:
        self.showImportAlert()
        return
    # Ask the user how long each independent sample should be.
    prompt = "Enter the desired sample length in seconds"
    spacing = tksd.askfloat("Count Rate", prompt, parent=self.root, minvalue=0, initialvalue=1)
    if not spacing:
        return
    # Hand the data off to matplotlib for plotting.
    pd.plotCountRate(self.dataSet, spacing)
def getCountRateHist(self):
    '''Plots a histogram of the count rate. The number of bins is
    for the histogram, and the sample length is how long the count rate
    is averaged over (equivalent to "sample length" for the count rate
    vs. time graph.'''
    # Warn and bail out early when no data set has been imported yet.
    if not self.dataSet:
        self.showImportAlert()
        return
    # Resolution of the histogram.
    prompt = "Enter the number of bins"
    bins = tksd.askinteger("Count Rate Histogram", prompt, parent=self.root, minvalue=1)
    if not bins:
        return
    # Length of each sample over which a count rate is computed.
    prompt = "Enter the sample length (seconds)"
    sample = tksd.askfloat("Sample Length", prompt, parent=self.root, minvalue=0)
    if not sample:
        return
    # Hand the data off to matplotlib for plotting.
    pd.plotHistOfCountRates(self.dataSet, sample, bins)
def getIntervals(self):
    '''Plots a histogram of the time interval length between consecutive
    counts.'''
    # Warn and bail out early when no data set has been imported yet.
    if not self.dataSet:
        self.showImportAlert()
        return
    # Resolution of the histogram.
    prompt = "Enter the desired number of bins in the histogram"
    bins = tksd.askinteger("Intervals Histogram", prompt, parent=self.root, minvalue=1)
    if not bins:
        return
    # Hand the data off to matplotlib for plotting.
    pd.plotHistOfIntervals(self.dataSet, bins)
def getTotalCounts(self):
    '''Displays the total number of counts in the imported data set.

    Shows an alert instead when no data has been imported yet.'''
    #check if data has been imported. if not, give a warning
    if self.dataSet:
        #fetch the value from the stored data
        totalCounts = self.dataSet.getTotalCounts()
        #echo to the console, then display the value in a dialog window
        print totalCounts
        tkmb.showinfo("Total counts", str(totalCounts) + " counts total")
    else:
        #say that you need to import data first
        self.showImportAlert()
def saveBinary(self):
    '''Saves the currently imported data in a binary file for easy import
    in the future. Prompts for a file name in a dialog.
    This is much faster than importing from .wav file so it should be
    used every time except for the first that the data is imported.'''
    #check if data has been imported. if not, give a warning
    if self.dataSet:
        #prompt for the save location (empty string means the user cancelled)
        filename = tkfd.asksaveasfilename(initialdir=".", title="Save Data")
        if not filename:
            return
        #serialisation is delegated to the DataSet itself
        self.dataSet.save(filename)
        print "saved"
    else:
        #say that you need to import data first
        self.showImportAlert()
def export(self):
    '''Exports the count times to a return-separated text file with a one-line header'''
    #check if data has been imported. if not, give a warning
    if self.dataSet:
        #prompt for the save location (empty string means the user cancelled)
        filename = tkfd.asksaveasfilename(initialdir=".", title="Export")
        if not filename:
            return
        #the DataSet writes the file (method is named exportCSV, but see the
        #docstring above for the actual format)
        self.dataSet.exportCSV(filename)
        print "exported"
    else:
        #say that you need to import data first
        self.showImportAlert()
def openBinary(self):
    '''Imports a previously-saved binary file containing a data set.
    This is much faster than importing from .wav file so it should be
    used every time except for the first that the data is imported.'''
    #prompt for file name (empty string means the user cancelled)
    filename = tkfd.askopenfilename(initialdir=".", title="Open File")
    if not filename:
        return
    #import the file and record the DataSet
    self.dataSet = ad.DataSet.fromSavedFile(filename)
    print "opened"
    #update the label showing the current file
    self.updateFileLabel(filename)
def showImportAlert(self):
    '''Show an alert dialog indicating there is no active data.'''
    # Modal error box shown whenever an action needs imported data.
    title, message = "No Data", "You need to import data first!"
    tkmb.showerror(title, message)
def updateFileLabel(self, filename):
'''Update the label that displays the currently active file.'''
#show the first directory in the path and the file name:
# e.g. Users/.../data.wav
# or Users\...\data.wav
#different file separator for windows and unix
#first, unix
if(filename.find("/") >= 0):
path = filename.split("/")
self.fileName.configure(text=path[1] + "/.../" + path[len(path)-1])
else:
#escape backslashes! \ => \\
path = filename.split("\\")
self.fileName.configure(text=path[1] + "\\...\\" + path[len(path)-1])
def showThresholdGraph(self):
    '''Shows a graph of the .wav amplitudes so that the user can figure out
    a good threshold.

    Prompts for a .wav file, reads it, and plots the first 5 seconds of
    amplitude data.'''
    #prompt for file name (empty string means the user cancelled)
    filename = tkfd.askopenfilename(title="Pick your .wav file", initialdir=".", parent=self.root)
    if not filename:
        return
    #rate is in data points per second
    rate, wavData = ad.DataSet.readWaveFile(filename)
    #get the first 5 seconds of data; the previous slice started at index 1
    #and silently dropped the very first sample
    numEntriesFor5Sec = rate * 5
    first5Sec = wavData[:numEntriesFor5Sec]
    #plot data
    pd.plot(first5Sec, "Calibrate the Threshold!")
# Create the Tk application window
root = tk.Tk()
# Create the App which is defined above
app = App(root)
# Give the app window a title
app.root.title("Geiger Counter Analysis")
# Run the app (mainloop is tk syntax for "go"); blocks until the window closes
root.mainloop()
| {
"repo_name": "musicalrunner/Geiger-Counter",
"path": "GUI.py",
"copies": "2",
"size": "11856",
"license": "mit",
"hash": 4015887718166713300,
"line_mean": 33.9734513274,
"line_max": 120,
"alpha_frac": 0.6129385965,
"autogenerated": false,
"ratio": 4.370070033173609,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008501533126379704,
"num_lines": 339
} |
"""A hack to allow safe clearing of the cache in django.contrib.sites.
Since django.contrib.sites may not be thread-safe when there are
multiple instances of the application server, we're patching it with
a thread-safe structure and methods that use it underneath.
"""
import threading
from django.contrib.sites.models import Site, SiteManager
from django.core.exceptions import ImproperlyConfigured
from django.http.request import split_domain_port
# Module-level lock serialising all writes to THREADED_SITE_CACHE.
lock = threading.Lock()
with lock:
    # NOTE(review): taking the lock here is redundant (it was created on the
    # previous line and cannot be contended yet) but harmless.
    THREADED_SITE_CACHE = {}
def new_get_current(self, request=None):
    """Thread-safe replacement for ``SiteManager.get_current``.

    Resolution order:
      1. ``settings.SITE_ID``, when set;
      2. the request host (with port), falling back to the bare domain;
      3. otherwise raise ``ImproperlyConfigured``.

    Results are memoised in ``THREADED_SITE_CACHE``; writes are serialised
    through the module-level lock.
    """
    from django.conf import settings
    if getattr(settings, 'SITE_ID', ''):
        site_id = settings.SITE_ID
        if site_id not in THREADED_SITE_CACHE:
            with lock:
                site = self.prefetch_related('settings').filter(pk=site_id)[0]
                THREADED_SITE_CACHE[site_id] = site
        return THREADED_SITE_CACHE[site_id]
    elif request:
        host = request.get_host()
        try:
            # First attempt to look up the site by host with or without port.
            if host not in THREADED_SITE_CACHE:
                with lock:
                    site = self.prefetch_related('settings').filter(
                        domain__iexact=host)[0]
                    THREADED_SITE_CACHE[host] = site
            return THREADED_SITE_CACHE[host]
        except (Site.DoesNotExist, IndexError):
            # BUGFIX: indexing an empty queryset with [0] raises IndexError,
            # not Site.DoesNotExist, so the original handler never fired and
            # the port-stripping fallback below was unreachable.
            # Fallback to looking up site after stripping port from the host.
            domain, dummy_port = split_domain_port(host)
            if domain not in THREADED_SITE_CACHE:
                with lock:
                    site = self.prefetch_related('settings').filter(
                        domain__iexact=domain)[0]
                    THREADED_SITE_CACHE[domain] = site
            return THREADED_SITE_CACHE[domain]
    raise ImproperlyConfigured(
        "You're using the Django sites framework without having"
        " set the SITE_ID setting. Create a site in your database and"
        " set the SITE_ID setting or pass a request to"
        " Site.objects.get_current() to fix this error.")
def new_clear_cache(self):
    """Thread-safe replacement for ``SiteManager.clear_cache``.

    Rebinds the module-level cache to a fresh dict while holding the lock."""
    global THREADED_SITE_CACHE
    with lock:
        THREADED_SITE_CACHE = {}
def new_get_by_natural_key(self, domain):
    """Replacement for ``SiteManager.get_by_natural_key``.

    Case-insensitive domain lookup with related settings prefetched.
    NOTE(review): an unknown domain raises IndexError (empty queryset
    indexed with [0]) rather than Site.DoesNotExist -- confirm callers
    expect that."""
    return self.prefetch_related('settings').filter(domain__iexact=domain)[0]
def patch_contrib_sites():
    """Monkey-patch django.contrib.sites' SiteManager with the thread-safe
    implementations defined above."""
    SiteManager.get_current = new_get_current
    SiteManager.clear_cache = new_clear_cache
    SiteManager.get_by_natural_key = new_get_by_natural_key
| {
"repo_name": "UITools/saleor",
"path": "saleor/site/patch_sites.py",
"copies": "3",
"size": "2508",
"license": "bsd-3-clause",
"hash": 983979381103566300,
"line_mean": 36.4328358209,
"line_max": 78,
"alpha_frac": 0.6427432217,
"autogenerated": false,
"ratio": 3.980952380952381,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.612369560265238,
"avg_score": null,
"num_lines": null
} |
# A hack to find "haikus" in English text. For the purposes of this program
# a "haiku" is one or more complete sentences that, together, can be broken
# into groups of 5, 7, and 5 syllables. Each canididate haiku line, and then
# the entire haiku, has to make it through a few heuristics to filter out
# constructions that are likely to scan awkwardly (like verb phrases split
# across lines). Since this code doesn't really try to understand the texts,
# it might throw away a few legitimate phrases, and it certainly lets through
# some bad ones.
#
# Any improvements would be welcomed.
#
# License:
#
# Copyright (c) 2009, Jonathan Feinberg <jdf@pobox.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import with_statement
import nltk
import re
import pickle
import gzip
import os.path
import sys
def file(relpath):
    """Return *relpath* resolved relative to this module's directory.

    NOTE: intentionally shadows the Python 2 builtin ``file``; the data
    loaders below rely on this name."""
    base = os.path.dirname(__file__)
    return os.path.join(base, relpath)
def read_alternates(which):
    """Read data/awkward_<which> and join its non-blank lines with '|'.

    The result is embedded into a regex alternation of awkward phrases."""
    path = file('data/awkward_%s' % which)
    with open(path, 'r') as handle:
        entries = [entry.strip() for entry in handle.readlines() if len(entry.strip()) > 0]
    return '|'.join(entries)
# Regexes that reject a candidate haiku line on their own: a sentence break
# in mid-line, or a line that ends just after a sentence boundary plus one word.
single_line_filters = [
    re.compile(r'^[a-z][^.?!;:]+([.?!;:]+[^.?!;:]+)+$', re.IGNORECASE),
    re.compile(r'[.?!;:]+\s+[\'"]?[A-Za-z]+(?:\'[a-z]+)?$'),
]
# Lines may not begin/end with the phrases listed in data/awkward_starts|ends.
single_line_filters.append(re.compile(r'^(?:%s)\b'%read_alternates('starts')))
single_line_filters.append(re.compile(r'\b(?:%s)$'%read_alternates('ends'), re.IGNORECASE))
# A lower-case first word immediately followed by a comma (mid-sentence start).
first_word_comma = re.compile(r'^\s*[a-z]\w*,')
# Build one big alternation of awkward two-word line breaks.  Each pair is
# joined with '\n' because candidate haiku lines are joined with '\n' before
# being run through this filter.
with open(file('data/awkward_breaks'), 'r') as breaks:
    alts = '|'.join([r'\b%s\b' % ('\n'.join(e.strip().split())) for e in breaks.readlines() if len(e.strip()) > 0]
                    + ['[^\'".?!;:,]\n[a-z]+(?:\'[a-z]+)?[".?!;:]+.',
                       '"\S+\n\S+"',
                       ])
break_filter = re.compile(alts, re.IGNORECASE)
# load the syllable-count dictionary
with open(file('cmudict/cmudict.pickle'), 'rb') as p:
    syllables = pickle.load(p)
# local additions/overrides: one "WORD count" pair per line
with open(file('cmudict/custom.dict'), 'r') as p:
    for line in p.readlines():
        if not len(line):
            continue
        (word, count) = line.split()
        syllables[word] = int(count)
# Use the NLTK to determine sentence boundaries.
sentence_tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
# Syllable counts of the spoken numbers 0-99, indexed by value
# (e.g. number_syllables[27] == 4: "twen-ty-sev-en"; [0] == 2: "ze-ro").
number_syllables = (
    # 0 1 2 3 4 5 6 7 8 9
    2, 1, 1, 1, 1, 1, 1, 2, 1, 1,
    1, 3, 1, 2, 2, 2, 2, 3, 2, 2,
    2, 3, 3, 3, 3, 3, 3, 4, 3, 3,
    2, 3, 3, 3, 3, 3, 3, 4, 3, 3,
    2, 3, 3, 3, 3, 3, 3, 4, 3, 3,
    2, 3, 3, 3, 3, 3, 3, 4, 3, 3,
    2, 3, 3, 3, 3, 3, 3, 4, 3, 3,
    3, 4, 4, 4, 4, 4, 4, 5, 4, 4,
    2, 3, 3, 3, 3, 3, 3, 4, 3, 3,
    2, 3, 3, 3, 3, 3, 3, 4, 3, 3,
)
has_digit = re.compile(r'\d')
# Ordinals such as "3rd", "4th", "1st".  NOTE(review): the "nd" suffix
# ("2nd", "22nd") is not matched by this pattern -- possibly an oversight.
ordinal = re.compile(r'^(\d\d?)(?:rd|th|st)$', re.IGNORECASE)
# Three or more digits in a row are considered unscannable.
too_many_digits = re.compile('\d\d\d')
# Clock shorthand like "9am" / "11pm".
short_time = re.compile(r'^([1-2]?[0-9])(?:[ap]m)$',re.IGNORECASE)
# Clock times like "9:30" or "12:05pm".  (Module-level name "time" would
# shadow the stdlib module -- this file deliberately never imports it.)
time = re.compile(r'^([1-2]?[0-9]):(\d\d)([ap]m)?$',re.IGNORECASE)
# Acceptable token "shapes"; group 1 is the cleaned word:
# a plain word, a dollar amount, or a clock time.
word_shapes = (
    re.compile(r'^[^a-z0-9\$@]*([-@&_0-9a-z\+]+(?:\'[a-z]+)?)[^a-z0-9]*$', re.IGNORECASE),
    re.compile(r'^[^\$]*(\$\d+(?:\.\d{1,2})?)[^a-z0-9]*$', re.IGNORECASE),
    re.compile(r'^[^a-z0-9]*([1-2]?[0-9]:\d\d(\s*[ap]m)?)[^a-z0-9]*$', re.IGNORECASE),
)
class Nope(Exception):
    """The candidate text can never scan as a haiku line; abandon it."""
    pass
class TooShort(Exception):
    """Ran out of words before reaching the required syllable count."""
    pass
class LineSyllablizer:
    """Splits candidate text into 5/7/5-syllable haiku lines.

    Wraps a whitespace-split word list with a cursor (self.index); seek()
    consumes words until a target syllable count is reached and appends the
    accepted text to self.lines."""
    def __init__(self, line, unknown_word_handler=None):
        # line: the candidate sentence(s); unknown_word_handler is called
        # with any cleaned word missing from the syllable dictionary.
        self.words = line.split()
        self.index = 0
        self.lines = []
        self.unknown_word_handler = unknown_word_handler
    def _count_chunk_syllables(self, chunk):
        # A chunk is either all digits or all letters (see the splitter
        # regex in _count_syllables).
        if has_digit.search(chunk):
            return number_syllables[int(chunk)]
        else:
            return syllables[chunk]
    def _count_syllables(self, word, splitter=re.compile(r'(?<=\D)(?=\d)|(?<=\d)(?=\D)')):
        """Count the syllables in a single cleaned word.

        Raises KeyError for unknown words and Nope for words this code
        refuses to scan (URLs, long digit runs).  The default-argument
        regex splits at every letter<->digit boundary."""
        if not word or len(word) == 0:
            return 0
        if 'http:' in word:
            raise Nope
        if '0' == word[0] and len(word) > 1:
            return 1 + self._count_syllables(word[1:]) # oh seven
        if '$' == word[0]:
            return 2 + self._count_syllables(word[1:]) # 13 dollars
        if '@' == word[0]:
            return 1 + self._count_syllables(word[1:]) # user name
        if '&' in word and len(word) > 1:
            # "and" contributes one syllable
            return 1 + sum(self._count_syllables(w) for w in word.split('&'))
        if '-' in word:
            return sum(self._count_syllables(w) for w in word.split('-'))
        if '_' in word:
            return sum(self._count_syllables(w) for w in word.split('_'))
        if not has_digit.search(word):
            return syllables[word]
        if too_many_digits.search(word):
            raise Nope
        m = short_time.match(word)
        if m:
            # e.g. "9am": "nine" plus two syllables for "ay em"
            return 2 + number_syllables[int(m.group(1))]
        m = time.match(word)
        if m:
            if m.group(2) == '00':
                # ":00" read as two syllables -- presumably an
                # "o'clock"-style reading; TODO confirm intent
                minutes = 2
            else:
                minutes = number_syllables[int(m.group(2))]
            partial = number_syllables[int(m.group(1))] + minutes
            if m.group(3):
                # trailing am/pm adds "ay em" / "pee em"
                return 2 + partial
            return partial
        m = ordinal.match(word)
        if m:
            # counted like the cardinal -- an approximation ("20th" counts
            # as "twenty", not "twentieth")
            return number_syllables[int(m.group(1))]
        # Mixed letters and digits: split at the boundaries and sum chunks.
        count = 0
        start = 0
        for m in splitter.finditer(word):
            boundary = m.start()
            count += self._count_chunk_syllables(word[start:boundary])
            start = boundary
        count += self._count_chunk_syllables(word[start:])
        return count
    def clean(self, word):
        """Strip punctuation via the word_shapes regexes and upper-case the
        core token; return None when no shape matches."""
        for shape in word_shapes:
            m = shape.match(word)
            if m:
                return m.group(1).upper()
        return None
    def count_syllables(self):
        """Return the total syllable count of all words, or -1 when a word
        is unknown (KeyError) or unscannable (Nope)."""
        si = 0  # NOTE(review): unused
        syllable_count = 0
        try:
            for word in self.words:
                syllable_count += self._count_syllables(self.clean(word))
        except KeyError:
            print("I don't know '%s'"%word)
            return -1
        except Nope:
            print("I can't do '%s'"%word)
            return -1
        return syllable_count
    def seek(self, n):
        """Consume words until exactly n syllables are accumulated, then
        record the accepted text in self.lines.

        Raises Nope on overshoot, unknown words, or an awkward line;
        raises TooShort when the words run out first."""
        si = self.index
        syllable_count = 0
        try:
            while syllable_count < n:
                word = self.clean(self.words[self.index])
                syllable_count += self._count_syllables(word)
                self.index += 1
        except KeyError:
            # word is not in the syllable dictionary
            if word and self.unknown_word_handler:
                self.unknown_word_handler(word)
            raise Nope
        except IndexError:
            raise TooShort
        if syllable_count > n:
            raise Nope
        line = ' '.join(self.words[si:self.index])
        for f in single_line_filters:
            if f.search(line):
                raise Nope
        self.lines.append(line)
    def seek_eol(self):
        """Require that every word has been consumed."""
        if self.index != len(self.words):
            raise Nope
    def bad_split(self, n):
        # NOTE(review): awkward_in_front_without_punct_before is not defined
        # anywhere in this module, so calling this raises NameError -- it
        # appears to be dead code.
        return awkward_in_front_without_punct_before.search(self.lines[n]) and not self.lines[n - 1][-1] in (',', ';', '-')
    def find_haiku(self):
        """Split the text into 5/7/5 lines, apply the whole-haiku filters,
        and return the three lines; raises Nope or TooShort otherwise."""
        self.seek(5)
        self.seek(7)
        self.seek(5)
        self.seek_eol()
        if first_word_comma.search(self.lines[1]) or first_word_comma.search(self.lines[2]):
            raise Nope
        if break_filter.search('\n'.join(self.lines)):
            raise Nope
        return self.lines
class HaikuFinder:
    """Finds 5-7-5 haikus made of one or more complete sentences."""
    def __init__(self, text, unknown_word_handler=None):
        # NLTK's punkt tokenizer yields the complete sentences of *text*.
        self.lines = sentence_tokenizer.tokenize(text)
        self.unknown_word_handler = unknown_word_handler
    def find_haikus(self):
        """Return a list of haikus; each haiku is a list of three lines.

        Starting at each sentence, keep appending the following sentences
        while the accumulated text is still too short to scan as 5-7-5;
        stop at the first success or definite rejection (Nope)."""
        haikus = []
        line_index = 0
        line_count = len(self.lines)
        while line_index < line_count:
            offset = 0
            line = ""
            while line_index + offset < line_count:
                line = "%s %s" % (line, self.lines[line_index + offset])
                try:
                    haikus.append(LineSyllablizer(line, self.unknown_word_handler).find_haiku())
                    break
                except Nope:
                    break
                except TooShort:
                    # not enough words yet -- pull in the next sentence
                    offset += 1
            line_index += 1
        return haikus
    @classmethod
    def add_word(cls, word, syllable_count):
        """Register a syllable count for a word missing from the dictionary."""
        syllables[word.upper()] = syllable_count
def find_haikus(text, unknown_word_handler=None):
    """Convenience wrapper: return every haiku found in *text*."""
    finder = HaikuFinder(text, unknown_word_handler)
    return finder.find_haikus()
def count_syllables(text):
    """Convenience wrapper: syllable count of *text*, or -1 when a word is
    unknown or unscannable."""
    counter = LineSyllablizer(text)
    return counter.count_syllables()
| {
"repo_name": "jdf/haikufinder",
"path": "haikufinder/__init__.py",
"copies": "1",
"size": "10271",
"license": "bsd-3-clause",
"hash": 2442744563536365600,
"line_mean": 36.2137681159,
"line_max": 123,
"alpha_frac": 0.5526238925,
"autogenerated": false,
"ratio": 3.4431780087160577,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9406709717404176,
"avg_score": 0.017818436762376374,
"num_lines": 276
} |
# A handler for all of the information which may be passed in on the command
# line. This singleton object should be able to handle the input, structure
# it and make it available in a useful fashion.
#
# This is a hook into a global data repository, should mostly be replaced with
# a Phil interface.
import collections
import copy
import logging
import os
import re
import sys
from dials.util import Sorry
from dxtbx.serialize import load
from xia2.Experts.FindImages import image2template_directory
from xia2.Handlers.Environment import which
from xia2.Handlers.Flags import Flags
from xia2.Handlers.Phil import PhilIndex
from xia2.Handlers.PipelineSelection import add_preference
from xia2.Schema import imageset_cache, update_with_reference_geometry
from xia2.Schema.XProject import XProject
logger = logging.getLogger("xia2.Handlers.CommandLine")
# Valid project/crystal names: a leading letter or underscore followed by
# word characters, matched over the whole string (match() anchors the start,
# "$" anchors the end).
PATTERN_VALID_CRYSTAL_PROJECT_NAME = re.compile(r"[a-zA-Z_]\w*$")
def load_experiments(filename):
    """Load a DIALS experiment list and register its imagesets.

    Applies any reference geometry from the PHIL parameters, then records
    each imageset in the global imageset_cache, keyed by template and by
    the first image number of its scan."""
    experiments = load.experiment_list(filename, check_format=False)
    imagesets = experiments.imagesets()
    params = PhilIndex.get_python_object()
    reference_geometry = params.xia2.settings.input.reference_geometry
    if reference_geometry is not None and len(reference_geometry) > 0:
        update_with_reference_geometry(imagesets, reference_geometry)
    for imageset in imagesets:
        template = imageset.get_template()
        if template not in imageset_cache:
            imageset_cache[template] = collections.OrderedDict()
        imageset_cache[template][imageset.get_scan().get_image_range()[0]] = imageset
def unroll_parameters(hdf5_master):
    """Determine auto-unroll parameters for Eiger data sets with multiple
    triggers - will mean several assumptions are made i.e. all are the
    same size and so on.

    Returns (ntrigger, nimages) when both exceed 1, otherwise None
    (including on any read error)."""
    assert hdf5_master.endswith(".h5")
    import h5py
    try:
        with h5py.File(hdf5_master, "r") as master:
            detector = master["/entry/instrument/detector"]
            ntrigger = detector["detectorSpecific/ntrigger"][()]
            nimages = detector["detectorSpecific/nimages"][()]
    except Exception:
        # best-effort: unreadable/odd files simply yield no unroll hint
        return None
    if ntrigger > 1 and nimages > 1:
        return ntrigger, nimages
def unroll_datasets(datasets):
    """Unroll datasets i.e. if input img:1:900:450 make this into 1:450;
    451:900.

    Specs without a chunk size pass through unchanged; HDF5 masters may
    have their chunking auto-discovered via unroll_parameters()."""
    result = []
    for spec in datasets:
        parts = spec.split(":")
        if len(parts[0]) == 1:
            # re-join a Windows drive letter such as C:\data\...
            parts = ["%s:%s" % (parts[0], parts[1])] + parts[2:]
        if parts[0].endswith(".h5") and len(parts) != 4:
            # check if we need to auto-discover the unrolling parameters
            # for multiple trigger data sets
            unroll_params = unroll_parameters(parts[0])
            if unroll_params:
                ntrigger, nimages = unroll_params
                if len(parts) == 1:
                    parts = [parts[0], "1", ntrigger * nimages, nimages]
                elif len(parts) == 3:
                    parts.append(nimages)
        if len(parts) in (1, 3):
            # nothing to unroll -- keep the spec as given
            result.append(spec)
            continue
        if len(parts) != 4:
            raise RuntimeError(
                "Dataset ranges must be passed as "
                "/path/to/image_0001.cbf:start:end[:chunk]"
            )
        first, last, step = map(int, parts[1:])
        if first + step - 1 > last:
            raise RuntimeError("chunk size greater than total")
        for begin in range(first, last, step):
            finish = min(begin + step - 1, last)
            result.append("%s:%d:%d" % (parts[0], begin, finish))
    return result
def validate_project_crystal_name(parameter, value):
    """Raise Sorry unless *value* is a valid project/crystal identifier
    (letters, digits, underscores; first character not a digit)."""
    if PATTERN_VALID_CRYSTAL_PROJECT_NAME.match(value):
        return
    raise Sorry(
        "%s name must consist only of alphanumeric characters and underscores. "
        "The first character must be a non-digit character." % parameter
    )
class _CommandLine:
"""A class to represent the command line input."""
def __init__(self):
    """Initialise all of the information from the command line."""
    # Raw argv copy and the indices of arguments we recognised.
    self._argv = []
    self._understood = []
    self.agOperatingSets = [] if False else None  # placeholder removed
    # Defaults harvested from image arguments on the command line.
    self._default_template = []
    self._default_directory = []
    self._default_start_end = {}
    self._hdf5_master_files = []
    # deprecated options prior to removal
    self._xinfo = None
def get_argv(self):
    """Return the (phil-filtered) copy of sys.argv captured by setup()."""
    return self._argv
def print_command_line(self):
    """Log the reconstructed command line at INFO level."""
    logger.info("Command line: %s", self.get_command_line())
def get_command_line(self):
    """Reconstruct the command line as a single string.

    Uses the libtbx dispatcher name when it looks like a genuine xia2
    dispatcher, otherwise falls back to plain "xia2"; arguments that
    contain spaces are double-quoted."""
    import libtbx.load_env

    dispatcher = libtbx.env.dispatcher_name
    if dispatcher and "xia2" in dispatcher and "python" not in dispatcher:
        cl = dispatcher
    else:
        cl = "xia2"
    for arg in sys.argv[1:]:
        cl += " %s" % ('"%s"' % arg if " " in arg else arg)
    return cl
def setup(self):
    """Set everything up...

    Parses sys.argv as PHIL parameters plus image/dataset arguments,
    normalises dependent settings (anomalous, multiprocessing,
    multi-sweep, pipelines), registers input images, writes the working
    and diff PHIL files, and finally rejects unrecognised options."""
    # check arguments are all ascii
    logger.debug("Start parsing command line: " + str(sys.argv))
    for token in sys.argv:
        try:
            token.encode("ascii")
        # NOTE(review): str.encode raises UnicodeEncodeError, which this
        # handler does not catch -- non-ascii input will propagate the
        # original exception rather than this RuntimeError; confirm intent.
        except UnicodeDecodeError:
            raise RuntimeError("non-ascii characters in input")
    self._argv = copy.deepcopy(sys.argv)
    # first of all try to interpret arguments as phil parameters/files
    from xia2.Handlers.Phil import master_phil
    from libtbx.phil import command_line
    cmd_line = command_line.argument_interpreter(master_phil=master_phil)
    # anything that is not a phil parameter is handed back in self._argv
    working_phil, self._argv = cmd_line.process_and_fetch(
        args=self._argv, custom_processor="collect_remaining"
    )
    PhilIndex.merge_phil(working_phil)
    try:
        params = PhilIndex.get_python_object()
    except RuntimeError as e:
        raise Sorry(e)
    # sanity check / interpret Auto in input
    from libtbx import Auto
    if params.xia2.settings.input.atom is None:
        if params.xia2.settings.input.anomalous is Auto:
            PhilIndex.update("xia2.settings.input.anomalous=false")
    else:
        # an anomalous scatterer implies an anomalous experiment
        if params.xia2.settings.input.anomalous is False:
            raise Sorry("Setting anomalous=false and atom type inconsistent")
        params.xia2.settings.input.anomalous = True
        PhilIndex.update("xia2.settings.input.anomalous=true")
    if params.xia2.settings.resolution.keep_all_reflections is Auto:
        # default: keep everything only for small-molecule runs with no
        # explicit resolution limits
        if (
            params.xia2.settings.small_molecule is True
            and params.xia2.settings.resolution.d_min is None
            and params.xia2.settings.resolution.d_max is None
        ):
            PhilIndex.update("xia2.settings.resolution.keep_all_reflections=true")
        else:
            PhilIndex.update("xia2.settings.resolution.keep_all_reflections=false")
    if params.xia2.settings.small_molecule is True:
        logger.debug("Small molecule selected")
        if params.xia2.settings.symmetry.chirality is None:
            PhilIndex.update("xia2.settings.symmetry.chirality=nonchiral")
        params = PhilIndex.get_python_object()
    # pipeline options
    self._read_pipeline()
    for (parameter, value) in (
        ("project", params.xia2.settings.project),
        ("crystal", params.xia2.settings.crystal),
    ):
        validate_project_crystal_name(parameter, value)
    logger.debug("Project: %s" % params.xia2.settings.project)
    logger.debug("Crystal: %s" % params.xia2.settings.crystal)
    # FIXME add some consistency checks in here e.g. that there are
    # images assigned, there is a lattice assigned if cell constants
    # are given and so on
    params = PhilIndex.get_python_object()
    mp_params = params.xia2.settings.multiprocessing
    from xia2.Handlers.Environment import get_number_cpus
    # resolve Auto values for njob/nproc depending on the mp mode
    if mp_params.mode == "parallel":
        if mp_params.type == "qsub":
            if which("qsub") is None:
                raise Sorry("qsub not available")
        if mp_params.njob is Auto:
            mp_params.njob = get_number_cpus()
            if mp_params.nproc is Auto:
                mp_params.nproc = 1
        elif mp_params.nproc is Auto:
            mp_params.nproc = get_number_cpus()
    elif mp_params.mode == "serial":
        if mp_params.type == "qsub":
            if which("qsub") is None:
                raise Sorry("qsub not available")
        if mp_params.njob is Auto:
            mp_params.njob = 1
        if mp_params.nproc is Auto:
            mp_params.nproc = get_number_cpus()
    PhilIndex.update("xia2.settings.multiprocessing.njob=%d" % mp_params.njob)
    PhilIndex.update("xia2.settings.multiprocessing.nproc=%d" % mp_params.nproc)
    params = PhilIndex.get_python_object()
    mp_params = params.xia2.settings.multiprocessing
    if mp_params.nproc > 1 and os.name == "nt":
        raise Sorry("nproc > 1 is not supported on Windows.")  # #191
    if params.xia2.settings.indexer is not None:
        add_preference("indexer", params.xia2.settings.indexer)
    if params.xia2.settings.refiner is not None:
        add_preference("refiner", params.xia2.settings.refiner)
    if params.xia2.settings.integrater is not None:
        add_preference("integrater", params.xia2.settings.integrater)
    if params.xia2.settings.scaler is not None:
        add_preference("scaler", params.xia2.settings.scaler)
    # If no multi-sweep refinement options have been set, adopt the default:
    # True for small-molecule mode, False otherwise.
    if params.xia2.settings.multi_sweep_refinement is Auto:
        if (
            params.xia2.settings.small_molecule is True
            and params.xia2.settings.indexer == "dials"
        ):
            PhilIndex.update("xia2.settings.multi_sweep_refinement=True")
        else:
            PhilIndex.update("xia2.settings.multi_sweep_refinement=False")
        params = PhilIndex.get_python_object()
    # Multi-sweep refinement requires multi-sweep indexing.
    if params.xia2.settings.multi_sweep_refinement:
        # Check that the user hasn't specified multi_sweep_indexing False:
        assert params.xia2.settings.multi_sweep_indexing, (
            "It seems you have specified that xia2 should use multi-sweep "
            "refinement without multi-sweep indexing.\n"
            "This is not currently possible."
        )
        PhilIndex.update("xia2.settings.multi_sweep_indexing=True")
        params = PhilIndex.get_python_object()
    # If no multi-sweep indexing settings have yet been set (either because
    # small_molecule is False or because it is True but the user has specified that
    # multi_sweep_refinement is False), then adopt the default settings -- True
    # for small-molecule mode, False otherwise.
    if params.xia2.settings.multi_sweep_indexing is Auto:
        if (
            params.xia2.settings.small_molecule is True
            and params.xia2.settings.indexer == "dials"
        ):
            PhilIndex.update("xia2.settings.multi_sweep_indexing=True")
        else:
            PhilIndex.update("xia2.settings.multi_sweep_indexing=False")
        params = PhilIndex.get_python_object()
    # Multi-sweep indexing is incompatible with parallel processing.
    assert not (
        params.xia2.settings.multi_sweep_indexing is True
        and params.xia2.settings.multiprocessing.mode == "parallel"
    ), (
        "Multi sweep indexing disabled:\n"
        "MSI is not available for parallel processing."
    )
    input_json = params.xia2.settings.input.json
    if input_json is not None and len(input_json):
        for json_file in input_json:
            assert os.path.isfile(json_file)
            load_experiments(json_file)
    reference_geometry = params.xia2.settings.input.reference_geometry
    if reference_geometry is not None and len(reference_geometry) > 0:
        # re-register the reference geometry with absolute paths, and trust
        # the beam centre it provides
        reference_geometries = "\n".join(
            [
                "xia2.settings.input.reference_geometry=%s" % os.path.abspath(g)
                for g in params.xia2.settings.input.reference_geometry
            ]
        )
        logger.debug(reference_geometries)
        PhilIndex.update(reference_geometries)
        logger.debug("xia2.settings.trust_beam_centre=true")
        PhilIndex.update("xia2.settings.trust_beam_centre=true")
        params = PhilIndex.get_python_object()
    params = PhilIndex.get_python_object()
    if params.xia2.settings.input.xinfo is not None:
        xinfo_file = os.path.abspath(params.xia2.settings.input.xinfo)
        PhilIndex.update("xia2.settings.input.xinfo=%s" % xinfo_file)
        params = PhilIndex.get_python_object()
        self.set_xinfo(xinfo_file)
        # issue #55 if not set ATOM in xinfo but anomalous=true or atom= set
        # on commandline, set here, should be idempotent
        if params.xia2.settings.input.anomalous is True:
            crystals = self._xinfo.get_crystals()
            for xname in crystals:
                xtal = crystals[xname]
                logger.debug("Setting anomalous for crystal %s" % xname)
                xtal.set_anomalous(True)
    else:
        # no xinfo provided: one will be generated automatically
        xinfo_file = "%s/automatic.xinfo" % os.path.abspath(os.curdir)
        PhilIndex.update("xia2.settings.input.xinfo=%s" % xinfo_file)
        params = PhilIndex.get_python_object()
    # absolutise any per-program phil file paths
    if params.dials.find_spots.phil_file is not None:
        PhilIndex.update(
            "dials.find_spots.phil_file=%s"
            % os.path.abspath(params.dials.find_spots.phil_file)
        )
    if params.dials.index.phil_file is not None:
        PhilIndex.update(
            "dials.index.phil_file=%s"
            % os.path.abspath(params.dials.index.phil_file)
        )
    if params.dials.refine.phil_file is not None:
        PhilIndex.update(
            "dials.refine.phil_file=%s"
            % os.path.abspath(params.dials.refine.phil_file)
        )
    if params.dials.integrate.phil_file is not None:
        PhilIndex.update(
            "dials.integrate.phil_file=%s"
            % os.path.abspath(params.dials.integrate.phil_file)
        )
    if params.xds.index.xparm is not None:
        Flags.set_xparm(params.xds.index.xparm)
    if params.xds.index.xparm_ub is not None:
        Flags.set_xparm_ub(params.xds.index.xparm_ub)
    if params.xia2.settings.scale.freer_file is not None:
        freer_file = os.path.abspath(params.xia2.settings.scale.freer_file)
        if not os.path.exists(freer_file):
            raise RuntimeError("%s does not exist" % freer_file)
        from xia2.Modules.FindFreeFlag import FindFreeFlag
        column = FindFreeFlag(freer_file)
        logger.debug(f"FreeR_flag column in {freer_file} found: {column}")
        PhilIndex.update("xia2.settings.scale.freer_file=%s" % freer_file)
    if params.xia2.settings.scale.reference_reflection_file is not None:
        reference_reflection_file = os.path.abspath(
            params.xia2.settings.scale.reference_reflection_file
        )
        if not os.path.exists(reference_reflection_file):
            raise RuntimeError("%s does not exist" % reference_reflection_file)
        PhilIndex.update(
            "xia2.settings.scale.reference_reflection_file=%s"
            % reference_reflection_file
        )
    params = PhilIndex.get_python_object()
    # interpret the image arguments, expanding any chunked ranges
    datasets = unroll_datasets(PhilIndex.params.xia2.settings.input.image)
    for dataset in datasets:
        start_end = None
        # here we only care about ':' which are later than C:\
        if ":" in dataset[3:]:
            tokens = dataset.split(":")
            # cope with windows drives i.e. C:\data\blah\thing_0001.cbf:1:100
            if len(tokens[0]) == 1:
                tokens = ["%s:%s" % (tokens[0], tokens[1])] + tokens[2:]
            if len(tokens) != 3:
                raise RuntimeError("/path/to/image_0001.cbf:start:end")
            dataset = tokens[0]
            start_end = int(tokens[1]), int(tokens[2])
        from xia2.Applications.xia2setup import is_hdf5_name
        if os.path.exists(os.path.abspath(dataset)):
            dataset = os.path.abspath(dataset)
        else:
            # try resolving the image relative to cwd or any directory
            # mentioned on the command line
            directories = [os.getcwd()] + self._argv[1:]
            found = False
            for d in directories:
                if os.path.exists(os.path.join(d, dataset)):
                    dataset = os.path.join(d, dataset)
                    found = True
                    break
            if not found:
                raise Sorry(
                    "Could not find %s in %s" % (dataset, " ".join(directories))
                )
        if is_hdf5_name(dataset):
            # HDF5 masters are keyed by their own path
            self._hdf5_master_files.append(dataset)
            if start_end:
                logger.debug("Image range: %d %d" % start_end)
                if dataset not in self._default_start_end:
                    self._default_start_end[dataset] = []
                self._default_start_end[dataset].append(start_end)
            else:
                logger.debug("No image range specified")
        else:
            # conventional image files are keyed by directory + template
            template, directory = image2template_directory(os.path.abspath(dataset))
            self._default_template.append(os.path.join(directory, template))
            self._default_directory.append(directory)
            logger.debug("Interpreted from image %s:" % dataset)
            logger.debug("Template %s" % template)
            logger.debug("Directory %s" % directory)
            if start_end:
                logger.debug("Image range: %d %d" % start_end)
                key = os.path.join(directory, template)
                if key not in self._default_start_end:
                    self._default_start_end[key] = []
                self._default_start_end[key].append(start_end)
            else:
                logger.debug("No image range specified")
    # finally, check that all arguments were read and raise an exception
    # if any of them were nonsense.
    with open("xia2-working.phil", "w") as f:
        f.write(PhilIndex.working_phil.as_str())
        f.write(
            os.linesep
        )  # temporarily required for https://github.com/dials/dials/issues/522
    with open("xia2-diff.phil", "w") as f:
        f.write(PhilIndex.get_diff().as_str())
        f.write(
            os.linesep
        )  # temporarily required for https://github.com/dials/dials/issues/522
    logger.debug("\nDifference PHIL:")
    logger.debug(PhilIndex.get_diff().as_str())
    logger.debug("Working PHIL:")
    logger.debug(PhilIndex.working_phil.as_str())
    nonsense = "Unknown command-line options:"
    was_nonsense = False
    for j, argv in enumerate(self._argv):
        if j == 0:
            continue
        if argv[0] != "-" and "=" not in argv:
            continue
        if j not in self._understood:
            nonsense += " %s" % argv
            was_nonsense = True
    if was_nonsense:
        raise RuntimeError(nonsense)
# command line parsers, getters and help functions.
def set_xinfo(self, xinfo):
logger.debug(60 * "-")
logger.debug("XINFO file: %s" % xinfo)
with open(xinfo) as fh:
logger.debug(fh.read().strip())
logger.debug(60 * "-")
self._xinfo = XProject(xinfo)
    def get_xinfo(self):
        """Return the XProject built from the .xinfo input file."""
        return self._xinfo
    def get_template(self):
        """Return the list of full image-template paths parsed from the input."""
        return self._default_template
    def get_start_ends(self, full_template):
        """Return the (start, end) image ranges recorded for *full_template*.

        Returns an empty list when no explicit range was given.
        """
        return self._default_start_end.get(full_template, [])
    def get_directory(self):
        """Return the list of image directories parsed from the input."""
        return self._default_directory
    def get_hdf5_master_files(self):
        """Return the list of HDF5 master files given on the command line."""
        return self._hdf5_master_files
@staticmethod
def _read_pipeline():
settings = PhilIndex.get_python_object().xia2.settings
indexer, refiner, integrater, scaler = None, None, None, None
if settings.pipeline == "3d":
logger.debug("3DR pipeline selected")
indexer, refiner, integrater, scaler = "xds", "xds", "xdsr", "xdsa"
elif settings.pipeline == "3di":
logger.debug("3DR pipeline; XDS indexing selected")
indexer, refiner, integrater, scaler = "xds", "xds", "xdsr", "xdsa"
elif settings.pipeline == "3dii":
logger.debug("3D II R pipeline (XDS IDXREF all images) selected")
indexer, refiner, integrater, scaler = "xdsii", "xds", "xdsr", "xdsa"
elif settings.pipeline == "3dd":
logger.debug("3DD pipeline (DIALS indexing) selected")
indexer, refiner, integrater, scaler = "dials", "xds", "xdsr", "xdsa"
elif settings.pipeline == "dials":
logger.debug("DIALS pipeline selected")
indexer, refiner, integrater, scaler = "dials", "dials", "dials", "dials"
elif settings.pipeline == "dials-aimless":
logger.debug("DIALS-LEGACY pipeline selected (DIALS, scaling with AIMLESS)")
indexer, refiner, integrater, scaler = "dials", "dials", "dials", "ccp4a"
if indexer is not None and settings.indexer is None:
PhilIndex.update("xia2.settings.indexer=%s" % indexer)
if refiner is not None and settings.refiner is None:
PhilIndex.update("xia2.settings.refiner=%s" % refiner)
if integrater is not None and settings.integrater is None:
PhilIndex.update("xia2.settings.integrater=%s" % integrater)
if scaler is not None and settings.scaler is None:
PhilIndex.update("xia2.settings.scaler=%s" % scaler)
if settings.scaler is not None:
if settings.pipeline.startswith("2d"):
allowed_scalers = ("ccp4a",)
elif settings.pipeline.startswith("3d"):
allowed_scalers = ("xdsa", "ccp4a")
elif settings.pipeline.startswith("dials"):
allowed_scalers = ("dials", "ccp4a")
if settings.scaler not in allowed_scalers:
raise ValueError(
"scaler=%s not compatible with pipeline=%s "
"(compatible scalers are %s)"
% (settings.scaler, settings.pipeline, " or ".join(allowed_scalers))
)
# Module-level singleton: the parsed command line.  setup() runs at import
# time and performs the argument parsing and PHIL-file side effects above.
CommandLine = _CommandLine()
CommandLine.setup()
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Handlers/CommandLine.py",
"copies": "1",
"size": "23317",
"license": "bsd-3-clause",
"hash": 3540070667827939300,
"line_mean": 39.3373702422,
"line_max": 88,
"alpha_frac": 0.5854171134,
"autogenerated": false,
"ratio": 3.88648108018003,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9969341964070277,
"avg_score": 0.0005112459019505001,
"num_lines": 578
} |
# A handler for management of program citations. This should initialise
# from a citations.xml file which can be found in a number of places...
# in particular $HOME or $USERDIR (I think, on Windows) .xia2,
# data etc...
#
# That would be %USERPROFILE%
import os
import xml.dom.minidom
class _Citations:
"""A class to track citations."""
def __init__(self):
self._citations = {}
self._cited = []
# set up the citations list...
dom = xml.dom.minidom.parse(
os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "Data", "citations.xml")
)
)
citations = dom.getElementsByTagName("citations")[0].getElementsByTagName(
"citation"
)
for citation in citations:
program = str(citation.attributes["program"].value)
citation_data = {}
for entry in citation.childNodes:
if entry.nodeType == entry.ELEMENT_NODE:
citation_data[entry.nodeName] = entry.childNodes[0].data
if "acta" not in citation_data:
# construct Acta style reference if necessary
citation_data["acta"] = self._bibtex_to_acta(citation_data["bibtex"])
if "url" not in citation_data:
# obtain URL from bibtex entry if possible
bibtex_data = self._parse_bibtex(citation_data["bibtex"])
if "url" in bibtex_data:
citation_data["url"] = bibtex_data["url"]
elif "doi" in bibtex_data:
citation_data["url"] = "https://doi.org/" + bibtex_data["doi"]
self._citations.setdefault(program, []).append(citation_data)
def cite(self, program):
"""Cite a given program."""
if program not in self._cited:
self._cited.append(program)
def get_programs(self):
"""Get a list of all of the programs which have been cited."""
return sorted(self._cited)
def get_citations(self):
"""Get a list of bibtex records of citations."""
return [cit["bibtex"] for cit in self.get_citations_dicts()]
def get_citations_dicts(self):
"""Get a list of citations dictionary objects."""
result = []
for c in self._cited:
for b in self._citations.get(c, []):
result.append(b)
return result
def get_citations_acta(self):
"""Return a list of strings of Acta style references."""
# want them in alphabetical order
return sorted(cit["acta"] for cit in self.get_citations_dicts())
def find_citations(self, program=None, acta=None):
"""Return a list of citations for a program name or an Acta style reference."""
results = []
if program:
results.extend(self._citations.get(program, []))
if acta:
results.extend(
citation
for citations in self._citations.values()
for citation in citations
if citation.get("acta") == acta
)
return results
def _parse_bibtex(self, bibtex):
"""A jiffy to parse a bibtex entry."""
contents = {"volume": ""}
for token in bibtex.split("\n"):
if "=" in token:
name, value = tuple(token.split("="))
# clean up the value...
value = value.replace("{", "").replace("}", "")
value = value.replace('"', "")
value = value.strip()
if value[-1] == ",":
value = value[:-1]
contents[name.strip()] = value
return contents
def _bibtex_to_acta(self, bibtex):
"""Construct an Acta-like formatted reference from a bibtex entry."""
data = self._parse_bibtex(bibtex)
actaformat = "%(author)s (%(year)s) %(journal)s %(volume)s."
# drop every 'and' but the last
data["author"] = data["author"].replace(
" and ", ", ", data["author"].count(" and ") - 1
)
if "pages" in data:
data["pages"] = data["pages"].replace("--", "-")
actaformat = "%(author)s (%(year)s) %(journal)s %(volume)s, %(pages)s."
return actaformat % data
# Module-level singleton; importing this module parses citations.xml once.
Citations = _Citations()
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Handlers/Citations.py",
"copies": "1",
"size": "4364",
"license": "bsd-3-clause",
"hash": -438990462421595800,
"line_mean": 30.1714285714,
"line_max": 87,
"alpha_frac": 0.5398716774,
"autogenerated": false,
"ratio": 4.029547553093259,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005034781690062453,
"num_lines": 140
} |
# A handler for matters of the operating environment, which will impact
# on data harvesting, working directories, a couple of other odds & sods.
import atexit
import ctypes
import logging
import os
import platform
import tempfile
from libtbx.introspection import number_of_processors
logger = logging.getLogger("xia2.Handlers.Environment")
def which(pgm):
    """Return the full path to executable *pgm* found on $PATH, else None."""
    search_path = os.getenv("PATH")
    for directory in search_path.split(os.path.pathsep):
        candidate = os.path.join(directory, pgm)
        if os.path.exists(candidate) and os.access(candidate, os.X_OK):
            return candidate
def memory_usage():
    """Return this process's peak resident set size, or 0 on failure.

    Units are whatever resource.getrusage reports for ru_maxrss on the
    current platform (kilobytes on Linux).
    """
    try:
        import resource
    except Exception as e:
        logger.debug("Error getting RAM usage: %s" % str(e))
        return 0
    try:
        return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    except Exception as e:
        logger.debug("Error getting RAM usage: %s" % str(e))
        return 0
def debug_memory_usage():
    """Log the calling file name, line number and current RAM usage."""
    try:
        import inspect

        caller = inspect.getframeinfo(inspect.stack()[1][0])
        short_name = os.path.split(caller.filename)[-1]
        logger.debug(
            "RAM usage at %s %d: %d" % (short_name, caller.lineno, memory_usage())
        )
    except Exception as e:
        logger.debug("Error getting RAM usage: %s" % str(e))
def df(path=None):
    """Return free disk space in bytes for *path* (default: the cwd)."""
    if not path:
        path = os.getcwd()
    if platform.system() == "Windows":
        # no statvfs on Windows; ask the Win32 API instead
        try:
            free_bytes = ctypes.c_ulonglong(0)
            ctypes.windll.kernel32.GetDiskFreeSpaceExW(
                ctypes.c_wchar_p(path), None, None, ctypes.pointer(free_bytes)
            )
            return free_bytes.value
        except Exception as e:
            logger.debug("Error getting disk space: %s" % str(e))
            return 0
    stats = os.statvfs(path)
    return stats.f_frsize * stats.f_bavail
def ulimit_n():
    # see xia2#172 - change limit on number of file handles to smaller of
    # hard limit, 4096
    try:
        import resource
    except ImportError:
        # not available on all operating systems. do nothing.
        return
    _, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    demand = min(4096, hard)
    resource.setrlimit(resource.RLIMIT_NOFILE, (demand, demand))
    current, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    logger.debug("File handle limits: %d/%d/%d" % (current, demand, hard))
def set_up_ccp4_tmpdir():
    """define a local CCP4_SCR"""
    ccp4_scr = tempfile.mkdtemp()
    os.environ["CCP4_SCR"] = ccp4_scr
    logger.debug("Created CCP4_SCR: %s" % ccp4_scr)

    def _remove_scratch_dir():
        # best effort: rmdir only succeeds once the directory is empty
        try:
            os.rmdir(ccp4_scr)
        except Exception:
            pass

    atexit.register(_remove_scratch_dir)
def get_number_cpus():
    """Portably get the number of processor cores available."""
    # Windows only has one CPU because easy_mp does not support more. #191
    if os.name == "nt":
        return 1
    # honour the NSLOTS environment variable when it holds a number
    try:
        return int(os.environ.get("NSLOTS"))
    except (ValueError, TypeError):
        return number_of_processors(return_value_if_unknown=-1)
# Import-time side effects: create a scratch CCP4_SCR directory and raise
# the open-file-handle soft limit (see xia2#172).
set_up_ccp4_tmpdir()
ulimit_n()
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Handlers/Environment.py",
"copies": "1",
"size": "3131",
"license": "bsd-3-clause",
"hash": -7968598544354753000,
"line_mean": 25.7606837607,
"line_max": 87,
"alpha_frac": 0.6285531779,
"autogenerated": false,
"ratio": 3.537853107344633,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9665920659759007,
"avg_score": 0.00009712509712509713,
"num_lines": 117
} |
# A handler to manage the data ending up in CIF output file
import bz2
import datetime
import iotbx.cif.model
import xia2.Handlers.Citations
from xia2.Handlers.Versions import versions
# Column headers for the mmCIF "_software" loop; one row per cited program.
mmcif_software_header = (
    "_software.pdbx_ordinal",
    "_software.citation_id",
    "_software.name",  # as defined at [1]
    "_software.version",
    "_software.type",
    "_software.classification",
    "_software.description",
)

# Column headers for the mmCIF "_citation" loop; one row per publication.
mmcif_citations_header = (
    "_citation.id",
    "_citation.journal_abbrev",
    "_citation.journal_volume",
    "_citation.journal_issue",
    "_citation.page_first",
    "_citation.page_last",
    "_citation.year",
    "_citation.title",
)
class _CIFHandler:
    """Accumulate crystallographic results and serialise them as CIF/mmCIF.

    One instance owns an iotbx.cif.model.cif document; data are added via
    the set_* methods and written out with write_cif().  The same class
    serves both dialects: *mmCIFsemantics* selects the dotted mmCIF data
    names and a bz2-compressed output file.
    """

    def __init__(self, mmCIFsemantics=False):
        self._cif = iotbx.cif.model.cif()
        # plain CIF goes to xia2.cif; the mmCIF flavour is bz2-compressed
        self._outfile = "xia2.cif" if not mmCIFsemantics else "xia2.mmcif.bz2"
        self._mmCIFsemantics = mmCIFsemantics
        # CIF/mmCIF key definitions
        # logical name -> concrete data name in the selected dialect
        self._keyname = (
            {
                "audit.method": "_audit_creation_method",
                "audit.date": "_audit_creation_date",
                "sg.system": "_space_group_crystal_system",
                "sg.number": "_space_group_IT_number",
                "sw.reduction": "_computing_data_reduction",
                "symm.ops": "_symmetry_equiv_pos_as_xyz",
                "symm.sgsymbol": "_symmetry_space_group_name_H-M",
                "cell.Z": "_cell_formula_units_Z",
                "wavelength": "_diffrn_radiation_wavelength",
                "wavelength.id": "_diffrn_radiation_wavelength_id",
                "references": "_publ_section_references",
            }
            if not mmCIFsemantics
            else {
                "audit.method": "_audit.creation_method",
                "audit.date": "_audit.creation_date",
                "sg.system": "_space_group.crystal_system",
                "sg.number": "_space_group.IT_number",
                "sw.reduction": "_computing.data_reduction",
                "symm.ops": "_symmetry_equiv.pos_as_xyz",
                "symm.sgsymbol": "_symmetry.space_group_name_H-M",
                "cell.Z": "_cell.formula_units_Z",
                "wavelength": "_diffrn_radiation_wavelength.wavelength",
                "wavelength.id": "_diffrn_radiation_wavelength.id",
                "references": "_publ.section_references",
            }
        )
        # prepopulate audit fields, so they end up at the top of the file
        self.collate_audit_information()

    def set_spacegroup(self, spacegroup, blockname=None):
        """Record space group symbol, number, system and symmetry operators."""
        sg = spacegroup.group()
        loop = iotbx.cif.model.loop()
        symm_ops = []
        # collect all symmetry operators except the identity
        for i in range(sg.n_smx()):
            rt_mx = sg(0, 0, i)
            if rt_mx.is_unit_mx():
                continue
            symm_ops.append(str(rt_mx))
        if self._mmCIFsemantics:
            loop["_symmetry_equiv.id"] = list(range(1, len(symm_ops) + 1))
        loop[self._keyname["symm.ops"]] = symm_ops
        block = self.get_block(blockname)
        if self._mmCIFsemantics:
            block["_symmetry.entry_id"] = block["_entry.id"]
        block[self._keyname["symm.sgsymbol"]] = spacegroup.lookup_symbol()
        if self._mmCIFsemantics:
            block["_space_group.id"] = 1  # category needs an 'id'
        block[self._keyname["sg.system"]] = sg.crystal_system().lower()
        block[self._keyname["sg.number"]] = spacegroup.number()
        block.add_loop(loop)

    def set_wavelengths(self, wavelength, blockname=None):
        """Record the wavelength(s): a dict becomes an id/value loop, a
        one-element sequence a scalar, anything else is stored as-is."""
        block = self.get_block(blockname)
        if isinstance(wavelength, dict):
            # replace any existing scalar entry with the loop
            if self._keyname["wavelength"] in block:
                del block[self._keyname["wavelength"]]
            loop = iotbx.cif.model.loop(
                header=[self._keyname["wavelength.id"], self._keyname["wavelength"]],
                data=[s for item in wavelength.items() for s in item],
            )
            block.add_loop(loop)
        else:
            if len(wavelength) == 1:
                block[self._keyname["wavelength"]] = wavelength[0]
            else:
                block[self._keyname["wavelength"]] = wavelength

    def __str__(self):
        """Return CIF as string representation."""
        # update audit information for citations
        self.collate_audit_information()
        return str(self._cif)

    def write_cif(self, path):
        """Write CIF to file (path is a pathlib-like directory)."""
        # update audit information for citations
        self.collate_audit_information()
        path.mkdir(parents=True, exist_ok=True)
        # compress the output when writing the .bz2 mmCIF flavour
        if self._outfile.endswith(".bz2"):
            open_fn = bz2.open
        else:
            open_fn = open
        with open_fn(str(path.joinpath(self._outfile)), "wt") as fh:
            self._cif.show(out=fh)

    def get_block(self, blockname=None):
        """Create (if necessary) and return named CIF block"""
        if blockname is None:
            blockname = "xia2"
        assert blockname, "invalid block name"
        if blockname not in self._cif:
            self._cif[blockname] = iotbx.cif.model.block()
            self._cif[blockname]["_entry.id"] = blockname
        return self._cif[blockname]

    def set_block(self, blockname, iotbxblock):
        """Store a block object, overwrite existing block if necessary"""
        self._cif[blockname] = iotbxblock

    def collate_audit_information(self, blockname=None):
        """Refresh audit/citation fields from the global Citations registry."""
        block = self.get_block(blockname)
        block["_audit.revision_id"] = 1
        block[self._keyname["audit.method"]] = versions["xia2"]
        block[self._keyname["audit.date"]] = datetime.date.today().isoformat()
        xia2.Handlers.Citations.Citations.cite("xia2")
        if self._mmCIFsemantics:
            if "_software" not in block.loop_keys():
                block.add_loop(iotbx.cif.model.loop(header=mmcif_software_header))
                block.add_loop(iotbx.cif.model.loop(header=mmcif_citations_header))
            software_loop = block.get_loop("_software")
            citations_loop = block.get_loop("_citation")
            # clear rows to avoid repeated row writing for multiple calls to
            # collate_audit_information
            for _ in range(0, software_loop.n_rows()):
                software_loop.delete_row(0)
                citations_loop.delete_row(0)
            count = 1
            for citation in xia2.Handlers.Citations.Citations.get_citations_dicts():
                # only citations carrying software_* fields are tabulated
                if "software_type" in citation:
                    software_loop.add_row(
                        (
                            count,
                            count,
                            citation["software_name"],
                            versions[citation["software_name"].lower()],
                            citation["software_type"],
                            citation["software_classification"],
                            citation["software_description"],
                        )
                    )
                    bibtex_data = xia2.Handlers.Citations.Citations._parse_bibtex(
                        citation["bibtex"]
                    )
                    # NOTE(review): assumes the bibtex entry carries journal,
                    # volume, number, pages ("first--last") and year fields;
                    # a missing field would raise KeyError -- confirm inputs
                    citations_loop.add_row(
                        (
                            count,
                            bibtex_data["journal"],
                            bibtex_data["volume"],
                            bibtex_data["number"],
                            bibtex_data["pages"].split("--")[0],
                            bibtex_data["pages"].split("--")[1],
                            bibtex_data["year"],
                            bibtex_data["title"].replace("\\it ", ""),
                        )
                    )
                    count += 1
        else:
            programs = []
            for program in xia2.Handlers.Citations.Citations.get_programs():
                citations = []
                for citation in xia2.Handlers.Citations.Citations.find_citations(
                    program
                ):
                    if "acta" in citation:
                        # shorten "Author (Year) ..." to "Author, Year"
                        if ")" in citation["acta"]:
                            citations.append(
                                citation["acta"][
                                    0 : citation["acta"].index(")")
                                ].replace(" (", ", ")
                            )
                        else:
                            citations.append(citation["acta"])
                if program == "xia2":
                    program = versions["xia2"]
                elif program == "dials":
                    program = versions["dials"]
                if citations:
                    program = program + " (%s)" % ("; ".join(citations))
                programs.append(program)
            block[self._keyname["sw.reduction"]] = "\n".join(programs)
        block[self._keyname["references"]] = "\n".join(
            xia2.Handlers.Citations.Citations.get_citations_acta()
        )
# Module-level singleton handlers for CIF and mmCIF output respectively.
CIF = _CIFHandler()
mmCIF = _CIFHandler(mmCIFsemantics=True)
# [1] http://mmcif.wwpdb.org/dictionaries/mmcif_pdbx_v50.dic/Items/_software.name.html
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Handlers/CIF.py",
"copies": "1",
"size": "9096",
"license": "bsd-3-clause",
"hash": -4668732332784693000,
"line_mean": 39.4266666667,
"line_max": 86,
"alpha_frac": 0.5163808267,
"autogenerated": false,
"ratio": 3.935958459541324,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4952339286241324,
"avg_score": null,
"num_lines": null
} |
# A handler to manage the data which needs to end up in the ISPyB database
# mapping xia2 verbose names to ISPyB API field names
# https://github.com/DiamondLightSource/ispyb-api/blob/
# 41bf10db91ab0f0bea91f77a5f37f087c28218ca/ispyb/sp/mxprocessing.py#L28-L32
# xia2 human-readable statistic labels -> ISPyB mxprocessing API field names
_name_map = {
    "High resolution limit": "res_lim_high",
    "Low resolution limit": "res_lim_low",
    "Completeness": "completeness",
    "Multiplicity": "multiplicity",
    "CC half": "cc_half",
    "Anomalous completeness": "anom_completeness",
    "Anomalous correlation": "cc_anom",
    "Anomalous multiplicity": "anom_multiplicity",
    "Total observations": "n_tot_obs",
    "Total unique": "n_tot_unique_obs",
    "Rmerge(I+/-)": "r_merge",
    "Rmeas(I)": "r_meas_all_iplusi_minus",
    "Rmeas(I+/-)": "r_meas_within_iplusi_minus",
    "Rpim(I)": "r_pim_all_iplusi_minus",
    "Rpim(I+/-)": "r_pim_within_iplusi_minus",
    "Partial Bias": "fract_partial_bias",
    "I/sigma": "mean_i_sig_i",
}
def xia2_to_json_object(xcrystals):
    """Convert a list of XCrystal objects to a dict using ISPyB field names.

    NOTE(review): each iteration overwrites result["refined_results"],
    result["scaling_statistics"] and result["integrations"], so with more
    than one crystal (or statistics key) only the last survives; and
    ``dname`` is read after the statistics loop, relying on the leaked loop
    variable (NameError if statistics_all is empty) -- confirm intended.
    """
    result = {}
    for xcrystal in xcrystals:
        cell = xcrystal.get_cell()
        spacegroup = xcrystal.get_likely_spacegroups()[0]
        # Stick closely to ISPyB field names
        # https://github.com/DiamondLightSource/ispyb-api/blob/
        # 41bf10db91ab0f0bea91f77a5f37f087c28218ca/ispyb/sp/mxprocessing.py#L24-L26
        result["refined_results"] = {
            "spacegroup": spacegroup,
            "refinedcell_a": cell[0],
            "refinedcell_b": cell[1],
            "refinedcell_c": cell[2],
            "refinedcell_alpha": cell[3],
            "refinedcell_beta": cell[4],
            "refinedcell_gamma": cell[5],
        }
        statistics_all = xcrystal.get_statistics()
        # wavelength_names = xcrystal.get_wavelength_names()
        for key, statistic in statistics_all.items():
            pname, xname, dname = key
            # FIXME should assert that the dname is a
            # valid wavelength name
            result["scaling_statistics"] = {}
            # j indexes the (overall, inner, outer) triple of each statistic
            for j, name in enumerate(("overall", "innerShell", "outerShell")):
                result["scaling_statistics"][name] = {
                    _name_map[stat_name]: stat_value[j]
                    for stat_name, stat_value in statistic.items()
                    if stat_name in _name_map
                }
        result["integrations"] = []
        xwavelength = xcrystal.get_xwavelength(dname)
        sweeps = xwavelength.get_sweeps()
        for sweep in sweeps:
            # Stick closely to ISPyB field names
            # https://github.com/DiamondLightSource/ispyb-api/blob/
            # 41bf10db91ab0f0bea91f77a5f37f087c28218ca/ispyb/sp/
            # mxprocessing.py#L34-L39
            integration = {}
            cell = sweep.get_integrater_cell()
            for name, value in zip(["a", "b", "c", "alpha", "beta", "gamma"], cell):
                integration["cell_%s" % name] = value
            # FIXME this is naughty
            indxr = sweep._get_indexer()
            intgr = sweep._get_integrater()
            start, end = intgr.get_integrater_wedge()
            integration["start_image_no"] = start
            integration["end_image_no"] = end
            integration["refined_detector_dist"] = indxr.get_indexer_distance()
            beam = indxr.get_indexer_beam_centre_raw_image()
            integration["refined_xbeam"] = beam[0]
            integration["refined_ybeam"] = beam[1]
            result["integrations"].append(integration)
    return result
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Interfaces/ISPyB/__init__.py",
"copies": "1",
"size": "3626",
"license": "bsd-3-clause",
"hash": -4944060234445878000,
"line_mean": 37.1684210526,
"line_max": 88,
"alpha_frac": 0.5728075014,
"autogenerated": false,
"ratio": 3.3419354838709676,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44147429852709674,
"avg_score": null,
"num_lines": null
} |
# A handler to manage the data which needs to end up in the ISPyB xml out
# file.
import os
import time
from xia2.Handlers.Files import FileHandler
from xia2.Handlers.Phil import PhilIndex
def sanitize(path):
    """Collapse runs of doubled path separators in *path* to single ones.

    The replacement is repeated until no doubled separator remains: a
    single str.replace pass leaves odd-length runs partially collapsed
    (e.g. "///" became "//").
    """
    double = os.sep * 2
    while double in path:
        path = path.replace(double, os.sep)
    return path
class ISPyBXmlHandler:
    """Accumulate per-crystal processing results and serialise them as the
    ISPyB AutoProcContainer XML document (or the equivalent JSON object)."""

    def __init__(self, project):
        self._crystals = {}
        self._per_crystal_data = {}
        self._project = project
        # xia2 statistic labels -> ISPyB XML element names
        self._name_map = {
            "High resolution limit": "resolutionLimitHigh",
            "Low resolution limit": "resolutionLimitLow",
            "Completeness": "completeness",
            "Multiplicity": "multiplicity",
            "CC half": "ccHalf",
            "Anomalous completeness": "anomalousCompleteness",
            "Anomalous correlation": "ccAnomalous",
            "Anomalous multiplicity": "anomalousMultiplicity",
            "Total observations": "nTotalObservations",
            "Total unique": "nTotalUniqueObservations",
            "Rmerge(I+/-)": "rMerge",
            "Rmeas(I)": "rMeasAllIPlusIMinus",
            "Rmeas(I+/-)": "rMeasWithinIPlusIMinus",
            "Rpim(I)": "rPimAllIPlusIMinus",
            "Rpim(I+/-)": "rPimWithinIPlusIMinus",
            "Partial Bias": "fractionalPartialBias",
            "I/sigma": "meanIOverSigI",
        }

    def add_xcrystal(self, xcrystal):
        # first registration of a crystal name wins; duplicates are ignored
        if not xcrystal.get_name() in self._crystals:
            self._crystals[xcrystal.get_name()] = xcrystal
        # should ideally drill down and get the refined cell constants for
        # each sweep and the scaling statistics for low resolution, high
        # resolution and overall...

    @staticmethod
    def write_date(fout):
        """Write the current date and time out as XML."""
        fout.write(
            "<recordTimeStamp>%s</recordTimeStamp>\n"
            % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        )

    @staticmethod
    def write_cell(fout, cell):
        """Write out a UNIT CELL as XML..."""
        fout.write("<cell_a>%f</cell_a>" % cell[0])
        fout.write("<cell_b>%f</cell_b>" % cell[1])
        fout.write("<cell_c>%f</cell_c>" % cell[2])
        fout.write("<cell_alpha>%f</cell_alpha>" % cell[3])
        fout.write("<cell_beta>%f</cell_beta>" % cell[4])
        fout.write("<cell_gamma>%f</cell_gamma>" % cell[5])

    @staticmethod
    def write_refined_cell(fout, cell):
        """Write out a REFINED UNIT CELL as XML..."""
        fout.write("<refinedCell_a>%f</refinedCell_a>" % cell[0])
        fout.write("<refinedCell_b>%f</refinedCell_b>" % cell[1])
        fout.write("<refinedCell_c>%f</refinedCell_c>" % cell[2])
        fout.write("<refinedCell_alpha>%f</refinedCell_alpha>" % cell[3])
        fout.write("<refinedCell_beta>%f</refinedCell_beta>" % cell[4])
        fout.write("<refinedCell_gamma>%f</refinedCell_gamma>" % cell[5])

    def write_scaling_statistics(self, fout, scaling_stats_type, stats_dict):
        """Write out the SCALING STATISTICS block..."""
        fout.write("<AutoProcScalingStatistics>\n")
        fout.write(
            "<scalingStatisticsType>%s</scalingStatisticsType>\n" % scaling_stats_type
        )
        for name in stats_dict:
            # only statistics with a known ISPyB element name are emitted
            if name not in self._name_map:
                continue
            out_name = self._name_map[name]
            # observation counts are integers; everything else passes through
            if out_name in ["nTotalObservations", "nTotalUniqueObservations"]:
                fout.write("<%s>%d</%s>" % (out_name, int(stats_dict[name]), out_name))
            else:
                fout.write("<%s>%s</%s>" % (out_name, stats_dict[name], out_name))
        fout.write("</AutoProcScalingStatistics>\n")

    def write_xml(self, file, command_line="", working_phil=None):
        """Serialise all registered crystals to *file* as AutoProc XML.

        NOTE(review): the *file* parameter shadows a builtin name and fout
        is not closed if an exception occurs mid-write -- a context manager
        would be safer.
        """
        if working_phil is not None:
            PhilIndex.merge_phil(working_phil)
        params = PhilIndex.get_python_object()
        fout = open(file, "w")
        fout.write('<?xml version="1.0"?>')
        fout.write("<AutoProcContainer>\n")
        for crystal in sorted(self._crystals):
            xcrystal = self._crystals[crystal]
            cell = xcrystal.get_cell()
            spacegroup = xcrystal.get_likely_spacegroups()[0]
            fout.write("<AutoProc><spaceGroup>%s</spaceGroup>" % spacegroup)
            self.write_refined_cell(fout, cell)
            fout.write("</AutoProc>")
            fout.write("<AutoProcScalingContainer>")
            fout.write("<AutoProcScaling>")
            self.write_date(fout)
            fout.write("</AutoProcScaling>")
            statistics_all = xcrystal.get_statistics()
            reflection_files = xcrystal.get_scaled_merged_reflections()
            for key in statistics_all:
                pname, xname, dname = key
                # FIXME should assert that the dname is a
                # valid wavelength name
                keys = [
                    "High resolution limit",
                    "Low resolution limit",
                    "Completeness",
                    "Multiplicity",
                    "I/sigma",
                    "Rmerge(I+/-)",
                    "CC half",
                    "Anomalous completeness",
                    "Anomalous correlation",
                    "Anomalous multiplicity",
                    "Total observations",
                    "Total unique",
                    "Rmeas(I)",
                    "Rmeas(I+/-)",
                    "Rpim(I)",
                    "Rpim(I+/-)",
                    "Partial Bias",
                ]
                stats = [k for k in keys if k in statistics_all[key]]
                xwavelength = xcrystal.get_xwavelength(dname)
                sweeps = xwavelength.get_sweeps()
                # j indexes the (overall, inner, outer) triple of each statistic
                for j, name in enumerate(["overall", "innerShell", "outerShell"]):
                    statistics_cache = {}
                    for s in stats:
                        if isinstance(statistics_all[key][s], type([])):
                            statistics_cache[s] = statistics_all[key][s][j]
                        elif isinstance(statistics_all[key][s], type(())):
                            statistics_cache[s] = statistics_all[key][s][j]
                    # send these to be written out
                    self.write_scaling_statistics(fout, name, statistics_cache)
                for sweep in sweeps:
                    fout.write("<AutoProcIntegrationContainer>\n")
                    # a '#' in the template means numbered images on disk
                    if "#" in sweep.get_template():
                        image_name = sweep.get_image_name(0)
                    else:
                        image_name = os.path.join(
                            sweep.get_directory(), sweep.get_template()
                        )
                    fout.write(
                        "<Image><fileName>%s</fileName>" % os.path.split(image_name)[-1]
                    )
                    fout.write(
                        "<fileLocation>%s</fileLocation></Image>"
                        % sanitize(os.path.split(image_name)[0])
                    )
                    fout.write("<AutoProcIntegration>\n")
                    cell = sweep.get_integrater_cell()
                    self.write_cell(fout, cell)
                    # FIXME this is naughty
                    intgr = sweep._get_integrater()
                    start, end = intgr.get_integrater_wedge()
                    fout.write("<startImageNumber>%d</startImageNumber>" % start)
                    fout.write("<endImageNumber>%d</endImageNumber>" % end)
                    # FIXME this is naughty
                    indxr = sweep._get_indexer()
                    fout.write(
                        "<refinedDetectorDistance>%f</refinedDetectorDistance>"
                        % indxr.get_indexer_distance()
                    )
                    beam = indxr.get_indexer_beam_centre_raw_image()
                    fout.write("<refinedXBeam>%f</refinedXBeam>" % beam[0])
                    fout.write("<refinedYBeam>%f</refinedYBeam>" % beam[1])
                    fout.write("</AutoProcIntegration>\n")
                    fout.write("</AutoProcIntegrationContainer>\n")
            fout.write("</AutoProcScalingContainer>")
            # file unpacking nonsense
            if not command_line:
                from xia2.Handlers.CommandLine import CommandLine

                command_line = CommandLine.get_command_line()
            pipeline = params.xia2.settings.pipeline
            fout.write("<AutoProcProgramContainer><AutoProcProgram>")
            fout.write(
                "<processingCommandLine>%s</processingCommandLine>"
                % sanitize(command_line)
            )
            fout.write("<processingPrograms>xia2 %s</processingPrograms>" % pipeline)
            fout.write("</AutoProcProgram>")
            data_directory = self._project.path / "DataFiles"
            log_directory = self._project.path / "LogFiles"
            for k in reflection_files:
                reflection_file = reflection_files[k]
                # only string entries name actual files on disk
                if not isinstance(reflection_file, type("")):
                    continue
                reflection_file = FileHandler.get_data_file(
                    self._project.path, reflection_file
                )
                basename = os.path.basename(reflection_file)
                if data_directory.joinpath(basename).exists():
                    # Use file in DataFiles directory in preference (if it exists)
                    reflection_file = str(data_directory.joinpath(basename))
                fout.write("<AutoProcProgramAttachment><fileType>Result")
                fout.write(
                    "</fileType><fileName>%s</fileName>"
                    % os.path.split(reflection_file)[-1]
                )
                fout.write(
                    "<filePath>%s</filePath>"
                    % sanitize(os.path.split(reflection_file)[0])
                )
                fout.write("</AutoProcProgramAttachment>\n")
            g = log_directory.glob("*merging-statistics.json")
            for merging_stats_json in g:
                fout.write("<AutoProcProgramAttachment><fileType>Graph")
                fout.write(
                    "</fileType><fileName>%s</fileName>"
                    % os.path.split(str(merging_stats_json))[-1]
                )
                fout.write("<filePath>%s</filePath>" % sanitize(str(log_directory)))
                fout.write("</AutoProcProgramAttachment>\n")
            # add the xia2.txt file...
            fout.write("<AutoProcProgramAttachment><fileType>Log")
            fout.write("</fileType><fileName>xia2.txt</fileName>")
            fout.write("<filePath>%s</filePath>" % sanitize(os.getcwd()))
            fout.write("</AutoProcProgramAttachment>\n")
            fout.write("</AutoProcProgramContainer>")
        fout.write("</AutoProcContainer>\n")
        fout.close()

    def json_object(self, command_line=""):
        """Return the same information as write_xml() as a nested dict.

        NOTE(review): the top-level result keys are overwritten on each
        pass of the crystal loop, so only the last crystal's data survive
        -- confirm this mirrors write_xml()'s intent.
        """
        result = {}
        for crystal in sorted(self._crystals):
            xcrystal = self._crystals[crystal]
            cell = xcrystal.get_cell()
            spacegroup = xcrystal.get_likely_spacegroups()[0]
            result["AutoProc"] = {}
            tmp = result["AutoProc"]
            tmp["spaceGroup"] = spacegroup
            for name, value in zip(["a", "b", "c", "alpha", "beta", "gamma"], cell):
                tmp["refinedCell_%s" % name] = value
            result["AutoProcScalingContainer"] = {}
            tmp = result["AutoProcScalingContainer"]
            tmp["AutoProcScaling"] = {
                "recordTimeStamp": time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
            }
            statistics_all = xcrystal.get_statistics()
            reflection_files = xcrystal.get_scaled_merged_reflections()
            for key in list(statistics_all.keys()):
                pname, xname, dname = key
                # FIXME should assert that the dname is a
                # valid wavelength name
                keys = [
                    "High resolution limit",
                    "Low resolution limit",
                    "Completeness",
                    "Multiplicity",
                    "I/sigma",
                    "Rmerge(I+/-)",
                    "CC half",
                    "Anomalous completeness",
                    "Anomalous correlation",
                    "Anomalous multiplicity",
                    "Total observations",
                    "Total unique",
                    "Rmeas(I)",
                    "Rmeas(I+/-)",
                    "Rpim(I)",
                    "Rpim(I+/-)",
                    "Partial Bias",
                ]
                stats = [k for k in keys if k in statistics_all[key]]
                xwavelength = xcrystal.get_xwavelength(dname)
                sweeps = xwavelength.get_sweeps()
                tmp["AutoProcScalingStatistics"] = []
                tmp2 = tmp["AutoProcScalingStatistics"]
                for j, name in enumerate(["overall", "innerShell", "outerShell"]):
                    statistics_cache = {"scalingStatisticsType": name}
                    for s in stats:
                        if s in self._name_map:
                            n = self._name_map[s]
                        else:
                            continue
                        if isinstance(statistics_all[key][s], type([])):
                            statistics_cache[n] = statistics_all[key][s][j]
                        elif isinstance(statistics_all[key][s], type(())):
                            statistics_cache[n] = statistics_all[key][s][j]
                    tmp2.append(statistics_cache)
                tmp["AutoProcIntegrationContainer"] = []
                tmp2 = tmp["AutoProcIntegrationContainer"]
                for sweep in sweeps:
                    # a '#' in the template means numbered images on disk
                    if "#" in sweep.get_template():
                        image_name = sweep.get_image_name(0)
                    else:
                        image_name = os.path.join(
                            sweep.get_directory(), sweep.get_template()
                        )
                    cell = sweep.get_integrater_cell()
                    intgr_tmp = {}
                    for name, value in zip(
                        ["a", "b", "c", "alpha", "beta", "gamma"], cell
                    ):
                        intgr_tmp["cell_%s" % name] = value
                    # FIXME this is naughty
                    indxr = sweep._get_indexer()
                    intgr = sweep._get_integrater()
                    start, end = intgr.get_integrater_wedge()
                    intgr_tmp["startImageNumber"] = start
                    intgr_tmp["endImageNumber"] = end
                    intgr_tmp["refinedDetectorDistance"] = indxr.get_indexer_distance()
                    beam = indxr.get_indexer_beam_centre_raw_image()
                    intgr_tmp["refinedXBeam"] = beam[0]
                    intgr_tmp["refinedYBeam"] = beam[1]
                    tmp2.append(
                        {
                            "Image": {
                                "fileName": os.path.split(image_name)[-1],
                                "fileLocation": sanitize(os.path.split(image_name)[0]),
                            },
                            "AutoProcIntegration": intgr_tmp,
                        }
                    )
            # file unpacking nonsense
            result["AutoProcProgramContainer"] = {}
            tmp = result["AutoProcProgramContainer"]
            tmp2 = {}
            if not command_line:
                from xia2.Handlers.CommandLine import CommandLine

                command_line = CommandLine.get_command_line()
            tmp2["processingCommandLine"] = sanitize(command_line)
            tmp2["processingProgram"] = "xia2"
            tmp["AutoProcProgram"] = tmp2
            tmp["AutoProcProgramAttachment"] = []
            tmp2 = tmp["AutoProcProgramAttachment"]
            data_directory = self._project.path / "DataFiles"
            for k in reflection_files:
                reflection_file = reflection_files[k]
                # only string entries name actual files on disk
                if not isinstance(reflection_file, type("")):
                    continue
                reflection_file = FileHandler.get_data_file(
                    self._project.path, reflection_file
                )
                basename = os.path.basename(reflection_file)
                if data_directory.joinpath(basename).exists():
                    # Use file in DataFiles directory in preference (if it exists)
                    reflection_file = str(data_directory.joinpath(basename))
                tmp2.append(
                    {
                        "fileType": "Result",
                        "fileName": os.path.split(reflection_file)[-1],
                        "filePath": sanitize(os.path.split(reflection_file)[0]),
                    }
                )
            tmp2.append(
                {
                    "fileType": "Log",
                    "fileName": "xia2.txt",
                    "filePath": sanitize(os.getcwd()),
                }
            )
        return result
| {
"repo_name": "xia2/xia2",
"path": "src/xia2/Interfaces/ISPyB/ISPyBXmlHandler.py",
"copies": "1",
"size": "17242",
"license": "bsd-3-clause",
"hash": 7327731286512828000,
"line_mean": 36.6462882096,
"line_max": 88,
"alpha_frac": 0.4937942234,
"autogenerated": false,
"ratio": 4.373921867072552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0004107507356871734,
"num_lines": 458
} |
# A handler to render the index.html template for the MOL AngularJS SPA
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
import logging
import os
import random
import ee_assets
import webapp2
import json
import cache
# PROD is True unless we're running under the App Engine dev server,
# whose SERVER_SOFTWARE value starts with 'Development'.  A missing
# SERVER_SOFTWARE (e.g. unit tests) is treated as production.
PROD = not os.environ.get('SERVER_SOFTWARE', '').startswith('Development')
class BaseHandler(webapp2.RequestHandler):
    """Common rendering helpers shared by the app's request handlers."""

    def render_template(self, f, template_args):
        """Render template file *f* (under server_templates/) with *template_args*."""
        path = os.path.join(os.path.dirname(__file__), "server_templates", f)
        self.response.out.write(template.render(path, template_args))

    def push_html(self, f):
        """Write the raw contents of file *f* (under server_templates/) to the response."""
        path = os.path.join(os.path.dirname(__file__), "server_templates", f)
        # Use a context manager so the handle is closed promptly; the
        # original leaked the file object returned by open().
        with open(path, 'r') as fh:
            self.response.out.write(fh.read())
class Earthenv(BaseHandler):
    """Serves the single-page application shell."""

    def get(self):
        """Render index.html with the cached EarthEnv dataset collections."""
        datasets = cache.get('earthenv-maps')
        context = {
            "rand": "klsjdflkjs",
            "collections": datasets,
        }
        self.render_template('index.html', context)

    def post(self):
        """POST serves the raw index.html without template processing."""
        self.push_html('index.html')
# WSGI route table.  NOTE(review): the catch-all '/.*' route is listed
# first, so the '/' route below it is never matched; both map to the
# same handler, so behavior is unaffected, but the second entry is
# effectively dead.
application = webapp2.WSGIApplication(
    [
        ('/.*', Earthenv),
        ('/', Earthenv)
    ],
    debug=True)
def main():
    """Entry point: hand the WSGI application to the CGI adapter."""
    run_wsgi_app(application)


if __name__ == "__main__":
    main()
| {
"repo_name": "earthenv/mapotron",
"path": "earthenv.py",
"copies": "1",
"size": "1308",
"license": "bsd-3-clause",
"hash": -1846455668739548200,
"line_mean": 24.6470588235,
"line_max": 77,
"alpha_frac": 0.6391437309,
"autogenerated": false,
"ratio": 3.4603174603174605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9477558461637394,
"avg_score": 0.024380545916013283,
"num_lines": 51
} |
# A handy translator that converts control flow into the appropriate
# llvm_cbuilder constructs
from numba.functions import _get_ast, fix_ast_lineno
import inspect, functools, ast
import logging
logger = logging.getLogger(__name__)
def translate(func):
    """Translate *func*'s Python control flow into llvm_cbuilder calls.

    The function's AST is rewritten by ExpandControlFlow, recompiled,
    and evaluated against a namespace assembled from the caller's frame.

    NOTE(review): `wrapper` is created but never applied to anything,
    and `eval` of an exec-mode code object returns None -- confirm the
    intended return value before relying on it.
    """
    # TODO use meta package
    wrapper = functools.wraps(func)
    # The caller's frame supplies the globals/locals the translated
    # code will resolve names against.
    caller_frame = inspect.currentframe().f_back
    tree = _get_ast(func)
    tree = ast.Module(body=tree.body)
    # Expand if/while/for/return and numeric literals into cbuilder calls.
    tree = ExpandControlFlow().visit(tree)
    fix_ast_lineno(tree)
    # prepare locals for execution
    local_dict = locals()
    local_dict.update(caller_frame.f_locals)
    local_dict.update(caller_frame.f_globals)
    try:
        compiled = compile(tree, '<string>', 'exec')
        return eval(compiled)
    except Exception as e:
        # On failure, log the raw AST and a source-level rendering for
        # debugging, then re-raise the original error.
        logger.debug(ast.dump(tree))
        from ArminRonacher import codegen # uses Armin Ronacher's codegen to debug
        # http://dev.pocoo.org/hg/sandbox/file/852a1248c8eb/ast/codegen.py
        logger.debug(codegen.to_source(tree))
        raise
_if_else_template = '''
with self.ifelse(__CONDITION__) as _ifelse_:
with _ifelse_.then():
__THEN__
with _ifelse_.otherwise():
__OTHERWISE__
'''
_while_template = '''
with self.loop() as _loop_:
with _loop_.condition() as _setcond_:
_setcond_(__CONDITION__)
with _loop_.body():
__BODY__
'''
_for_range_template = '''
with self.for_range(*__ARGS__) as (_loop_, __ITER__):
__BODY__
'''
_return_template = 'self.ret(__RETURN__)'
_const_int_template = 'self.constant(C.int, __VALUE__)'
_const_long_template = 'self.constant(C.long, __VALUE__)'
_const_float_template = 'self.constant(C.double, __VALUE__)'
def load_template(string):
    """Parse *string* and return its first statement node.

    ast.parse() always wraps source in an ast.Module; callers want the
    single statement inside, so the Module wrapper is unwrapped here.
    """
    module = ast.parse(string)
    assert isinstance(module, ast.Module)
    return module.body[0]
class ExpandControlFlow(ast.NodeTransformer):
    '''
    Expand control flow constructs.
    These are the most tedious thing to do in llvm_cbuilder.

    Each visit_* method replaces a Python construct with the matching
    llvm_cbuilder template (see the _*_template strings), filling the
    template placeholders via MacroExpander, then recursing so nested
    constructs are expanded as well.
    '''

    def visit_If(self, node):
        """Rewrite if/else into the self.ifelse() template."""
        mapping = {
            '__CONDITION__': node.test,
            '__THEN__': node.body,
            '__OTHERWISE__': node.orelse,
        }
        ifelse = load_template(_if_else_template)
        ifelse = MacroExpander(mapping).visit(ifelse)
        return self.generic_visit(ifelse)

    def visit_While(self, node):
        """Rewrite while into the self.loop() template."""
        mapping = {
            '__CONDITION__': node.test,
            '__BODY__': node.body,
        }
        whileloop = load_template(_while_template)
        whileloop = MacroExpander(mapping).visit(whileloop)
        return self.generic_visit(whileloop)

    def visit_For(self, node):
        """Rewrite `for i in (x)range(...)` into self.for_range().

        Loops over anything other than a plain range()/xrange() call
        are left untouched.
        """
        try:
            if node.iter.func.id not in ['range', 'xrange']:
                return node
        except AttributeError:
            # node.iter is not a simple call expression.
            return node
        mapping = {
            '__ITER__': node.target,
            '__BODY__': node.body,
            '__ARGS__': ast.Tuple(elts=node.iter.args, ctx=ast.Load()),
        }
        forloop = load_template(_for_range_template)
        forloop = MacroExpander(mapping).visit(forloop)
        return self.generic_visit(forloop)

    def visit_Return(self, node):
        """Rewrite return into self.ret()."""
        mapping = {'__RETURN__': node.value}
        ret = load_template(_return_template)
        return MacroExpander(mapping).visit(ret)

    def visit_Num(self, node):
        '''Convert numeric literals into self.constant(...) calls.'''
        typemap = {
            int: _const_int_template,
            float: _const_float_template,
        }
        # Python 2 has a distinct `long` type; referencing the name
        # unguarded raised NameError under Python 3.
        try:
            typemap[long] = _const_long_template  # noqa: F821
        except NameError:
            pass
        template = load_template(typemap[type(node.n)])
        mapping = {
            '__VALUE__': node,
        }
        return MacroExpander(mapping).visit(template).value
class MacroExpander(ast.NodeTransformer):
    """Substitute placeholder names in a template AST with real nodes.

    `mapping` maps placeholder identifiers (e.g. '__BODY__') to either
    a single AST node or a list of statement nodes.  Each placeholder
    is consumed (popped) the first time it is found.
    """

    def __init__(self, mapping):
        self.mapping = mapping

    def visit_With(self, node):
        '''
        Expand X in the following:
            with blah:
                X
        Nothing should go before or after X.
        X must be a list of nodes.
        '''
        if (len(node.body) == 1  # body is a single bare-name expression
                and isinstance(node.body[0], ast.Expr)
                and isinstance(node.body[0].value, ast.Name)):
            try:
                repl = self.mapping.pop(node.body[0].value.id)
            except KeyError:
                pass
            else:
                # Replace the placeholder expression with the statement
                # list.  (Removed an unused local that captured the old
                # body here.)
                node.body = repl
        return self.generic_visit(node)  # recursively expand all macros

    def visit_Name(self, node):
        '''
        Expand a Name node into its mapped replacement value.
        Placeholders mapped to None or to a list are left in place.
        '''
        try:
            repl = self.mapping.pop(node.id)
        except KeyError:
            pass
        else:
            if repl is not None and not isinstance(repl, list):
                return repl
        return node
| {
"repo_name": "llvmpy/llvmpy",
"path": "llvm_cbuilder/translator.py",
"copies": "1",
"size": "5607",
"license": "bsd-3-clause",
"hash": 474415111170880400,
"line_mean": 27.9020618557,
"line_max": 82,
"alpha_frac": 0.5662564651,
"autogenerated": false,
"ratio": 3.8116927260367097,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48779491911367096,
"avg_score": null,
"num_lines": null
} |
"""A hardware card."""
from csrv.model import actions
from csrv.model import events
from csrv.model import modifiers
from csrv.model.cards import installable_card
from csrv.model import game_object
from csrv.model import timing_phases
from csrv.model.cards import card_info
class Hardware(installable_card.InstallableCard):
    """A hardware card installable into the runner's rig."""

    TYPE = card_info.HARDWARE

    WHEN_IN_HAND_PROVIDES_CHOICES_FOR = {
        timing_phases.RunnerTurnActions: 'install_actions',
    }

    @property
    def cost(self):
        """Install cost: base COST plus all applicable HardwareCostModifiers."""
        bucket = self.game.modifiers[modifiers.HardwareCostModifier]
        total = self.COST
        total += sum(mod.value for mod in bucket.card_scope[self])
        total += sum(mod.value for mod in bucket.global_scope)
        return total

    def build_actions(self):
        """Create this card's actions, extending the base installable set."""
        installable_card.InstallableCard.build_actions(self)
        self.install_action = actions.InstallHardware(self.game, self.player, self)

    def on_install(self):
        """Fire the InstallHardware event after the base install hook."""
        installable_card.InstallableCard.on_install(self)
        self.trigger_event(events.InstallHardware(self.game, self.player))

    def install_actions(self):
        """Installing requires a click; offer the action only if one remains."""
        return [self.install_action] if self.player.clicks.value else []

    def install_host_targets(self):
        """Rig cards that are able to host this card."""
        return [card for card in self.game.runner.rig.cards if card.can_host(self)]
| {
"repo_name": "mrroach/CentralServer",
"path": "csrv/model/cards/hardware.py",
"copies": "1",
"size": "1364",
"license": "apache-2.0",
"hash": 3393654110893479400,
"line_mean": 26.8367346939,
"line_max": 79,
"alpha_frac": 0.7162756598,
"autogenerated": false,
"ratio": 3.6373333333333333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48536089931333337,
"avg_score": null,
"num_lines": null
} |
"""A health check for OpenShift clusters."""
from openshift_checks import OpenShiftCheck
class EtcdVolume(OpenShiftCheck):
    """Ensures etcd storage usage does not exceed a given threshold."""

    name = "etcd_volume"
    tags = ["etcd", "health"]

    # Default device usage threshold. Value should be in the range [0, 100].
    default_threshold_percent = 90
    # Where to find etcd data
    etcd_mount_path = "/var/lib/etcd"

    def is_active(self):
        """Run only on hosts belonging to the etcd (or masters) group."""
        etcd_hosts = (
            self.get_var("groups", "oo_etcd_to_config", default=[]) or
            self.get_var("groups", "oo_masters_to_config", default=[]) or
            []
        )
        host_is_etcd = self.get_var("ansible_host") in etcd_hosts
        return super(EtcdVolume, self).is_active() and host_is_etcd

    def run(self):
        """Fail when used space on the etcd mount exceeds the threshold."""
        mount_info = self.find_ansible_mount(self.etcd_mount_path)
        total = mount_info["size_total"]
        used = total - mount_info["size_available"]

        threshold = self.get_var(
            "etcd_device_usage_threshold_percent",
            default=self.default_threshold_percent
        )

        used_percent = 100.0 * used / total
        if used_percent > threshold:
            msg = "etcd storage usage ({:.1f}%) is above threshold ({:.1f}%). Device: {}, mount: {}.".format(
                used_percent, threshold,
                mount_info.get("device", "unknown"),
                mount_info.get("mount", "unknown")
            )
            return {"failed": True, "msg": msg}
        return {}
| {
"repo_name": "ivanhorvath/openshift-tools",
"path": "openshift/installer/vendored/openshift-ansible-3.7.52-1/roles/openshift_health_checker/openshift_checks/etcd_volume.py",
"copies": "48",
"size": "1586",
"license": "apache-2.0",
"hash": -6944093126916974000,
"line_mean": 32.7446808511,
"line_max": 109,
"alpha_frac": 0.5838587642,
"autogenerated": false,
"ratio": 3.8309178743961354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A health check for OpenShift clusters."""
from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
class EtcdVolume(OpenShiftCheck):
    """Ensures etcd storage usage does not exceed a given threshold."""

    name = "etcd_volume"
    tags = ["etcd", "health"]

    # Default device usage threshold. Value should be in the range [0, 100].
    default_threshold_percent = 90
    # Candidate locations for etcd data, highest priority first.
    supported_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"]

    @classmethod
    def is_active(cls, task_vars):
        """Run only on hosts belonging to the etcd (or masters) group."""
        etcd_hosts = (
            get_var(task_vars, "groups", "etcd", default=[])
            or get_var(task_vars, "groups", "masters", default=[])
            or []
        )
        host_is_etcd = get_var(task_vars, "ansible_ssh_host") in etcd_hosts
        return super(EtcdVolume, cls).is_active(task_vars) and host_is_etcd

    def run(self, tmp, task_vars):
        """Fail when used space on the etcd mount exceeds the threshold."""
        mount_info = self._etcd_mount_info(task_vars)
        total = mount_info["size_total"]
        used = total - mount_info["size_available"]

        threshold = get_var(
            task_vars,
            "etcd_device_usage_threshold_percent",
            default=self.default_threshold_percent
        )

        used_percent = 100.0 * used / total
        if used_percent > threshold:
            msg = "etcd storage usage ({:.1f}%) is above threshold ({:.1f}%). Device: {}, mount: {}.".format(
                used_percent, threshold,
                mount_info.get("device", "unknown"),
                mount_info.get("mount", "unknown")
            )
            return {"failed": True, "msg": msg}
        return {"changed": False}

    def _etcd_mount_info(self, task_vars):
        """Return the ansible_mounts entry for the best matching path."""
        by_path = {mnt.get("mount"): mnt
                   for mnt in get_var(task_vars, "ansible_mounts")}
        for candidate in self.supported_mount_paths:
            if candidate in by_path:
                return by_path[candidate]
        paths = ', '.join(sorted(by_path)) or 'none'
        msg = "Unable to find etcd storage mount point. Paths mounted: {}.".format(paths)
        raise OpenShiftCheckException(msg)
| {
"repo_name": "git001/openshift-ansible",
"path": "roles/openshift_health_checker/openshift_checks/etcd_volume.py",
"copies": "3",
"size": "2253",
"license": "apache-2.0",
"hash": 8968467564501589000,
"line_mean": 37.8448275862,
"line_max": 112,
"alpha_frac": 0.5823346649,
"autogenerated": false,
"ratio": 3.8844827586206896,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.596681742352069,
"avg_score": null,
"num_lines": null
} |
"""A health check for OpenShift clusters."""
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
class EtcdVolume(OpenShiftCheck):
    """Ensures etcd storage usage does not exceed a given threshold."""

    name = "etcd_volume"
    tags = ["etcd", "health"]

    # Default device usage threshold. Value should be in the range [0, 100].
    default_threshold_percent = 90
    # Candidate locations for etcd data, highest priority first.
    supported_mount_paths = ["/var/lib/etcd", "/var/lib", "/var", "/"]

    def is_active(self):
        """Run only on hosts belonging to the etcd (or masters) group."""
        etcd_hosts = (self.get_var("groups", "etcd", default=[])
                      or self.get_var("groups", "masters", default=[])
                      or [])
        host_is_etcd = self.get_var("ansible_ssh_host") in etcd_hosts
        return super(EtcdVolume, self).is_active() and host_is_etcd

    def run(self):
        """Fail when used space on the etcd mount exceeds the threshold."""
        mount_info = self._etcd_mount_info()
        total = mount_info["size_total"]
        used = total - mount_info["size_available"]

        threshold = self.get_var(
            "etcd_device_usage_threshold_percent",
            default=self.default_threshold_percent
        )

        used_percent = 100.0 * used / total
        if used_percent > threshold:
            msg = "etcd storage usage ({:.1f}%) is above threshold ({:.1f}%). Device: {}, mount: {}.".format(
                used_percent, threshold,
                mount_info.get("device", "unknown"),
                mount_info.get("mount", "unknown")
            )
            return {"failed": True, "msg": msg}
        return {"changed": False}

    def _etcd_mount_info(self):
        """Return the ansible_mounts entry for the best matching path."""
        by_path = {mnt.get("mount"): mnt
                   for mnt in self.get_var("ansible_mounts")}
        for candidate in self.supported_mount_paths:
            if candidate in by_path:
                return by_path[candidate]
        paths = ', '.join(sorted(by_path)) or 'none'
        msg = "Unable to find etcd storage mount point. Paths mounted: {}.".format(paths)
        raise OpenShiftCheckException(msg)
| {
"repo_name": "rhdedgar/openshift-tools",
"path": "openshift/installer/vendored/openshift-ansible-3.6.173.0.27/roles/openshift_health_checker/openshift_checks/etcd_volume.py",
"copies": "1",
"size": "2050",
"license": "apache-2.0",
"hash": -6929326387651361000,
"line_mean": 36.2727272727,
"line_max": 118,
"alpha_frac": 0.6019512195,
"autogenerated": false,
"ratio": 3.8317757009345796,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9931126431203074,
"avg_score": 0.0005200978463010549,
"num_lines": 55
} |
"""A heap that is used in the persistentdb implementation
"""
# from .persistentdb import TYPES, TYPE_DEFAULT
import os
import struct
import timeseries
import json
import pickle
from tsdb.tsdb_constants import *
class HeapFile:
    """Unbuffered binary file wrapper tracking read and write offsets.

    `readptr` starts at the beginning of the file, while `writeptr`
    points at end-of-file so new records are appended.
    """

    def __init__(self, heap_file_name):
        self.filename = heap_file_name
        # Create the file when it does not yet exist; otherwise open it
        # for in-place update.
        mode = "r+b" if os.path.exists(heap_file_name) else "xb+"
        self.fd = open(heap_file_name, mode, buffering=0)
        self.readptr = self.fd.tell()
        self.fd.seek(0, 2)  # jump to end-of-file for appends
        self.writeptr = self.fd.tell()

    def close(self):
        """Close the underlying file object."""
        self.fd.close()
class MetaHeapFile(HeapFile):
    """Heap file holding fixed-width metadata records.

    The struct format string and field layout are derived from the
    schema on first open, persisted in a side file
    (<name>_metadata.met), and reloaded on subsequent opens.
    """

    def __init__(self, heap_file_name, schema):
        super().__init__(heap_file_name)
        meta_path = heap_file_name + '_metadata.met'
        if not os.path.exists(meta_path):
            # First open: derive the record layout and persist it.
            self._create_compression_string(schema)
            with open(meta_path, 'xb', buffering=0) as fd:
                pickle.dump((self.compression_string,
                             self.fields,
                             self.fieldsDefaultValues,
                             self.byteArrayLength), fd)
        else:
            # Reload the previously persisted layout.
            with open(meta_path, 'rb', buffering=0) as fd:
                (self.compression_string, self.fields,
                 self.fieldsDefaultValues, self.byteArrayLength) = pickle.load(fd)

    def _create_compression_string(self, schema):
        """Build the struct format, field order, and default values.

        'pk' and 'ts' are excluded: the primary key lives in the index
        file and the timeseries in the ts heap file.  Every non-bool
        field also gets a companion '<field>_set' presence flag.
        """
        field_names = sorted(schema)
        field_names.remove('ts')
        field_names.remove('pk')
        self.compression_string = ''
        self.fields = []
        self.fieldsDefaultValues = []
        for field in field_names:
            field_type = schema[field]['type']
            self.compression_string += TYPES[field_type]
            self.fields.append(field)
            self.fieldsDefaultValues.append(TYPE_DEFAULT[field_type])
            if field_type != "bool":
                self.compression_string += TYPES['bool']
                self.fields.append(field + "_set")
                self.fieldsDefaultValues.append(False)
        self.byteArrayLength = len(struct.pack(self.compression_string,
                                               *self.fieldsDefaultValues))

    def check_byteArray(self, byteArray):
        """Sanity-check that packing produced the expected record size."""
        assert(len(byteArray) == self.byteArrayLength)

    def encode_and_write_meta(self, meta, pk_offset=None):
        """Pack *meta* and write it at *pk_offset* (append when None).

        Returns the offset the record was written at.
        """
        record = struct.pack(self.compression_string, *meta)
        self.check_byteArray(record)
        if pk_offset is None:
            pk_offset = self.writeptr
        self.fd.seek(pk_offset)
        self.fd.write(record)
        self.fd.seek(0, 2)
        self.writeptr = self.fd.tell()
        return pk_offset

    def read_and_return_meta(self, pk_offset):
        """Read and unpack the metadata record stored at *pk_offset*."""
        self.fd.seek(pk_offset)
        raw = self.fd.read(self.byteArrayLength)
        return list(struct.unpack(self.compression_string, raw))
class TSHeapFile(HeapFile):
    """Heap file holding fixed-length timeseries records.

    Each record is `ts_length` times followed by `ts_length` values,
    packed as consecutive doubles.  The length is persisted in a side
    metadata file so subsequent opens agree on the record size.
    """
    def __init__(self, heap_file_name, ts_length):
        super().__init__(heap_file_name)
        if not os.path.exists(heap_file_name+'_metadata.met'):
            self.ts_length = ts_length
            # times + values; BYTES_PER_NUM bytes per packed double
            self.byteArrayLength = self.ts_length * 2 * BYTES_PER_NUM
            with open(heap_file_name+'_metadata.met','xb',buffering=0) as fd:
                pickle.dump((self.ts_length,self.byteArrayLength), fd)
            # print("new tsheap meta values written to disk")
        # otherwise load it
        else:
            with open(heap_file_name+'_metadata.met','rb',buffering=0) as fd:
                self.ts_length, self.byteArrayLength = pickle.load(fd)
    def encode_and_write_ts(self, ts):
        """Append timeseries *ts* to the heap; return its byte offset."""
        # NOTE(review): relies on the private _TimeSeries attribute of
        # the timeseries package, presumably a (times, values) pair --
        # confirm against that package before changing.
        ts_items = ts._TimeSeries
        times = ts_items[0]
        values = ts_items[1]
        byteArray = struct.pack('%sd' % (2*self.ts_length), *times, *values)
        assert(len(byteArray) == self.byteArrayLength)
        self.fd.seek(self.writeptr)
        ts_offset = self.fd.tell()
        self.fd.write(byteArray)
        self.fd.seek(0,2)
        self.writeptr = self.fd.tell()
        return ts_offset
    def read_and_decode_ts(self, offset):
        """Read the record at *offset* and rebuild a TimeSeries."""
        self.fd.seek(offset)
        buff = self.fd.read(self.byteArrayLength)
        items = struct.unpack('%sd' % (2*self.ts_length),buff)
        # first half of the doubles are times, second half are values
        return timeseries.TimeSeries(items[:self.ts_length], items[self.ts_length:]) | {
"repo_name": "cs207-project/TimeSeries",
"path": "tsdb/utils_heapfile.py",
"copies": "1",
"size": "4660",
"license": "mit",
"hash": 4774316766354948000,
"line_mean": 39.5304347826,
"line_max": 118,
"alpha_frac": 0.6027896996,
"autogenerated": false,
"ratio": 3.767178658043654,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9846523332545171,
"avg_score": 0.004689005019696624,
"num_lines": 115
} |
"""A hello world.
Uses the main libraries to verify the environment installation.
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
# import pydensecrf.densecrf as crf
import pymia.evaluation.evaluator as pymia_eval
import SimpleITK as sitk
import sklearn.ensemble as sk_ensemble
def main():
    """Smoke-test the MIALab environment by touching each core library."""
    print('Welcome to MIALab 2020!')

    # --- scikit-learn: construct (but do not fit) a classifier.
    clf = sk_ensemble.RandomForestClassifier(max_depth=2, random_state=0)

    # --- SimpleITK: allocate a small volume and poke one voxel.
    image = sitk.Image(256, 128, 64, sitk.sitkInt16)
    print('Image dimension:', image.GetDimension())
    print('Voxel intensity before setting:', image.GetPixel(0, 0, 0))
    image.SetPixel(0, 0, 0, 1)
    print('Voxel intensity after setting:', image.GetPixel(0, 0, 0))

    # --- numpy and matplotlib: plot a short sequence.
    array = np.array([1, 23, 2, 4])
    plt.plot(array)
    plt.ylabel('Some meaningful numbers')
    plt.xlabel('The x-axis')
    plt.title('Wohoo')
    plt.show()

    # --- pydensecrf
    # d = crf.DenseCRF(1000, 2)

    # --- pymia: construct an evaluator.  Renamed from `eval`, which
    # shadowed the Python builtin of the same name.
    evaluator = pymia_eval.SegmentationEvaluator([], {})

    print('Everything seems to work fine!')
# Run the environment smoke test when executed as a script.
if __name__ == "__main__":
    """The program's entry point."""
    main()
| {
"repo_name": "istb-mia/MIALab",
"path": "bin/hello_world.py",
"copies": "1",
"size": "1189",
"license": "apache-2.0",
"hash": 884831718512091100,
"line_mean": 22.3137254902,
"line_max": 73,
"alpha_frac": 0.6518082422,
"autogenerated": false,
"ratio": 3.330532212885154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9482340455085154,
"avg_score": 0,
"num_lines": 51
} |
"""A hello world.
Uses the main libraries to verify the environment installation.
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import pydensecrf.densecrf as crf
import SimpleITK as sitk
import tensorflow as tf
from tensorflow.python.platform import app
def main(_):
    """Exercise each core library to verify the environment installation."""
    print('Welcome to MIALab 2017!')

    # --- TensorFlow: build and run a trivial graph.
    # The Constant op is added as a node to the default graph; the value
    # returned by the constructor represents the op's output.
    greeting = tf.constant('Hello, TensorFlow!')
    session = tf.Session()
    print(session.run(greeting).decode(sys.getdefaultencoding()))

    # --- SimpleITK: allocate a small volume and poke one voxel.
    image = sitk.Image(256, 128, 64, sitk.sitkInt16)
    print('Image dimension:', image.GetDimension())
    print('Voxel value before setting:', image.GetPixel(0, 0, 0))
    image.SetPixel(0, 0, 0, 1)
    print('Voxel value after setting:', image.GetPixel(0, 0, 0))

    # --- numpy and matplotlib: plot a short sequence.
    values = np.array([1, 23, 2, 4])
    plt.plot(values)
    plt.ylabel('Some meaningful numbers')
    plt.xlabel('The x-axis')
    plt.title('Wohoo')
    plt.show()

    # --- pydensecrf: construct a small dense CRF.
    dense_crf = crf.DenseCRF(1000, 2)

    print('Everything seems to work fine!')
# Entry point: tf.app.run parses flags and then invokes main(argv).
if __name__ == "__main__":
    """The program's entry point."""
    app.run(main=main, argv=[sys.argv[0]])
"repo_name": "mrunibe/MIALab",
"path": "bin/hello_world.py",
"copies": "2",
"size": "1397",
"license": "apache-2.0",
"hash": -5885000275245065000,
"line_mean": 22.2833333333,
"line_max": 65,
"alpha_frac": 0.6471009306,
"autogenerated": false,
"ratio": 3.5100502512562812,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5157151181856281,
"avg_score": null,
"num_lines": null
} |
# A hello world program. From the Curio tutorial at
# https://curio.readthedocs.org/en/latest/tutorial.html
#
import curio
import signal
import os
async def countdown(n):
    """Print a T-minus countdown from n down to 1, one step per second."""
    for remaining in range(n, 0, -1):
        print('T-minus', remaining)
        await curio.sleep(1)
# Event the parent sets to signal the kid that play may begin.
start_evt = curio.Event()
def fib(n):
    """Naive doubly-recursive Fibonacci -- deliberately CPU-hungry."""
    return 1 if n <= 2 else fib(n - 1) + fib(n - 2)
async def kid():
    """Nag once per second until start_evt fires, then compute Fibonacci
    numbers until cancelled."""
    while True:
        try:
            print('Can I play?')
            await start_evt.wait(timeout=1)
        except TimeoutError:
            print('Wha!?!')
        else:
            # The event fired before the timeout -- stop nagging.
            break
    try:
        print('Building the Millenium Falcon in Minecraft')
        total = 0
        for n in range(50):
            total += await curio.run_cpu_bound(fib, n)
    except curio.CancelledError:
        print('Fine. Saving my work.')
async def parent():
    """Supervise the kid task: allow play, wait for SIGHUP, run the
    countdown, then give the kid 10 seconds before cancelling it."""
    print('Parent PID', os.getpid())
    child = await curio.new_task(kid())

    await curio.sleep(5)
    print("Yes, go play")
    await start_evt.set()

    # Block until the process receives SIGHUP.
    await curio.SignalSet(signal.SIGHUP).wait()
    print("Let's go")
    counter = await curio.new_task(countdown(10))
    await counter.join()

    print("We're leaving!")
    try:
        await child.join(timeout=10)
    except TimeoutError:
        print('I warned you!')
        await child.cancel()
    print("Leaving!")
# Start the curio kernel (with the interactive monitor enabled) and
# run the parent coroutine to completion.
if __name__ == '__main__':
    kernel = curio.Kernel(with_monitor=True)
    kernel.run(parent())
"repo_name": "ABaldwinHunter/curio-clone",
"path": "examples/hello.py",
"copies": "2",
"size": "1436",
"license": "bsd-3-clause",
"hash": -9002006542629399000,
"line_mean": 22.9333333333,
"line_max": 59,
"alpha_frac": 0.5793871866,
"autogenerated": false,
"ratio": 3.3163972286374133,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4895784415237413,
"avg_score": null,
"num_lines": null
} |
"""A helper class for managing batch normalization state.
This class is designed to simplify adding batch normalization
(http://arxiv.org/pdf/1502.03167v3.pdf) to your model by
managing the state variables associated with it.
Important use note: The function get_assigner() returns
an op that must be executed to save the updated state.
A suggested way to do this is to make execution of the
model optimizer force it, e.g., by:
update_assignments = tf.group(bn1.get_assigner(),
bn2.get_assigner())
with tf.control_dependencies([optimizer]):
optimizer = tf.group(update_assignments)
"""
import tensorflow as tf
class BatchNormalizer(object):
    """Groups batch-normalization state and logic.

    Use:
        ewma = tf.train.ExponentialMovingAverage(decay=0.99)
        bn = BatchNormalizer(depth, 0.001, ewma, True)
        update_assignments = bn.get_assigner()
        x = bn.normalize(y, train=training?)
    (the output x will be batch-normalized).
    """

    def __init__(self, depth, epsilon, ewma_trainer, scale_after_norm, keep_prob_prior=1.0, name=None):
        with tf.variable_op_scope([self, depth, ewma_trainer, epsilon], name, 'batch_normalizer') as scope:
            # Running statistics -- updated through the EWMA apply op,
            # not by the optimizer, hence trainable=False.
            self.mean = tf.get_variable(
                'mean', shape=[depth],
                initializer=tf.constant_initializer(0.0),
                trainable=False)
            self.variance = tf.get_variable(
                'variance', shape=[depth],
                initializer=tf.constant_initializer(1.0),
                trainable=False)
            # Learned shift (beta) and scale (gamma) parameters.
            self.beta = tf.get_variable(
                'beta', shape=[depth],
                initializer=tf.constant_initializer(0.0))
            self.gamma = tf.get_variable(
                'gamma', shape=[depth],
                initializer=tf.constant_initializer(1.0))
            print(scope.name)
            self.ewma_trainer = ewma_trainer
            self.epsilon = epsilon
            self.keep_prob_prior = keep_prob_prior

    def get_assigner(self):
        """Returns an EWMA apply op that must be invoked after optimization."""
        return self.ewma_trainer.apply([self.mean, self.variance])

    def normalize(self, x, train=True):
        """Returns a batch-normalized version of x."""
        if train:
            # Training path: use batch statistics and record them into
            # the running variables before producing the output.
            mean, variance = tf.nn.moments(x, [0])
            update_mean = self.mean.assign(mean)
            # Variance is scaled by keep_prob_prior before being stored.
            update_variance = self.variance.assign(
                tf.mul(variance, self.keep_prob_prior))
            with tf.control_dependencies([update_mean, update_variance]):
                act_bn = tf.mul((x - mean), tf.rsqrt(variance + self.epsilon), name="act_bn")
                return tf.add(tf.mul(act_bn, self.gamma), self.beta)
        else:
            # Inference path: fall back to the EWMA running statistics.
            mean = self.ewma_trainer.average(self.mean) or self.epsilon
            variance = self.ewma_trainer.average(self.variance) or self.epsilon
            local_beta = tf.identity(self.beta)
            local_gamma = tf.identity(self.gamma)
            act_bn = tf.mul((x - mean), tf.rsqrt(variance + self.epsilon), name="act1_bn")
            return tf.add(tf.mul(act_bn, local_gamma), local_beta)
| {
"repo_name": "mikowals/mnist",
"path": "batchnormalizer.py",
"copies": "1",
"size": "3442",
"license": "mit",
"hash": 862960271378793300,
"line_mean": 45.527027027,
"line_max": 103,
"alpha_frac": 0.5694363742,
"autogenerated": false,
"ratio": 4.016336056009335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9978420898433668,
"avg_score": 0.021470306355133334,
"num_lines": 74
} |
"""A helper class for managing batch normalization state.
This class is designed to simplify adding batch normalization
(http://arxiv.org/pdf/1502.03167v3.pdf) to your model by
managing the state variables associated with it.
Important use note: The function get_assigner() returns
an op that must be executed to save the updated state.
A suggested way to do this is to make execution of the
model optimizer force it, e.g., by:
update_assignments = tf.group(bn1.get_assigner(),
bn2.get_assigner())
with tf.control_dependencies([optimizer]):
optimizer = tf.group(update_assignments)
"""
import tensorflow as tf
class ConvolutionalBatchNormalizer(object):
    """Helper class that groups the normalization logic and variables.

    Use:
        ewma = tf.train.ExponentialMovingAverage(decay=0.99)
        bn = ConvolutionalBatchNormalizer(depth, 0.001, ewma, True)
        update_assignments = bn.get_assigner()
        x = bn.normalize(y, train=training?)
    (the output x will be batch-normalized).
    """

    def __init__(self, depth, epsilon, ewma_trainer, scale_after_norm):
        # Running statistics -- updated via the EWMA apply op rather
        # than the optimizer, hence trainable=False.
        self.mean = tf.Variable(tf.constant(0.0, shape=[depth]),
                                trainable=False)
        self.variance = tf.Variable(tf.constant(1.0, shape=[depth]),
                                    trainable=False)
        # Learned shift (beta) and scale (gamma) parameters.
        self.beta = tf.Variable(tf.constant(0.0, shape=[depth]))
        self.gamma = tf.Variable(tf.constant(1.0, shape=[depth]))
        self.ewma_trainer = ewma_trainer
        self.epsilon = epsilon
        self.scale_after_norm = scale_after_norm

    def get_assigner(self):
        """Returns an EWMA apply op that must be invoked after optimization."""
        return self.ewma_trainer.apply([self.mean, self.variance])

    def normalize(self, x, train=True):
        """Returns a batch-normalized version of x.

        During training, batch statistics are used and stored into the
        running mean/variance; at inference, EWMA averages are used.
        """
        # BUG FIX: this previously tested `train is not None`, which is
        # true for both train=True and train=False, so the inference
        # branch below was unreachable.
        if train:
            mean, variance = tf.nn.moments(x, [0, 1, 2])
            assign_mean = self.mean.assign(mean)
            assign_variance = self.variance.assign(variance)
            with tf.control_dependencies([assign_mean, assign_variance]):
                return tf.nn.batch_norm_with_global_normalization(
                    x, mean, variance, self.beta, self.gamma,
                    self.epsilon, self.scale_after_norm)
        else:
            mean = self.ewma_trainer.average(self.mean)
            variance = self.ewma_trainer.average(self.variance)
            local_beta = tf.identity(self.beta)
            local_gamma = tf.identity(self.gamma)
            return tf.nn.batch_norm_with_global_normalization(
                x, mean, variance, local_beta, local_gamma,
                self.epsilon, self.scale_after_norm)
| {
"repo_name": "bperez77/ensemble_colorization",
"path": "batchnorm.py",
"copies": "1",
"size": "2591",
"license": "mit",
"hash": 2712846777727487500,
"line_mean": 38.8615384615,
"line_max": 75,
"alpha_frac": 0.6730991895,
"autogenerated": false,
"ratio": 3.6288515406162465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9781133100195794,
"avg_score": 0.004163525984090649,
"num_lines": 65
} |
"""A helper class to encrypt and decrypt passwords.
Example and documentation provided at the link below is pre-req for
understanding this helper.
https://www.dlitz.net/software/pycrypto/api/current/Crypto.Cipher.AES-module.html
Some key points from the documentation:
The block size determines the AES key size:
16 (AES-128), 24 (AES-192), or 32 (AES-256)
MODE_CFB is chosen as the chaining mode, because it is recommended by the
documentation and example is provided. crypto_initialization_vector is
required for this mode. Otherwise, as a counter-example, the simpliest
MODE_ECB doesn't need the crypto_initialization_vector, but it is deemed not
as strong.
https://www.dlitz.net/software/pycrypto/api/current/Crypto.Cipher.blockalgo-module.html#MODE_CFB
https://www.dlitz.net/software/pycrypto/api/current/Crypto.Cipher.blockalgo-module.html#MODE_ECB
The pycrypto cipher will handle the actual encryption and decryption processes.
https://www.dlitz.net/software/pycrypto/api/current/Crypto.Cipher.AES.AESCipher-class.html
"""
import logging
import os
from Crypto.Cipher import AES
# Module-level logger for crypto operations.
_LOG = logging.getLogger('google_password_generator.password_crypto_helper')

# AES block size in bytes; also used as the key length (16 => AES-128).
BLOCK_SIZE = 16
class PasswordCryptoHelper(object):
    """Encrypts and decrypts passwords with AES-128 in CFB mode.

    A fresh random key and initialization vector are generated for each
    encryption.  The key is handed back to the user base-64 encoded and
    must be supplied again for decryption; the initialization vector is
    returned so it can be stored alongside the encrypted password and
    retrieved with it later.
    """

    @staticmethod
    def EncryptPassword(password):
        """Encrypt password.

        Args:
            password: a string of the password to be encrypted

        Returns:
            Byte string of the encrypted password, string of the
            password key, and byte string of the crypto initialization
            vector.
        """
        key = os.urandom(BLOCK_SIZE)
        init_vector = os.urandom(BLOCK_SIZE)
        cipher = AES.new(key, AES.MODE_CFB, init_vector)
        ciphertext = cipher.encrypt(password)
        _LOG.debug('Successfully encrypted password.')
        # NOTE: str.encode('base-64') is a Python 2-only codec.
        return (ciphertext, key.encode('base-64'), init_vector)

    @staticmethod
    def DecryptPassword(encrypted_password_entity, password_key):
        """Decrypt password.

        Args:
            encrypted_password_entity: datastore entity of the encrypted password
            password_key: string of the password key

        Returns:
            string of the decrypted password
        """
        cipher = AES.new(password_key.decode('base-64'), AES.MODE_CFB,
                         encrypted_password_entity.crypto_initialization_vector)
        plaintext = cipher.decrypt(
            encrypted_password_entity.encrypted_password)
        _LOG.debug('Successfully decrypted password.')
        return plaintext
| {
"repo_name": "google/googleapps-password-generator",
"path": "password_crypto_helper.py",
"copies": "1",
"size": "3421",
"license": "apache-2.0",
"hash": -6688642634925044000,
"line_mean": 36.1847826087,
"line_max": 96,
"alpha_frac": 0.7521192634,
"autogenerated": false,
"ratio": 4.043735224586288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5295854487986288,
"avg_score": null,
"num_lines": null
} |
"""A helper, CLI tool for working with py_func's defining E2E tests.
This CLI provides methods for printing and submitting Argo workflows defined
by py_funcs.
"""
import fire
import logging
from kubernetes import client as k8s_client
import retrying
import yaml
from kubeflow.testing import argo_build_util
from kubeflow.testing import run_e2e_workflow
from kubeflow.testing import util
# TODO(jlewi): Can we automatically handle the case where py_func points
# to a builder class? Then create the class and call build.
class E2EToolMain:
  """A helper class to add some convenient entry points.

  Each public method becomes a CLI subcommand when the class is handed to
  fire.Fire in the module's __main__ block.
  """

  def show(self, py_func, name=None, namespace=None, output=None,
           **kwargs): # pylint: disable=no-self-use
    """Print out the workflow spec.

    Args:
      py_func: Dotted name of the function defining the workflow
      name: Name for the workflow; forwarded to py_func via kwargs.
      namespace: Namespace for the workflow; forwarded to py_func via kwargs.
      output: Optional file path; when set, the YAML spec is written there
        instead of printed to stdout.
      kwargs: Additional args to pass to the python import function.
    """
    kwargs.update({"name": name, "namespace": namespace, })
    # py_func_import resolves the dotted name and calls it with kwargs to
    # build the workflow dict.
    workflow = run_e2e_workflow.py_func_import(py_func, kwargs)
    if output:
      logging.info("Dumping workflow to %s", output)
      with open(output, "w") as hf:
        hf.write(yaml.safe_dump(workflow))
    else:
      print(yaml.safe_dump(workflow))

  def apply(self, py_func, name=None, namespace=None, # pylint: disable=no-self-use
            dry_run=False,
            open_in_chrome=False, **kwargs):
    """Create the workflow in the current cluster.

    Args:
      py_func: Dotted name of the function defining the workflow
      name: Name for the workflow
      namespace: Namespace to run.
      dry_run: If true modifies the graph to change the command
        to echo the command and delete any working directory
        rather than actually running it.
        This is a quick way to check that the Argo graph is valid.
        Note: This isn't full proof. We also need to
      open_in_chrome: Whether to shell out to chrome to open the
        Argo UI.
      kwargs: Additional args to pass to the python import function
    """
    kwargs.update({"name": name, "namespace": namespace})
    workflow = run_e2e_workflow.py_func_import(py_func, kwargs)
    # Talk to whatever cluster the current kube config points at.
    util.load_kube_config(print_config=False)
    client = k8s_client.ApiClient()
    crd_api = k8s_client.CustomObjectsApi(client)
    # apiVersion has the form "<group>/<version>", e.g. "argoproj.io/v1alpha1".
    group, version = workflow['apiVersion'].split('/')
    if dry_run:
      logging.warning("Running in dry-run mode. command and workingDir is "
                      "being changed for all steps")
      for t in workflow["spec"]["templates"]:
        # Only container templates carry a command to rewrite.
        if not "container" in t:
          continue
        c = t["container"]
        working_dir = ""
        # Remove the working directory because working directory
        # might not exist if its created by an earlier step
        if "workingDir" in c:
          working_dir = c["workingDir"]
          del c["workingDir"]
        command = c["command"]
        command = " ".join(command)
        command = "Step will run {0}".format(command)
        if working_dir:
          command = "{0} in {1}".format(command, working_dir)
        command = [
          "echo",
          "\"{0}\"".format(command)
        ]
        # Change the command to an echo.
        c["command"] = command
    crd_api.create_namespaced_custom_object(
      group=group,
      version=version,
      namespace=namespace,
      plural='workflows',
      body=workflow)
    # Wait for a status to be returned and print it out.
    # NOTE(review): @retrying.retry with no arguments retries forever with
    # no wait between attempts -- confirm that is intended.
    @retrying.retry
    def get_wf_status():
      result = crd_api.get_namespaced_custom_object(
        group=group,
        version=version,
        namespace=namespace,
        plural='workflows',
        name=name)
      if not "status" in result:
        raise ValueError("Workflow object not ready yet.")
      return result
    result = get_wf_status()
    logging.info("Created workflow:\n%s", yaml.safe_dump(result))
    # TODO(jlewi): We are assuming the workflow is running in the Kubeflow CI
    # cluster. We should try to infer the correct endpoint by looking for an
    # appropriate ingress.
    ui_url = ("http://testing-argo.kubeflow.org/workflows/kubeflow-test-infra/{0}"
              "?tab=workflow".format(name))
    logging.info("URL for workflow: %s", ui_url)
    if open_in_chrome:
      util.run(["google-chrome", ui_url])

  def step_logs(self, workflow, step, project="kubeflow-ci", # pylint: disable=no-self-use
                open_in_chrome=False):
    """Get a stackdriver link to the logs for a particular step.

    Args:
      workflow: Name of the workflow
      step: The name of the step to get the logs for
      project: GCP project whose stackdriver instance receives the logs.
      open_in_chrome: Whether to shell out to chrome to open the URL.
    """
    url = argo_build_util.logs_link_for_step(workflow, step, project=project)
    print("URL for workflow {0} step {1} in project {2}".format(
      workflow, step, project))
    print(url)
    if open_in_chrome:
      util.run(["google-chrome", url])
if __name__ == "__main__":
  # Verbose, timestamped logging before handing control to Fire.
  logging.basicConfig(level=logging.INFO,
                      format=('%(levelname)s|%(asctime)s'
                              '|%(pathname)s|%(lineno)d| %(message)s'),
                      datefmt='%Y-%m-%dT%H:%M:%S',
                      )
  logging.getLogger().setLevel(logging.INFO)
  # Expose E2EToolMain's methods as CLI subcommands.
  fire.Fire(E2EToolMain)
| {
"repo_name": "kubeflow/testing",
"path": "py/kubeflow/testing/e2e_tool.py",
"copies": "1",
"size": "5135",
"license": "apache-2.0",
"hash": 2317426944090240500,
"line_mean": 32.3441558442,
"line_max": 90,
"alpha_frac": 0.6311587147,
"autogenerated": false,
"ratio": 3.872549019607843,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5003707734307843,
"avg_score": null,
"num_lines": null
} |
"""a helper for easy creation of gdb pretty-printers"""
import gdb.printing
# In Python 2, gdb.Value can only be converted with long(); Python 3 only
# has int().  Alias long to int where it does not exist so long() works on
# both versions.
try:
    a = long(1)
except NameError:  # Python 3: narrow except -- a bare except would also
    long = int     # swallow unrelated errors such as KeyboardInterrupt.

# Registry mapping a type name to its pretty-printing callback; populated
# by the @PrettyPrinter decorator and consulted by PPDispatcher.
pp_registry = dict()
class PPWrapper:
    """Adapts a registered callback to gdb's pretty-printer protocol.

    gdb calls to_string() on the object a pretty-printer returns; this
    wrapper prepends a prefix (used for pointer type/address text) to
    whatever string the user callback produces for the wrapped value.
    """

    def __init__(self, prefix, val, cb):
        self.prefix = prefix
        self.val = val
        self.cb = cb

    def to_string(self):
        rendered = self.cb(self.val)
        return self.prefix + rendered
class PPDispatcher(gdb.printing.PrettyPrinter):
    """Single gdb pretty-printer that dispatches through pp_registry.

    gdb invokes this object for every value it is about to print;
    __call__ looks the value's type name up in pp_registry and, on a hit,
    returns a PPWrapper around the registered callback.  Returning None
    tells gdb to fall back to its default formatting.

    NOTE: the bare excepts below deliberately treat any gdb lookup error
    (unnamed type, failed dereference, missing registry entry) as
    "no printer for this value".
    """

    def __init__(self):
        super(PPDispatcher, self).__init__('gdb-tools')

    def __call__(self, val):
        prefix = ''
        if val.type.code == gdb.TYPE_CODE_PTR:
            # Null pointers keep gdb's default formatting.
            if long(val) == 0:
                return None
            # Show the pointer type and address, then pretty-print the
            # pointee via the registered callback.
            prefix = '({0}) {1:#08x} '.format(str(val.type), long(val))
            try: val = val.dereference()
            except: return None
        valtype=val.type.unqualified()
        # Lookup order: exact type name, then the typedef-resolved name,
        # then a template-generic entry registered as "name<>".
        try: cb=pp_registry[valtype.name]
        except:
            try:
                n=valtype.strip_typedefs().name
                cb=pp_registry[valtype.strip_typedefs().name]
            except:
                try: cb=pp_registry[n[0:n.index('<')]+'<>']
                except: return None
        return PPWrapper(prefix, val, cb)
gdb.printing.register_pretty_printer(None, PPDispatcher(), True)
def PrettyPrinter(arg):
    """@PrettyPrinter decorator.

    Registers a function taking a gdb.Value and returning the string gdb
    should display for it.  Used bare::

        @PrettyPrinter
        def some_typename(val):
            <convert val to a string and return it>

    the function's own name is the type name to register (typedef'ed or
    fully resolved; a pointer to the type is handled automatically by the
    dispatcher).  When the type name is not a valid Python identifier,
    pass it explicitly::

        @PrettyPrinter("real complex type name")
        def does_not_matter(val):
            <convert val to a string and return it>
    """
    # Bare form: arg is the function and supplies the name itself.
    # Parameterized form: arg is the name (no __name__ attribute).
    key = getattr(arg, '__name__', arg)

    def register(func):
        pp_registry[key] = func
        return func

    return register(arg) if callable(arg) else register
| {
"repo_name": "vuvova/gdb-tools",
"path": "pretty_printer/__init__.py",
"copies": "1",
"size": "2298",
"license": "bsd-3-clause",
"hash": -3361648743952572000,
"line_mean": 29.64,
"line_max": 78,
"alpha_frac": 0.6074847694,
"autogenerated": false,
"ratio": 3.9215017064846416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5028986475884641,
"avg_score": null,
"num_lines": null
} |
"""A helper function for parsing and executing parse_format skills."""
import logging
from parse import parse, search
from opsdroid.parsers.regex import calculate_score
_LOGGER = logging.getLogger(__name__)
async def match_format(text, opts):
    """Match text against the skill's parse-format options.

    Selects parse.search when the matching_condition option is "search"
    (compared case-insensitively), otherwise parse.parse, then applies it
    to the configured expression honouring the case_sensitive option.
    """
    if opts["matching_condition"].lower() == "search":
        matcher = search
    else:
        matcher = parse
    return matcher(
        opts["expression"], text, case_sensitive=opts["case_sensitive"]
    )
async def parse_format(opsdroid, skills, message):
    """Parse a message against all parse_format skills."""
    matched = []
    for skill in skills:
        for matcher in skill.matchers:
            # Skip matchers configured for other parsers.
            if "parse_format" not in matcher:
                continue
            opts = matcher["parse_format"]
            result = await match_format(message.text, opts)
            if not result:
                continue
            # Attach the parse result and promote its named groups to
            # message entities.
            message.parse_result = result
            _LOGGER.debug(result.__dict__)
            for group, value in result.named.items():
                message.update_entity(group, value, None)
            score = await calculate_score(
                opts["expression"], opts["score_factor"]
            )
            matched.append(
                {
                    "score": score,
                    "skill": skill,
                    "config": skill.config,
                    "message": message,
                }
            )
    return matched
| {
"repo_name": "opsdroid/opsdroid",
"path": "opsdroid/parsers/parseformat.py",
"copies": "3",
"size": "1586",
"license": "apache-2.0",
"hash": -619019288963816600,
"line_mean": 32.7446808511,
"line_max": 76,
"alpha_frac": 0.525851198,
"autogenerated": false,
"ratio": 4.806060606060606,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6831911804060606,
"avg_score": null,
"num_lines": null
} |
"""A helper function for parsing and executing regex skills."""
import logging
import regex
_LOGGER = logging.getLogger(__name__)
async def calculate_score(expression, score_factor):
    """Calculate the score of a regex.

    Longer expressions are treated as more specific: the score rises
    asymptotically toward score_factor as the expression length grows.
    """
    denominator = (len(expression) + 1) ** 2
    return score_factor * (1 - 1 / denominator)
async def match_regex(text, opts):
    """Return the regex match object for text, or None when it does not match.

    The matching_condition option selects regex.search, regex.fullmatch,
    or (the default) regex.match; the case_sensitive option controls
    whether regex.IGNORECASE is applied.
    """

    def is_case_sensitive():
        # Return 0 flags (False) when matching must stay case sensitive,
        # regex.IGNORECASE otherwise.
        if opts["case_sensitive"]:
            return False
        return regex.IGNORECASE

    if opts["matching_condition"].lower() == "search":
        matched_regex = regex.search(opts["expression"], text, is_case_sensitive())
    elif opts["matching_condition"].lower() == "fullmatch":
        matched_regex = regex.fullmatch(opts["expression"], text, is_case_sensitive())
    else:
        matched_regex = regex.match(opts["expression"], text, is_case_sensitive())
    return matched_regex
async def parse_regex(opsdroid, skills, message):
    """Parse a message against all regex skills."""
    matched = []
    for skill in skills:
        for matcher in skill.matchers:
            # Skip matchers configured for other parsers.
            if "regex" not in matcher:
                continue
            opts = matcher["regex"]
            found = await match_regex(message.text, opts)
            if not found:
                continue
            # Attach the match and promote its named groups to message
            # entities.
            message.regex = found
            for regroup, value in found.groupdict().items():
                message.update_entity(regroup, value, None)
            score = await calculate_score(
                opts["expression"], opts["score_factor"]
            )
            matched.append(
                {
                    "score": score,
                    "skill": skill,
                    "config": skill.config,
                    "message": message,
                }
            )
    return matched
| {
"repo_name": "opsdroid/opsdroid",
"path": "opsdroid/parsers/regex.py",
"copies": "3",
"size": "2049",
"license": "apache-2.0",
"hash": 8124897137846753000,
"line_mean": 36.2545454545,
"line_max": 86,
"alpha_frac": 0.5524646169,
"autogenerated": false,
"ratio": 4.625282167042889,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.667774678394289,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.