text stringlengths 0 1.05M | meta dict |
|---|---|
# A basic Substitution-Permutation Network cipher, implemented by following
# 'A Tutorial on Linear and Differential Cryptanalysis'
# by Howard M. Heys
#
# 02/12/16 Chris Hicks
#
# Basic SPN cipher which takes as input a 16-bit input block and has 4 rounds.
# Each round consists of (1) substitution (2) transposition (3) key mixing
import random
import hashlib
# Cipher-wide parameters
blockSize = 16        # width of the cipher state in bits (4 nibbles)
verboseState = False  # set True to trace the state through every round
# (1) Substitution: 4x4 bijective, one sbox used for all 4 sub-blocks of size 4. Nibble wise
sbox = {0:0xE, 1:0x4, 2:0xD, 3:0x1, 4:0x2, 5:0xF, 6:0xB, 7:0x8, 8:0x3, 9:0xA, 0xA:0x6, 0xB:0xC, 0xC:0x5, 0xD:0x9, 0xE:0x0, 0xF:0x7} #key:value
# Inverse mapping of sbox, used on the decryption path
sbox_inv = {0xE:0, 0x4:1, 0xD:2, 0x1:3, 0x2:4, 0xF:5, 0xB:6, 0x8:7, 0x3:8, 0xA:9, 0x6:0xA, 0xC:0xB, 0x5:0xC, 0x9:0xD, 0x0:0xE, 0x7:0xF}
# Apply sbox (1) to a 16 bit state and return the result
def apply_sbox(state, sbox):
    """Substitute each 4-bit nibble of a 16-bit state through `sbox`.

    The same lookup table is applied independently to all four nibbles;
    the result is reassembled in the same nibble positions.
    """
    result = 0
    for shift in (0, 4, 8, 12):
        nibble = (state >> shift) & 0xF
        result |= sbox[nibble] << shift
    return result
# (2) Permutation. Applied bit-wise: bit i of the state moves to bit pbox[i].
# This particular transposition is its own inverse (e.g. 1<->4, 2<->8, 3<->12),
# which is why decrypt() can reuse pbox unchanged to undo it.
pbox = {0:0, 1:4, 2:8, 3:12, 4:1, 5:5, 6:9, 7:13, 8:2, 9:6, 10:10, 11:14, 12:3, 13:7, 14:11, 15:15}
# (3) Key mixing: bitwise XOR between round subkey and data block input to round
# Key schedule: independant random round keys.
# We take the sha-hash of a 128-bit 'random' seed and then take the first 80-bits
# of the output as out round keys K1-K5 (Each 16 bits long). (not secure, this is just a demo)
def keyGeneration():
    """Produce 80 key bits as 20 hex characters (five 16-bit round keys K1-K5).

    SHA-1 of a 128-bit pseudo-random seed, keeping 20 hex digits starting at
    offset 2.  Not cryptographically secure key generation; demo only.
    """
    seed = hex(random.getrandbits(128)).encode('utf-8')
    digest = hashlib.sha1(seed).hexdigest()
    return digest[2:22]
# Simple SPN Cipher encrypt function
def encrypt(pt, k):
    """Encrypt one 16-bit block `pt` under the 20-hex-char key string `k`.

    Three full rounds of (key mix, sbox, bit permutation), then a final
    round of (key mix, sbox, key mix) without permutation.
    """
    state = pt
    if verboseState: print('**pt = {:04x}**'.format(state))
    # Split k into five 16-bit round keys K1..K5 (4 hex chars each)
    subKeys = [int(k[i:i + 4], 16) for i in range(0, 20, 4)]
    # Rounds 1-3
    for roundN in range(0, 3):
        if verboseState: print(roundN, end=' ')
        # (3) key mixing
        state ^= subKeys[roundN]
        if verboseState: print(hex(state), end=' ')
        # (1) nibble-wise substitution
        state = apply_sbox(state, sbox)
        if verboseState: print(hex(state), end=' ')
        # (2) bit-wise permutation through pbox
        permuted = 0
        for bitIdx in range(blockSize):
            if state & (1 << bitIdx):
                permuted |= 1 << pbox[bitIdx]
        state = permuted
        if verboseState: print(hex(state))
    # Final round: K4, sbox, K5 (no permutation)
    state ^= subKeys[-2]
    if verboseState: print(str(3), hex(state), end=' ')
    state = apply_sbox(state, sbox)
    if verboseState: print(hex(state), end=' ')
    state ^= subKeys[-1]
    if verboseState: print(hex(state))
    if verboseState: print('**ct = {:04x}**'.format(state))
    return state
# Simple SPN Cipher decrypt function
def decrypt(ct, k):
    """Invert encrypt(): recover the 16-bit plaintext block from ciphertext `ct`.

    Round keys are applied in reverse order; sbox_inv undoes sbox, and the
    pbox permutation is its own inverse so it is reused unchanged.
    """
    state = ct
    if verboseState: print('**ct = {:04x}**'.format(state))
    #Derive round keys (same schedule as encrypt: five 16-bit words from 20 hex chars)
    subKeys = [ int(subK,16) for subK in [ k[0:4],k[4:8], k[8:12], k[12:16], k[16:20] ] ]
    if verboseState: print (str(3), hex(state), end= ' ')
    #Undo final round key (K5)
    state = state^subKeys[4]
    if verboseState: print (hex(state), end= ' ')
    #Apply inverse s-box
    state = apply_sbox(state,sbox_inv)
    if verboseState: print (hex(state))
    #Undo first 3 rounds of simple SPN cipher, in reverse order
    for roundN in range(2, -1, -1):
        if verboseState: print(roundN, end = ' ')
        #XOR state with round key (subKeys[3], [2], [1] as roundN runs 2..0)
        state = state^subKeys[roundN+1]
        if verboseState: print (hex(state), end=' ')
        #Un-permute the state bitwise (pbox is an involution)
        state_temp = 0
        for bitIdx in range(0, blockSize):
            if(state & (1 << bitIdx)):
                state_temp |= (1 << pbox[bitIdx])
        state = state_temp
        if verboseState: print (hex(state), end = ' ')
        #Apply inverse s-box
        state = apply_sbox(state,sbox_inv)
        if verboseState: print (hex(state))
    if verboseState: print(roundN, end = ' ')
    #XOR state with round key 0 (K1) to recover the plaintext
    state = state^subKeys[0]
    if verboseState: print('**pt = {:04x}**'.format(state))
    return state
if __name__ == "__main__":
    # Generate a random key
    k = keyGeneration()
    # Produce a CSV of (plaintext, ciphertext) pairs for cryptanalysis.
    # NOTE(review): assumes the 'testData/' directory already exists — confirm,
    # or create it with os.makedirs before running.
    fileName = 'testData/' + k[0:20] + '.dat'
    nVals = 10000
    print('Running basic SPN cipher with key K = {:}'.format(k))
    # Context manager guarantees the file is closed even if encrypt() raises.
    with open(fileName, "w") as fd_w:
        for i in range(0, nVals):
            fd_w.write('{:04x}, {:04x}\n'.format(i, encrypt(i, k)))
    print('Simple SPN plaintext, ciphertext CSV written to ' + fileName)
    print('{:} values written.'.format(nVals))
| {
"repo_name": "hicksc/Basic-SPN-cryptanalysis",
"path": "basic_SPN.py",
"copies": "1",
"size": "5172",
"license": "mit",
"hash": 6868405655758013000,
"line_mean": 35.4225352113,
"line_max": 146,
"alpha_frac": 0.5984145398,
"autogenerated": false,
"ratio": 2.9588100686498855,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40572246084498853,
"avg_score": null,
"num_lines": null
} |
# This code is licensed under the MIT License.
#
# MIT License
#
# Copyright (c) 2016 Luca Vallerini
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Luca Vallerini
# E-mail: lucavall90@gmail.com
#
# Date: 2016-10-30
# Last update: 2016-10-31
import random
from sys import exit
# Choose a random word from a dictionary
def choose_a_word(lang):
    """Pick a random line from the dictionary file '<lang>.txt', upper-cased."""
    with open(lang + '.txt', 'r') as dictionary:
        words = dictionary.readlines()
    return random.choice(words).strip().upper()
# Start the game with a given player name and
# a dictionary choose by the player.
def game(player, lang):
word = choose_a_word(lang)
show_word = "-" * len(word)
turn = 0
guesses = ""
hangman(turn, show_word, guesses)
while True:
move = raw_input("Insert your guess: ").upper()
while move in guesses:
move = raw_input("%s already guessed; try another letter: " % move).upper()
guesses += move
if move in word:
print "You're guess is right!"
for i in range(len(word)):
if word[i] == move:
show_word = show_word[:i] + move + show_word[i + 1:]
if show_word.find('-') == -1:
hangman(turn, show_word, guesses)
print "Congratulations %s, you win!" % player
break
else:
turn += 1
print "You're guess is wrong."
if turn >= 6:
hangman(turn, show_word, guesses)
print "Sorry %s, you lost." % player
print "The word is %s." % word
break
hangman(turn, show_word, guesses)
# Print The Hangman board, spaces for the word to guess
# and the guesses done so far.
def hangman(turn, word, guesses):
    """Draw the gallows for `turn` wrong guesses (0-6), the partially
    revealed word, and all letters guessed so far."""
    print " _____"
    print " | |"
    # Head appears after the first wrong guess
    if turn >= 1:
        print " O |"
    else:
        print " |"
    # Torso: both arms at 4 wrong guesses, one arm at 3, body only at 2
    if turn >= 4:
        print " /|\ |"
        print " | |"
    elif turn >= 3:
        print " /| |"
        print " | |"
    elif turn >= 2:
        print " | |"
        print " | |"
    else:
        print " |"
        print " |"
    # Legs: both at 6 wrong guesses (game over), one at 5
    if turn >= 6:
        print " / \ |"
    elif turn >= 5:
        print " / |"
    else:
        print " |"
    print " ---------"
    print word, "[%d letters]" % len(word)
    print "Already guessed: " + guesses + "\n"
# Start the game asking the player for his/her name
# and let the player choose for the dictionary to play with.
def start():
    """Entry point: ask the player's name, let them pick a dictionary,
    then launch a game (or quit on an invalid choice)."""
    print "Hi! Welcome to The Hangman game!"
    player = raw_input("What's your name? ").strip().upper()
    print "Dictionary available: "
    print "1) English"
    print "2) Italian"
    dictionary = raw_input("Now %s, choose the dictionary you want to play with: " % player).strip()
    if dictionary == '1':
        game(player, 'english')
    elif dictionary == '2':
        game(player, 'italian')
    else:
        print "Wrong choice, quitting..."
        exit(0)

# Start immediately on import/run (no __main__ guard in the original)
start()
| {
"repo_name": "lucavallerini/miscellanea",
"path": "hangman/hangman.py",
"copies": "1",
"size": "4125",
"license": "mit",
"hash": 4226081170446738400,
"line_mean": 28.6762589928,
"line_max": 100,
"alpha_frac": 0.5844848485,
"autogenerated": false,
"ratio": 3.840782122905028,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4925266971405028,
"avg_score": null,
"num_lines": null
} |
# This code is licensed under the MIT License.
#
# MIT License
#
# Copyright (c) 2016 Luca Vallerini
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Luca Vallerini
# E-mail: lucavall90@gmail.com
#
# Date: 2016-10-22
# Last update: 2016-10-26
import numpy as np
# Draw the board game
def draw_board_game(values):
    """Print the square board `values` (a 2-D numpy array) with ASCII borders.

    Cells are printed as ints; 0 means empty, 1/2 are the players' marks.
    """
    size = len(values)
    for i in range(len(values)):
        # Horizontal separator above each row
        print " ---" * size
        row = ""
        for j in range(size):
            if j == (size - 1):
                row += "| " + str(int(values[i, j])) + " |"
            else:
                row += "| " + str(int(values[i, j])) + " "
        print row
    # Closing separator under the last row
    print " ---" * size
# Check if someone won
def winner(game):
    """Return the winning player's number (>0), or 0 if no one has won.

    A player wins with a full row, column, diagonal or anti-diagonal.
    Empty cells are 0, so a uniform line only counts when its value > 0.
    NOTE(review): the local flag `winner` shadows this function's name, and
    the diagonal/anti-diagonal checks sit inside the row/column loop so they
    re-run `size` times (reusing loop index i) — redundant but harmless.
    """
    size = len(game)
    for i in range(size):
        # check winner by row i: flag stays True while cells match game[i, 0]
        tmp_row = game[i, 0]
        winner = True
        for j in range(size):
            if game[i, j] == tmp_row and winner and j == size - 1:
                if tmp_row > 0:
                    return tmp_row
            elif game[i, j] == tmp_row and winner:
                winner = True
            else:
                winner = False
        # check winner by column i
        tmp_col = game[0, i]
        winner = True
        for j in range(size):
            if game[j, i] == tmp_col and winner and j == size - 1:
                if tmp_col > 0:
                    return tmp_col
            elif game[j, i] == tmp_col and winner:
                winner = True
            else:
                winner = False
        # check winner on main diagonal
        tmp_diag = game[0, 0]
        winner = True
        for i in range(size):
            if game[i, i] == tmp_diag and winner and i == size - 1:
                if tmp_diag > 0:
                    return tmp_diag
            elif game[i, i] == tmp_diag and winner:
                winner = True
            else:
                winner = False
        # check winner on anti diagonal, walking (0, size-1) -> (size-1, 0)
        tmp_adiag = game[0, size - 1]
        winner = True
        i, j = 0, size-1
        while winner:
            if game[i,j] == tmp_adiag and winner and i == size - 1:
                winner = False
                if tmp_adiag > 0:
                    return tmp_adiag
            elif game[i,j] == tmp_adiag and winner:
                winner = True
            else:
                winner = False
            i += 1
            j -= 1
    return 0 # no one won
def game_play(table_size):
    """Main loop: alternate player 1 and player 2 moves until a win or a draw.

    NOTE(review): the 9-turn limit (one turn = both players moving) assumes a
    3x3 board; for other table_size values draw detection is wrong — confirm.
    """
    turn = 1
    winning = False
    table = np.zeros((table_size, table_size))
    # Draw empty board
    draw_board_game(table)
    # Begin the game
    while (not winning and turn <= 9):
        print "Turn %d" % (turn)
        # Player 1 move
        move(table, 1)
        # Check if player 1 won: if so, terminate
        # the game, otherwise go on with player 2 turn.
        k = winner(table)
        if k > 0:
            winning = True
            print "Game over. Player %d won!" % k
            break
        # Player 2 move
        move(table, 2)
        # Check if player 2 won or if there are no more
        # turns: if so, terminate the game, otherwise
        # move to the next turn.
        k = winner(table)
        if k > 0:
            winning = True
            print "Game over. Player %d won!" % k
            break
        elif k == 0 and turn == 9:
            print "Game over. No one won!"
            break
        else:
            turn += 1
# Check if the move is valid or not
def isMoveValid(t, m):
    """Return True when m = [row, col] (1-based, as strings or ints) names an
    empty cell of the square board t.

    Robustness fix: move() feeds this raw `raw_input(...).split()` output, so
    the original raised IndexError on fewer than two tokens and ValueError on
    non-numeric text; both malformed cases now simply return False.
    """
    if len(m) < 2:
        return False
    try:
        row = int(m[0]) - 1
        col = int(m[1]) - 1
    except ValueError:
        # Non-numeric coordinates are just an invalid move
        return False
    size = len(t)
    # bool() normalizes numpy's bool_ from the cell comparison
    return bool(0 <= row < size and 0 <= col < size and t[row, col] == 0)
# Insert your move: if the move is valid, insert it in the
# board and redraw it, otherwise ask for a valid move.
def move(table, player):
    """Prompt `player` for a 1-based "row col" move, re-prompt until it is
    valid, then write the player's mark into the board and redraw it.
    NOTE: the local variable `move` shadows this function's name.
    """
    move = raw_input("Player %d, your move (e.g. 1 3): " % player).split()
    while (not isMoveValid(table, move)):
        move = raw_input("Cell already taken or wrong coordinates, try again: ").split()
    table[int(move[0])-1, int(move[1])-1] = player
    draw_board_game(table)
# Script entry: ask for a board size, then run an interactive match.
print "Welcome on Tic Tac Toe game!"
# NOTE(review): Python 2 input() evaluates the typed text; int(raw_input(...))
# would be safer for untrusted input.
board_size = int(input("Insert the size of the board: "))
print "OK, let the game begin!"
game_play(board_size)
| {
"repo_name": "lucavallerini/miscellanea",
"path": "tictactoe/tictactoe.py",
"copies": "1",
"size": "5377",
"license": "mit",
"hash": -974962534994894000,
"line_mean": 29.7257142857,
"line_max": 124,
"alpha_frac": 0.5458434071,
"autogenerated": false,
"ratio": 3.765406162464986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9671705442292424,
"avg_score": 0.027908825454512457,
"num_lines": 175
} |
"""A basic trie."""
import argparse
import sys
class Trie(object):
    """A trie over sequences of hashable symbols.

    Each node is a dict mapping symbol -> (is_terminal, child_dict); the
    root is a bare dict of such entries.  A sequence is "contained" when
    its last symbol's entry is marked terminal.

    Fix: contains(()) raised NameError in the original (the loop variable
    `is_terminal` was returned without ever being bound); the empty
    sequence now correctly reports False.
    """

    def __init__(self):
        self.root = {}

    def add(self, seq):
        """Insert `seq`, marking its final symbol terminal."""
        node = self.root
        last = len(seq) - 1
        for i, sym in enumerate(seq):
            if sym not in node:
                node[sym] = (False, {})
            if i == last:
                # Preserve existing children, just flip the terminal flag
                node[sym] = (True, node[sym][1])
            else:
                _, node = node[sym]

    def remove(self, seq):
        """Unmark `seq` as terminal and prune branches left empty.

        Raises ValueError if some prefix of `seq` is absent from the trie.
        """
        node = self.root
        path = []  # parent node of seq[i] at index i
        last = len(seq) - 1
        for i, sym in enumerate(seq):
            path.append(node)
            if sym not in node:
                raise ValueError('Item not found, cannot be removed')
            if i == last:
                node[sym] = (False, node[sym][1])
            else:
                _, node = node[sym]
        # Clean up: walk back deleting nodes that are neither terminal
        # nor have children; stop at the first node still in use.
        for i in range(len(seq) - 1, -1, -1):
            parent = path[i]
            sym = seq[i]
            is_terminal, child = parent[sym]
            if is_terminal or child:
                break
            del parent[sym]

    def contains(self, seq):
        """Return True iff `seq` itself was added (empty seq -> False)."""
        node = self.root
        terminal = False
        for sym in seq:
            if sym not in node:
                return False
            terminal, node = node[sym]
        return terminal

    def contains_prefix(self, seq):
        """Return True iff `seq` is a prefix of some added sequence."""
        node = self.root
        for sym in seq:
            if sym not in node:
                return False
            _, node = node[sym]
        return True

    def get_node(self, seq):
        """Return the child dict reached by walking `seq`, or None."""
        node = self.root
        for sym in seq:
            if sym not in node:
                return None
            _, node = node[sym]
        return node

    def __iter__(self):
        """Yield every terminal sequence as a tuple (arbitrary order)."""
        stack = [((), self.root)]
        while stack:
            prefix, node = stack.pop()
            for sym in node:
                new_prefix = prefix + (sym,)
                is_terminal, child = node[sym]
                if is_terminal:
                    yield new_prefix
                stack.append((new_prefix, child))
def main():
    """Smoke-test the Trie with a scripted add/remove/contains sequence."""
    trie = Trie()
    print 'Running basic tests...'
    trie.add((0,))
    trie.add((1, 2, 3))
    assert trie.contains((0,)) == True
    assert trie.contains((1, 2, 3)) == True
    assert trie.contains((1,)) == False
    assert trie.contains_prefix((1,)) == True
    assert trie.contains((1, 2)) == False
    assert trie.contains_prefix((1, 2)) == True
    assert trie.contains((2,)) == False
    trie.add((1, 2))
    trie.add((1, 4))
    trie.add((5, 6))
    assert trie.contains((1, 2, 3)) == True
    assert trie.contains((1, 2)) == True
    assert trie.contains_prefix((1, 2)) == True
    assert trie.contains((2,)) == False
    assert trie.contains_prefix((2,)) == False
    assert trie.contains((5,)) == False
    assert trie.contains((1, 4)) == True
    assert trie.contains((5, 6)) == True
    assert trie.contains_prefix((5,)) == True
    # Removing a sequence must not disturb its still-terminal prefixes
    trie.remove((1, 2, 3))
    assert trie.contains((1, 2, 3)) == False
    assert trie.contains((1, 2)) == True
    assert trie.contains_prefix((1, 2)) == True
    # Removing an interior sequence keeps longer sequences reachable
    trie.add((1, 2, 3))
    trie.remove((1, 2))
    trie.add((1,))
    assert trie.contains((1, 2, 3)) == True
    assert trie.contains((1, 2)) == False
    assert trie.contains((1,)) == True
    assert trie.contains_prefix((1, 2)) == True
    assert set(trie) == set([(0,), (1,), (1, 2, 3), (1, 4), (5, 6)])
    print trie.root
    print 'All pass!'

if __name__ == '__main__':
    main()
| {
"repo_name": "robinjia/nectar",
"path": "nectar/base/trie.py",
"copies": "1",
"size": "3087",
"license": "mit",
"hash": 6380779751943238000,
"line_mean": 25.3846153846,
"line_max": 66,
"alpha_frac": 0.5558794947,
"autogenerated": false,
"ratio": 3.121334681496461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4177214176196461,
"avg_score": null,
"num_lines": null
} |
"""A basic vocabulary class."""
import collections
UNK_TOKEN = '<UNK>'
UNK_INDEX = 0


class Vocabulary(object):
    """Bidirectional word <-> index mapping with rare words collapsed to <UNK>."""

    def __init__(self, unk_threshold=0):
        """Initialize the vocabulary.

        Args:
            unk_threshold: words with <= this many counts will be considered <UNK>.
        """
        self.unk_threshold = unk_threshold
        self.counts = collections.Counter()
        self.word2index = {UNK_TOKEN: UNK_INDEX}
        self.word_list = [UNK_TOKEN]

    def add_word(self, word, count=1):
        """Count `word`; assign an index once its count exceeds the threshold."""
        self.counts[word] += count
        already_indexed = word in self.word2index
        if not already_indexed and self.counts[word] > self.unk_threshold:
            self.word2index[word] = len(self.word_list)
            self.word_list.append(word)

    def add_words(self, words):
        for word in words:
            self.add_word(word)

    def add_sentence(self, sentence):
        self.add_words(sentence.split(' '))

    def add_sentences(self, sentences):
        for sentence in sentences:
            self.add_sentence(sentence)

    def add_word_hard(self, word):
        """Add `word` with enough count that it cannot map to UNK."""
        self.add_word(word, count=(self.unk_threshold + 1))

    def get_word(self, index):
        return self.word_list[index]

    def get_index(self, word):
        """Index of `word`, or UNK_INDEX if it never crossed the threshold."""
        return self.word2index.get(word, UNK_INDEX)

    def indexify_sentence(self, sentence):
        return [self.get_index(token) for token in sentence.split(' ')]

    def indexify_list(self, elems):
        return [self.get_index(elem) for elem in elems]

    def recover_sentence(self, indices):
        return ' '.join(self.get_word(i) for i in indices)

    def has_word(self, word):
        return word in self.word2index

    def __contains__(self, word):
        return self.has_word(word)

    def size(self):
        # Number of words that have been assigned an index (includes <UNK>)
        return len(self.word2index)

    def __len__(self):
        return self.size()

    def __iter__(self):
        return iter(self.word_list)
| {
"repo_name": "robinjia/nectar",
"path": "nectar/base/vocabulary.py",
"copies": "1",
"size": "1946",
"license": "mit",
"hash": -3435220087128174000,
"line_mean": 25.6575342466,
"line_max": 78,
"alpha_frac": 0.6536485098,
"autogenerated": false,
"ratio": 3.3379073756432245,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4491555885443224,
"avg_score": null,
"num_lines": null
} |
#A basic way of caching files associated with URLs
from datetime import datetime
import os
import urllib2
import tempfile
import json
import socket
import utilities
import shutil
class URLCache(object):
    """File-backed cache mapping URLs to downloaded resources on disk.

    Downloaded files live in <folder>/cache/ and the index is persisted as
    JSON in <folder>/cache.json.  Intended for use as a context manager:
    the index is loaded on __enter__ and flushed + saved on __exit__.
    """
    # Timestamp format used for each entry's 'expiry' field in the JSON index
    TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

    def __init__(self, folder):
        self._folder = os.path.join(folder, 'cache')
        self._file = os.path.join(folder, 'cache.json')

    def __enter__(self):
        # Create the cache folder and index file on first use
        if not os.path.exists(self._folder):
            os.makedirs(self._folder)
        try:
            fyle = open(self._file, 'r')
        except IOError:
            #create the file and try again.
            open(self._file, 'a').close()
            fyle = open(self._file, 'r')
        try:
            self._cache = json.load(fyle)
        except ValueError:
            # Empty or corrupt index: start from scratch
            self._cache = dict()
        fyle.close()
        return self

    def __exit__(self, typ, value, traceback):
        # Drop stale entries, then persist the index to disk
        self.flush()
        with open(self._file, 'w+') as fyle:
            json.dump(self._cache, fyle, indent=2)

    def remove(self, url):
        """Delete the cached file for `url` (if any) and forget its entry."""
        if url in self._cache:
            entry = self._cache[url]
            if os.path.isfile(entry['resource']):
                os.remove(entry['resource'])
            del self._cache[url]

    def flush(self):
        """Remove every entry whose file vanished or whose expiry has passed."""
        flushlist = list()
        for url, entry in self._cache.iteritems():
            if not os.path.isfile(entry['resource']) or utilities.strptime(entry['expiry'], self.TIME_FORMAT) < datetime.utcnow():
                flushlist.append(url)
        for url in flushlist:
            self.remove(url)

    def erase(self):
        """Destroy the whole cache: index file and resource folder."""
        os.remove(self._file)
        shutil.rmtree(self._folder)

    def get(self, url, expiry_callback, resource_callback=None):
        """
        Return a path to a cached copy of `url`, downloading when needed.

        expiry_callback(path) must return a datetime used as the entry's
        expiry.  resource_callback(path), if given, may post-process the
        downloaded file.  Network errors are re-raised with the url
        appended to the exception args.
        """
        try:
            entry = self._cache[url]
            # A stale or missing file is treated exactly like a cache miss
            if not os.path.isfile(entry['resource']) or utilities.strptime(entry['expiry'], self.TIME_FORMAT) < datetime.utcnow():
                raise InvalidCacheError
            else:
                return entry['resource']
        except (KeyError, InvalidCacheError):
            #(src, headers) = urllib.urlretrieve(url)
            try:
                response = urllib2.urlopen(url)
            except (socket.timeout, urllib2.URLError) as e:
                # Attach the offending url before re-raising
                e.args = (str(e), url)
                raise
            page = response.read()
            response.close()
            # Save to a uniquely-named file inside the cache folder
            tmp = tempfile.NamedTemporaryFile(dir=self._folder, delete=False)
            tmp.write(page)
            tmp.close()
            expiry = expiry_callback(tmp.name)
            if resource_callback:
                resource_callback(tmp.name)
            self._cache[url] = {'resource': tmp.name, 'expiry': expiry.strftime(self.TIME_FORMAT)}
            return tmp.name
class InvalidCacheError(Exception):
    """Raised internally by URLCache.get when an entry is stale or its file is gone."""
    pass
"repo_name": "aplicatii-romanesti/allinclusive-kodi-pi",
"path": ".kodi/addons/weather.metoffice/src/metoffice/urlcache.py",
"copies": "1",
"size": "2858",
"license": "apache-2.0",
"hash": -1258609642816426000,
"line_mean": 31.1235955056,
"line_max": 130,
"alpha_frac": 0.5542337299,
"autogenerated": false,
"ratio": 4.088698140200286,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5142931870100286,
"avg_score": null,
"num_lines": null
} |
# A basic web server using sockets
import socket
PORT = 8090            # TCP port the server listens on
MAX_OPEN_REQUESTS = 5  # listen() backlog before refusing connections
def process_client(clientsocket):
    """Serve one HTTP request: read it, reply with the contents of myhtml.html.

    Fixes: the file was opened without a context manager (handle leaked if the
    read raised), and an initial dead assignment to web_contents was removed.
    NOTE(review): HTTP/1.1 requires CRLF ("\r\n") header line endings; the bare
    "\n" used here works only with lenient clients — TODO confirm intent.
    """
    print(clientsocket)
    data = clientsocket.recv(1024)
    print(data)
    with open("myhtml.html", "r") as f:
        web_contents = f.read()
    web_headers = "HTTP/1.1 200"
    web_headers += "\n" + "Content-Type: text/html"
    web_headers += "\n" + "Content-Length: %i" % len(str.encode(web_contents))
    clientsocket.send(str.encode(web_headers + "\n\n" + web_contents))
    clientsocket.close()
# create an INET, STREAMing socket
serversocket = socket.socket(socket.AF_INET,
                             socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
hostname = socket.gethostname()
ip = socket.gethostbyname(hostname)
# Let's use better the local interface name
# NOTE(review): hostname is overwritten with a hard-coded address but the
# socket is bound to `ip` above — the banner below may not match the bound
# address; confirm which interface is intended.
hostname = "10.10.104.17"
try:
    serversocket.bind((ip, PORT))
    # become a server socket
    # MAX_OPEN_REQUESTS connect requests before refusing outside connections
    serversocket.listen(MAX_OPEN_REQUESTS)
    while True:
        # accept connections from outside
        print ("Waiting for connections at %s %i" % (hostname, PORT))
        (clientsocket, address) = serversocket.accept()
        # now do something with the clientsocket
        # in this case, we'll pretend this is a non threaded server
        process_client(clientsocket)
except socket.error:
    print("Problemas using port %i. Do you have permission?" % PORT)
| {
"repo_name": "acs-test/openfda",
"path": "PER_2017-18/clientServer/P1/server_web.py",
"copies": "1",
"size": "1505",
"license": "apache-2.0",
"hash": 2865624443639080400,
"line_mean": 30.3541666667,
"line_max": 78,
"alpha_frac": 0.673089701,
"autogenerated": false,
"ratio": 3.5245901639344264,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9647377913826249,
"avg_score": 0.010060390221635428,
"num_lines": 48
} |
# A basic web server using sockets
import socket
PORT = 8092            # TCP port the server listens on
MAX_OPEN_REQUESTS = 5  # listen() backlog before refusing connections
def process_client(clientsocket):
    """Answer a single HTTP request with a fixed '<h1>Received</h1>' page."""
    print(clientsocket)
    print(clientsocket.recv(1024))
    body = "<h1>Received</h1>"
    # Minimal response head; Content-Length counts the encoded body bytes
    head = "\n".join([
        "HTTP/1.1 200",
        "Content-Type: text/html",
        "Content-Length: %i" % len(str.encode(body)),
    ])
    clientsocket.send(str.encode(head + "\n\n" + body))
    clientsocket.close()
# create an INET, STREAMing socket
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind the socket to a public host, and a well-known port
hostname = socket.gethostname()
# Let's use better the local interface name (loopback only)
hostname = "localhost"
try:
    serversocket.bind((hostname, PORT))
    # become a server socket
    # MAX_OPEN_REQUESTS connect requests before refusing outside connections
    serversocket.listen(MAX_OPEN_REQUESTS)
    while True:
        # accept connections from outside
        print ("Waiting for connections at %s %i" % (hostname, PORT))
        (clientsocket, address) = serversocket.accept()
        # now do something with the clientsocket
        # in this case, we'll pretend this is a non threaded server
        process_client(clientsocket)
except socket.error:
    print("Problemas using port %i. Do you have permission?" % PORT)
| {
"repo_name": "acs-test/openfda",
"path": "practice-basic-web-server/server_web.py",
"copies": "3",
"size": "1362",
"license": "apache-2.0",
"hash": 1799987958938211300,
"line_mean": 32.2195121951,
"line_max": 78,
"alpha_frac": 0.6930983847,
"autogenerated": false,
"ratio": 3.661290322580645,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5854388707280644,
"avg_score": null,
"num_lines": null
} |
""" Abaxis Vet Scan - VS2
"""
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from . import AbaxisVetScanCSVParser, AbaxisVetScanImporter
import json
import traceback
title = "Abaxis VetScan - VS2"
def Import(context, request):
    """Import Abaxis VetScan VS2 analysis results from an uploaded file.

    Reads the upload and its options from request.form, parses the file,
    runs the importer, and returns a JSON string with 'errors', 'log' and
    'warns' lists.
    """
    infile = request.form['data_file']
    fileformat = request.form['format']
    artoapply = request.form['artoapply']
    override = request.form['override']
    sample = request.form.get('sample',
                              'requestid')
    instrument = request.form.get('instrument', None)
    errors = []
    logs = []
    warns = []
    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    # NOTE(review): when no file is selected an error is recorded but the
    # format branch below still runs — confirm this is intentional.
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    if fileformat == 'csv':
        parser = AbaxisVetScanCSVVS2Parser(infile)
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))
    if parser:
        # Map the 'apply to AR states' option onto workflow state lists
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']
        # over = [override existing, override empty-only]
        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]
        # Search criteria used to match results to analysis requests
        # NOTE(review): the second test uses `if` instead of `elif`; behavior
        # is unchanged since the values are mutually exclusive, but it breaks
        # the chain's symmetry — probably a typo.
        sam = ['getRequestID', 'getSampleID', 'getClientSampleID']
        if sample == 'requestid':
            sam = ['getRequestID']
        if sample == 'sampleid':
            sam = ['getSampleID']
        elif sample == 'clientsid':
            sam = ['getClientSampleID']
        elif sample == 'sample_clientsid':
            sam = ['getSampleID', 'getClientSampleID']
        importer = AbaxisVetScanVS2Importer(parser=parser,
                                            context=context,
                                            idsearchcriteria=sam,
                                            allowed_ar_states=status,
                                            allowed_analysis_states=None,
                                            override=over,
                                            instrument_uid=instrument)
        tbex = ''
        # Bare except preserved: any importer failure is reported back to the
        # caller as a traceback string rather than crashing the request.
        try:
            importer.process()
        except:
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)
    results = {'errors': errors, 'log': logs, 'warns': warns}
    return json.dumps(results)
class AbaxisVetScanCSVVS2Parser(AbaxisVetScanCSVParser):
    # VS2-specific CSV parser; only customizes the attachment type label.
    def getAttachmentFileType(self):
        # NOTE(review): returned label has a misspelling ("Abaxix") and a
        # trailing space; left untouched since other records may match on it.
        return "Abaxix VetScan VS2 "
class AbaxisVetScanVS2Importer(AbaxisVetScanImporter):
    # VS2-specific importer; excludes no analysis keywords from import.
    def getKeywordsToBeExcluded(self):
        return []
| {
"repo_name": "hocinebendou/bika.gsoc",
"path": "bika/lims/exportimport/instruments/abaxis/vetscan/vs2.py",
"copies": "3",
"size": "3067",
"license": "mit",
"hash": 3257210682654853600,
"line_mean": 33.0777777778,
"line_max": 78,
"alpha_frac": 0.5572220411,
"autogenerated": false,
"ratio": 4.301542776998597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010913964203437888,
"num_lines": 90
} |
# [['a', '+', ['b', '/', 'c', '*', 2], '-', <__main__.mathop object at 0x03694870>]]
import operator
from .lexer import mathop
# Python callables for each arithmetic token.
# NOTE(review): op_map is not referenced anywhere in this module — possibly dead.
op_map = {
    "+": operator.add,
    "-": operator.sub,
    "*": operator.mul,
    "/": operator.truediv
}
# Pseudo-assembly mnemonic for each arithmetic token (used by stack_to_ops)
asm_map = {
    "+": "ADD",
    "-": "SUB",
    "*": "MUL",
    "/": "DIV"
}
class no_depth_list(list):
    """List subclass whose `<<` operator appends a single item.

    NOTE(review): the original docstring claimed nested iterables are
    unpacked before being appended, but __lshift__ simply appends its
    operand as-is — no unpacking happens. Docstring corrected to match
    the code; callers only push strings, so behavior is unaffected.
    """
    def __lshift__(self, other):
        self.append(other)
def parse(expr):
    """Flatten a nested infix expression into a postfix-style token list.

    NOTE(review): operands are consumed with expr.pop(), i.e. from the right
    end, so left-to-right operand order is not preserved — verify against the
    lexer's output shape before trusting the emitted order.
    """
    resolved = []
    if isinstance(expr, (int, str)):
        # Atoms are returned unchanged
        return expr
    while expr:
        i = expr.pop()
        if isinstance(i, list):
            # Inline the flattened sub-expression
            for i in parse(i):
                resolved.append(i)
        elif isinstance(i, mathop):
            for i in parse(i.children):
                resolved.append(i)
        elif i in ["+", "-", "*", "/"]:
            next_ = parse(expr.pop())
            prev = resolved.pop()
            # NOTE(review): += extends element-wise: a multi-character string
            # operand is split into single characters here, and an int operand
            # raises TypeError — confirm inputs are single chars or lists.
            resolved += next_
            resolved.append(prev)
            resolved.append(i)
        else: # string or int
            resolved.append(i)
    return resolved
def stack_to_ops(stack):
    """Translate a postfix token stack into pseudo-assembly instructions.

    Returns the no_depth_list of instruction strings.  Fix: the original
    built `out` but never returned it, so every call produced None.
    """
    out = no_depth_list()
    for i in stack:
        if isinstance(i, int):
            out << f"PUSHSTK #{i}"
        elif i in ["+", "-", "*", "/"]:
            # Pop both operands, apply the operator, push the result
            out << "POPSTK @ACC"
            out << "POPSTK @EAX"
            out << f"{asm_map[i]} @EAX"
            out << "PUSHSTK @ACC"
        elif isinstance(i, str):
            # NOTE(review): `self` is undefined in this free function, so this
            # branch raises NameError if reached — looks copied out of a class
            # (file lives under 'disused'). Left as-is, flagged for repair.
            out << f"PUSHSTK {self.get_variable(i)}"
        elif isinstance(i, functionCallOB):
            # NOTE(review): functionCallOB and self.assemble_list are also
            # undefined at module scope — same repair needed as above.
            for op in self.assemble_list(i):
                out << op
            out << "PUSHSTK @RET"
    return out
if __name__ == "__main__":
    # Quick manual check of parse() on a nested infix expression
    print(parse([['a', '+', ['b', '/', 'c', '*', 2], '-', 4]]))
| {
"repo_name": "nitros12/Cpu_emulator",
"path": "wew compiler/disused/math_op.py",
"copies": "1",
"size": "1799",
"license": "mit",
"hash": -6337805026242189000,
"line_mean": 23.9861111111,
"line_max": 115,
"alpha_frac": 0.4674819344,
"autogenerated": false,
"ratio": 3.5343811394891946,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4501863073889194,
"avg_score": null,
"num_lines": null
} |
#< ab || cd > = [[ a,b ] , [ c,d ]]
#A script to find the optimal alignment of diagrams used in the CCDT t3 amplitude equation
def perm(a, i, e):
    """Swap positions i and e in the ket of diagram a.

    Rows a[1] (ket indices) and a[3] (their general labels) are swapped
    in lockstep so index/label pairing stays intact.
    """
    a[1][i], a[1][e] = a[1][e], a[1][i]
    a[3][i], a[3][e] = a[3][e], a[3][i]
def perm2(a, i, e):
    """Swap positions i and e in the bra of diagram a.

    Rows a[0] (bra indices) and a[2] (their general labels) are swapped
    in lockstep so index/label pairing stays intact.
    """
    a[0][i], a[0][e] = a[0][e], a[0][i]
    a[2][i], a[2][e] = a[2][e], a[2][i]
def align(a, b, c, left_indices):
    """Reorder the legs of diagrams a, b, c so contractions line up.

    Each diagram is [bra, ket, bra_labels, ket_labels].  External hole
    indices i,j,k are moved to a's bra and external particle indices d,e,f
    to a's ket; b is split so its legs shared with a's ket come first; c is
    then split against b's remaining legs.  Returns the aligned (a_, b_, c_).
    NOTE(review): the two bare `print` statements below look like leftover
    debug output — confirm they are intentional.
    """
    #1. assign all left_indices in a[0], a[2]
    a_ = [[],[],[],[]]
    b_ = [[],[],[],[]]
    c_ = [[],[],[],[]]
    for i in range(len(a[0])):
        #bra
        if a[0][i] in ["d", "e", "f"]:
            #move to ket
            a_[1].append(a[0][i])
            a_[3].append(a[2][i])
        if a[0][i] in ["a", "b", "c"]:
            #keep in bra
            a_[0].append(a[0][i])
            a_[2].append(a[2][i])
        #ket
        if a[1][i] in ["i", "j", "k"]:
            #move to bra
            a_[0].append(a[1][i])
            a_[2].append(a[3][i])
        if a[1][i] in ["l", "m", "n"]:
            #keep in ket
            a_[1].append(a[1][i])
            a_[3].append(a[3][i])
    #2. assign all left indices in b to a_[1]: legs shared with a's ket go
    #   to b's bra, the rest to b's ket
    for i in range(len(b[0])):
        if b[0][i] in a_[1]:
            b_[0].append(b[0][i])
            b_[2].append(b[2][i])
        if b[0][i] not in a_[1]:
            b_[1].append(b[0][i])
            b_[3].append(b[2][i])
    for i in range(len(b[0])):
        if b[1][i] in a_[1]:
            b_[0].append(b[1][i])
            b_[2].append(b[3][i])
        if b[1][i] not in a_[1]:
            b_[1].append(b[1][i])
            b_[3].append(b[3][i])
    #ensure correct order in a[1]: permute a's ket until it matches b's bra
    #a_temp = a_
    print b_
    print a_
    for i in range(len(a_[1])):
        if a_[1][i] != b_[0][i]:
            for e in range(len(a_[1])):
                if a_[1][e] == b_[0][i]:
                    perm(a_, e,i)
    #3. align c to b_[1]: shared legs to c's bra, the rest to c's ket
    for i in range(len(c[0])):
        if c[0][i] in b_[1]:
            c_[0].append(c[0][i])
            c_[2].append(c[2][i])
        if c[0][i] not in b_[1]:
            c_[1].append(c[0][i])
            c_[3].append(c[2][i])
    for i in range(len(c[0])):
        if c[1][i] in b_[1]:
            c_[0].append(c[1][i])
            c_[2].append(c[3][i])
        if c[1][i] not in b_[1]:
            c_[1].append(c[1][i])
            c_[3].append(c[3][i])
    # Permute c's bra until it matches b's ket
    for i in range(len(c_[0])):
        if b_[1][i] != c_[0][i]:
            for e in range(len(c_[0])):
                if c_[0][e] == b_[1][i]:
                    perm2(c_, i,e)
    #print "A:", a_
    #print "B:", b_
    #print "C:", c_
    return a_,b_,c_
def diagsort(a, c):
    """Build the update-function name for a contraction aligned to the T3 amplitude.

    Maps a's bra indices and c's ket indices (a,b,c,i,j,k) to the general
    labels p..u and joins them as 'update_as_<bra>_<ket>'.
    """
    nr = {"a": "p", "b": "q","c": "r", "i": "s","j": "t", "k": "u" }
    bra = "".join(nr[idx] for idx in a[0])
    ket = "".join(nr[idx] for idx in c[1])
    return "update_as_" + bra + "_" + ket
#align to t3 amp
def setup(a,b,c):
    """Assign general labels p..u to the legs of diagrams a, b, c, collect the
    external (left) indices from a, and align the three via align().

    Each diagram comes in as [bra, ket] and leaves as the aligned
    [bra, ket, bra_labels, ket_labels].
    """
    #assign general indices pqrs
    a = [a[0], a[1], [],[]]
    b = [b[0], b[1], [],[]]
    c = [c[0], c[1], [],[]]
    indx = "pqrstu"
    n = 0
    # Label each diagram's legs consecutively: bra first, then ket
    for i in range(len(a[0])):
        a[2].append(indx[n])
        n+= 1
    for i in range(len(a[1])):
        a[3].append(indx[n])
        n+= 1
    n = 0
    for i in range(len(b[0])):
        b[2].append(indx[n])
        n+= 1
    for i in range(len(b[1])):
        b[3].append(indx[n])
        n+= 1
    n = 0
    for i in range(len(c[0])):
        c[2].append(indx[n])
        n+= 1
    for i in range(len(c[1])):
        c[3].append(indx[n])
        n+= 1
    #identify left indices (external particles a,b,c and holes i,j,k in a)
    left_indices = []
    for i in range(len(a[0])):
        if a[0][i] in ["a", "b", "c"]:
            left_indices.append(a[0][i])
        if a[1][i] in ["i", "j", "k"]:
            left_indices.append(a[1][i])
    a,b,c = align(a,b,c, left_indices)
    # NOTE(review): the triple-quoted block below is dead code (an unused
    # string expression) left from an earlier approach; kept verbatim.
    """
    #align indices in a,b
    diag = [[],[]]
    ap = [[],[]]
    bp = [[],[]]
    cp = [[],[]]
    #1. identify open lines in a
    for i in range(len(a)):
    if a[0][i] in ["d", "e", "f"]:
    diag[0].append(a[0][i])
    ap[0].append(a[0][i])
    #a_s.append(A[0][i])
    if a[1][i] in ["i", "j", "k"]:
    diag[0].append(a[1][i])
    ap[0].append(a[1][i])
    #a_s.append(A[1][i])
    if a[0][i] not in ["d", "e", "f"]:
    ap[1].append(a[0][i])
    if a[1][i] not in ["l", "m", "n"]:
    ap[1].append(a[1][i])
    #align closed lines in a-b
    for i in range(len(ap[1])):
    pass
    a_s = "."
    b_s = "."
    c_s = "."
    """
    #2. use internal lines from a to form first part of b
    return a,b,c
def generate_t2t2(v, t2, t3):
    """Assemble the contraction string for a v*t2*t3 diagram (2-index flavor).

    A non-negative alignment score places t3 as the leading factor of the
    matrix product; otherwise t2 leads.
    """
    contractions = ["l", "m", "d", "e"]  # contracted indices (documentation only)
    # Score t3's preferred placement: external upper indices (a,b) pull it
    # to the front, external lower indices (i,j) push it to the back; t2's
    # indices vote the same way.
    t3ind = 0
    for i in range(len(t3[0])):
        if t3[0][i] in ["a", "b"]:
            t3ind += 1
        if t3[1][i] in ["i", "j"]:
            t3ind -= 1
    for i in range(len(t2[0])):
        if t2[0][i] in ["a", "b"]:
            t3ind += 1
        if t2[1][i] in ["i", "j"]:
            t3ind -= 1
    if t3ind >= 0:
        a, b, c = setup(t3, v, t2)  # t3 leads
    else:
        a, b, c = setup(t2, v, t3)  # t3 trails
    # Both branches of the original emitted the identical textual pattern:
    # "t3.<a labels>()*vhhpp.<b labels>()*t2.<c labels>()"
    def factor(prefix, t):
        return prefix + "".join(t[2]) + "_" + "".join(t[3]) + "()"
    matmult = factor("t3.", a) + "*" + factor("vhhpp.", b) + "*" + factor("t2.", c)
    strng = diagsort(a, c) + "(" + matmult + ")"
    return a, b, c, strng
def generate(v, t2, t3):
    """Assemble the contraction string for a v*t2*t3 diagram (3-index flavor).

    A non-negative alignment score places t3 as the leading factor of the
    matrix product; otherwise t2 leads.

    BUG FIX: the original else-branch wrote to a misspelled name `t23tr`
    instead of `t3str` while assembling the trailing factor, raising a
    NameError whenever t3ind < 0. The string assembly is also deduplicated:
    both branches of the original produced the same textual pattern.
    """
    contractions = ["l", "m", "d", "e"]  # contracted indices (documentation only)
    # Score t3's preferred placement: external upper indices (a,b,c) pull it
    # to the front, external lower indices (i,j,k) push it to the back; t2's
    # indices vote the same way.
    t3ind = 0
    for i in range(len(t3[0])):
        if t3[0][i] in ["a", "b", "c"]:
            t3ind += 1
        if t3[1][i] in ["i", "j", "k"]:
            t3ind -= 1
    for i in range(len(t2[0])):
        if t2[0][i] in ["a", "b", "c"]:
            t3ind += 1
        if t2[1][i] in ["i", "j", "k"]:
            t3ind -= 1
    if t3ind >= 0:
        a, b, c = setup(t3, v, t2)  # t3 leads
    else:
        a, b, c = setup(t2, v, t3)  # t3 trails
    def _factor(prefix, t):
        # e.g. "t3.pq_rs()"
        return prefix + "".join(t[2]) + "_" + "".join(t[3]) + "()"
    matmult = _factor("t3.", a) + "*" + _factor("vhhpp.", b) + "*" + _factor("t2.", c)
    retstr = diagsort(a, c)
    strng = retstr + "(" + matmult + ")"
    return a, b, c, strng
def tex_pre(v, t2, t3):
    """Render the unaligned diagram <v> t2 t3 as a LaTeX string.

    Generalized from the original, which hard-coded exactly two indices per
    row (and therefore raised IndexError on the one-index interaction `v`
    used by the demo at the bottom of this file). Any row length now works;
    output for two-index rows is byte-identical to the original.
    """
    tx = " \\sum_{"
    for i in range(len(v[0])):
        tx += v[0][i] + v[1][i]
    tx += "} "
    tx += "\\langle %s \\vert \\vert %s \\rangle " % (" ".join(v[0]), " ".join(v[1]))
    tx += "t^{%s}_{%s}" % (" ".join(t2[0]), " ".join(t2[1]))
    tx += "t^{%s}_{%s} " % (" ".join(t3[0]), " ".join(t3[1]))
    return tx
def tex_aligned(a, b, c):
    """Render the aligned diagram a * <b> * c as a LaTeX string."""
    bra = "".join(b[0])
    ket = "".join(b[1])
    pieces = [
        " \\sum_{", bra, "}",
        " \\sum_{", ket, "}",
        " t^{", "".join(a[0]), "}_{", "".join(a[1]),
        "} \\langle ", bra,
        "\\vert \\vert ", ket,
        "\\rangle t^{", "".join(c[0]), "}_{", "".join(c[1]), "} ",
    ]
    return "".join(pieces)
def gen_entry(v,t2,t3):
    #Generate table entry for diagram given by t2,t3,v
    # Renders the unaligned diagram (tex_pre), computes the aligned
    # contraction and its update-call string (generate_t2t2), then renders
    # the aligned form (tex_aligned).
    tx1 = tex_pre(v,t2,t3)
    a,b,c, strng = generate_t2t2(v,t2,t3)
    tx2 = tex_aligned(a,b,c)
    # Returns (LaTeX display string "$$ before -> after $$", update-call string).
    return "$$ " + tx1 + " \\rightarrow " + tx2 + "$$" , strng
# Demo: one diagram with a contracted hole line (l) and particle line (d).
v = [["l"],["d"]]
t2 = [["a","d"],["i","j"]]
t3 = [["b","c"],["l","k"]]
# NOTE(review): tex_pre (called via gen_entry) indexes v[0][1]/v[1][1], so a
# one-index v like this looks like it would raise IndexError — confirm the
# intended input shape.
ltx, strng = gen_entry(v,t2,t3)
print ltx
print strng
def realign_diagram(d1, d2):
    """Stub: map external indices to general labels (same table as diagsort).

    BUG FIX: the original dict literal used bare names (a, p, ...), which
    raised NameError when called; quote them to match the `nr` mapping in
    diagsort(). The body is otherwise an unfinished stub and returns None.
    """
    n = {"a": "p", "b": "q", "c": "r", "i": "s", "j": "t", "k": "u"}
| {
"repo_name": "CompPhysics/ThesisProjects",
"path": "doc/MSc/msc_students/former/AudunHansen/Audun/Pythonscripts/t3_align.py",
"copies": "1",
"size": "11625",
"license": "cc0-1.0",
"hash": -7462734437164940000,
"line_mean": 23.1623376623,
"line_max": 99,
"alpha_frac": 0.3591397849,
"autogenerated": false,
"ratio": 2.8161337209302326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8513572329331243,
"avg_score": 0.03234023529979786,
"num_lines": 462
} |
# a-b-c-d-e-f-g
# i have gummy bears chasing me
# one is red, one is blue
# one is chewing on my shoe
# now i am running for my life
# because the red one has a knife
import codecs
from Crypto.Cipher import AES
class Secrets(object):
    """Collection of functions that are utilities for encryption and Azure Key Vault management."""

    # TODO: (Azure Key Vault) see about getting the key integrated into Azure Key Vault
    _Key = "This is a key123"
    _IV = "This is an IV456"

    # AES block size used by the simple padding scheme below.
    _blockSize = 16

    # Simple PKCS#7-style pad/unpad helpers, stored as plain functions on
    # the class (callers use Secrets._pad(s) / Secrets._unpad(s)).
    def _pad(s):
        fill = Secrets._blockSize - len(s) % Secrets._blockSize
        return s + chr(fill) * fill

    def _unpad(s):
        return s[: -ord(s[len(s) - 1:])]

    @staticmethod
    def _encryptContents(content):
        """Encrypt content using mode, 'AES.MODE_CBC'."""
        # TODO: (Azure Key Vault) see about getting the key integrated into Azure Key Vault
        suite = AES.new(Secrets._Key, AES.MODE_CBC, Secrets._IV)
        return suite.encrypt(Secrets._pad(content))

    @staticmethod
    def _decryptContents(content):
        """Decrypt content using mode, 'AES.MODE_CBC'."""
        # TODO: (Azure Key Vault) see about getting the key integrated into Azure Key Vault
        suite = AES.new(Secrets._Key, AES.MODE_CBC, Secrets._IV)
        return Secrets._unpad(suite.decrypt(content))
# end of class Secrets
| {
"repo_name": "andlin666/DataCachePhase1",
"path": "DataCachePhase1/Secrets.py",
"copies": "1",
"size": "1572",
"license": "apache-2.0",
"hash": 8449405470460501000,
"line_mean": 37.3414634146,
"line_max": 129,
"alpha_frac": 0.6634860051,
"autogenerated": false,
"ratio": 3.432314410480349,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45958004155803495,
"avg_score": null,
"num_lines": null
} |
# a-b-c-d-e-f-g
# i have gummy bears chasing me
# one is red, one is blue
# one is chewing on my shoe
# now i am running for my life
# because the red one has a knife
import sys
import json
import Secrets
class DataConnection(object):
    """Class that encapsulates account information and credentials for Azure Storage."""

    # Keys used when (de)serializing connection info to JSON.
    accountName = "Account Name"
    accountKey = "Account Key"
    accountKind = "Account Kind"
    notYetImplementedMsg = "Only Azure Storage accounts are currently supported."
    azureAccount = "azure"

    # Instance credential fields (assigned in __init__).
    _accountName = None
    _accountKey = None
    _accountKind = None

    def __init__(self, accountname, accountkey, kind=azureAccount):
        # BUG FIX: the default was written as DataConnection.azureAccount,
        # which raises NameError because the class name is not bound while
        # the class body is still executing; the bare name is in scope here.
        if DataConnection.azureAccount != kind:
            raise NotImplementedError(DataConnection.notYetImplementedMsg)
        # TODO: expand and update kind information
        self._accountName = accountname
        self._accountKey = accountkey
        self._accountKind = kind

    def ConnectionInfo(self):
        """Display account name and account kind."""
        if (self._accountKind == "azure"):
            print("%s: %s" % (DataConnection.accountName, self._accountName))
            print("%s: %s" % (DataConnection.accountKind, self._accountKind))
        else:
            raise NotImplementedError(DataConnection.notYetImplementedMsg)

    def ExportToJson(self, filepath):
        """Serialize this instance to an encrypted JSON file; returns True."""
        accountinfo = json.dumps({DataConnection.accountName: self._accountName,
                                  DataConnection.accountKey: self._accountKey,
                                  DataConnection.accountKind: self._accountKind})
        # BUG FIX: `import Secrets` binds the *module*, so the helper class
        # must be qualified as Secrets.Secrets. Also close the file
        # deterministically with a context manager.
        encryptedinfo = Secrets.Secrets._encryptContents(accountinfo)
        with open(filepath, 'wb') as filehandle:
            filehandle.write(encryptedinfo)
        print("Account info has been stored to '%s'" % filepath)
        return True

    @staticmethod
    def ImportFromJson(filepath):
        """Deserialize an instance from an encrypted JSON file."""
        with open(filepath, 'rb') as filehandle:
            filecontent = filehandle.read()
        # BUG FIX: the original json.loads'ed the *encrypted* bytes and then
        # called dict.get on the decrypted plaintext string. Decrypt first,
        # then parse the plaintext JSON.
        accountinfo = json.loads(Secrets.Secrets._decryptContents(filecontent))
        return DataConnection(accountinfo.get(DataConnection.accountName),
                              accountinfo.get(DataConnection.accountKey),
                              accountinfo.get(DataConnection.accountKind))
# end class DataConnection
| {
"repo_name": "andlin666/DataCachePhase1",
"path": "DataCachePhase1/DataConnection.py",
"copies": "1",
"size": "2451",
"license": "apache-2.0",
"hash": 1307915772245715500,
"line_mean": 35.5820895522,
"line_max": 88,
"alpha_frac": 0.6532027744,
"autogenerated": false,
"ratio": 4.424187725631769,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0028613525726552246,
"num_lines": 67
} |
# a-b-c-d-e-f-g
# i have gummy bears chasing me
# one is red, one is blue
# one is chewing on my shoe
# now i am running for my life
# because the red one has a knife
import sys
import os
import traceback
from numpy.random import randint
from azure.storage.blob import BlockBlobService
from DataConnection import DataConnection
from ContainerView import ContainerView
# fluff
# ANSI escape sequences used to emphasize console messages.
TEXTBOLD = '\033[1m'
TEXTFAIL = '\033[91m'
TEXTEND = '\033[0m'
# list of supported file extensions
# NOTE(review): neither list is referenced in this file's visible code.
IMAGEEXTENSION = [ ".jpg", ".jpeg", ".png" ]
DELIMITEDEXTENSION = [ ".csv", ".tsv" ]
class DataCache(object):
    """Base class for Azure blob data management. DataCache though represent an abtraction for data.
    This can include:
    (1) Local files, e.g. CVS or serialized objects
    (2) Cloud based files in Azure Object Store (a.k.a. Azure Blobs)
    (3) Azure SQL
    (4) Pendleton artifacts
    (5) Encapsualtion of other services, e.g. ADF
    """
    #TODO: some more TODO's that I will get to on Tuesday ... the rest are scattered through the code
    #TODO: write proper help comments so that print(obj.__doc__) works!
    #TODO: need a method for refreshing the cache
    #TODO: need a method for writing to blob
    #TODO: need a method for creating containers
    #TODO: need simple methods for deleting containers

    # Class-level defaults; all are re-assigned per instance in __init__.
    _dataConnection = None
    _blobService = None
    # target location
    _islocalpath = True
    _path = None
    # cached values (container name -> ContainerView)
    _containers = {}

    # start the properties section
    @property
    def Path(self):
        """Target path files are copied to/cleared from."""
        return self._path

    @Path.setter
    def Path(self, path):
        self._path = path

    # DESIGN: the main point is that DataCaches should not be allowed to be
    # DESIGN: intantiated if the DataConnection is invalid!
    # NOTE(review): the default DataConnection("","") is evaluated once at
    # class-definition time and shared by all default-constructed instances —
    # confirm that is intended.
    def __init__(self, dataconnection = DataConnection("","")):
        self._dataConnection = dataconnection
        try:
            self._blobService = BlockBlobService(self._dataConnection._accountName, self._dataConnection._accountKey)
            self._containers = DataCache._buildContainerViews(self._blobService)
        except Exception as e:
            print(TEXTBOLD + TEXTFAIL + "Unable to create Blob Service due to invalid DataConnection. ---->" + TEXTEND)
            print(TEXTBOLD + TEXTFAIL + "\t%s" % e + TEXTEND)
            print()
            # Bare raise preserves the original traceback (fix: `raise e`
            # re-raised from here and lost the inner frames).
            raise

    #TODO: sketch out the case for Azure Object Store, then see how or if
    #TODO: any of it actually generalizes! I just want to learn the SDK and
    #TODO: get started with the Kaggle in full. :)

    def ConnectionInfo(self):
        """Print connection and blob-service information."""
        print("-----------------------------------------------------")
        print("Connection Info: ")
        self._dataConnection.ConnectionInfo()
        print("-----------------------------------------------------")
        print("-----------------------------------------------------")
        print("Blob Service Info: ")
        print("Account Name: " + self._blobService.account_name)
        print("Blob Type: " + self._blobService.blob_type)
        print("-----------------------------------------------------")

    #TODO: need to understand and work out what sort of logging, analytics,
    #TODO: and additional information the class will support.

    def DisplayContainers(self):
        """Display every cached container and its blobs."""
        for container in self._containers:
            self._containers[container].DisplayContent()

    def GetContainerNames(self, sort = True):
        """Return the cached container names, sorted by default."""
        containers = list(self._containers.keys())
        if (sort):
            containers = sorted(containers)
        return containers

    def GetBlobNames(self, containerName, sort = True):
        """Return the blob names of a cached container, sorted by default."""
        containerView = self._containers[containerName]
        blobList = containerView.BlobList
        if (sort):
            blobList = sorted(blobList)
        return blobList

    def RefreshContainerViews(self):
        """Rebuild the cached views for every container."""
        self._containers = DataCache._buildContainerViews(self._blobService)

    def RefreshContainerView(self, containerName):
        """Rebuild the cached view of a single container.

        BUG FIX: the original built the new view but never stored it, so the
        cache stayed stale; store it back into self._containers.
        """
        self._containers[containerName] = DataCache._buildContainerView(self._blobService, containerName)

    def CopyBlobsToPath(self, containerName, overWrite = True):
        """Copy every blob of a container under Path/<containerName>/."""
        #DESIGN: for the first iteration of this just assume that the path is local, i.e.
        #DESIGN: a potential folder on the users machine. this can be generalized later.
        #DESIGN: also all the container contents are going to be downloaded.
        if not(self._validatePath()):
            return
        # Mirror the container layout under the target path.
        # NOTE(review): "\\" makes this Windows-specific — os.path.join would
        # generalize, but is left as-is to preserve behavior.
        path = self._path + "\\" + containerName + "\\"
        # local location
        if (self._islocalpath):
            self._createLocalPath(path)
        #TODO: update the class to have some related exceptions. this validation should
        #TODO: be in the same place where we get the list of blobs in a container!
        if not(self._validateContainerName(containerName)):
            return
        print("Copying contents of container '%s'" % containerName)
        blobList = self.GetBlobNames(containerName)
        self._copyBlobs(containerName, blobList, path, overWrite)

    def CopyRandomSampleBlobsToPath(self, containerName, N):
        """Copy a uniform random sample of N blobs (with replacement) to Path.

        If N exceeds the blob count, blobs may be selected repeatedly.
        """
        #DESIGN: for the first iteration of this just assume that the path is local, i.e.
        #DESIGN: a potential folder on the users machine. this can be generalized later.
        #DESIGN: also all the container contents are going to be downloaded.
        if not(self._validatePath()):
            return
        # Mirror the container layout under the target path.
        path = self._path + "\\" + containerName + "\\"
        # local location
        if (self._islocalpath):
            self._createLocalPath(path)
        #TODO: update the class to have some related exceptions. this validation should
        #TODO: be in the same place where we get the list of blobs in a container!
        if not(self._validateContainerName(containerName)):
            return
        # uniformly distributed random sampling of blobs
        blobList = self.GetBlobNames(containerName, False)
        indicesToSample = randint(0,len(blobList),N)
        blobsToSample = []
        for index in indicesToSample:
            blobsToSample.append(blobList[index])
        #TODO: need to clear out the specified target location. since we are sampling and asking
        #TODO: pull down a new sample the old one should not be augmented. this is an important point!
        #DESIGN: will need to work out capturing of the blobs that have been used.
        print("Copying random sample of contents from container '%s'." % containerName)
        self._copyBlobs(containerName, blobsToSample, path, True)

    def ClearBlobsFromPath(self, containerName):
        """Delete the local files mirrored from a container (hard cache flush)."""
        #DESIGN: this will deleted all files which essentially is a hard cache flush
        if not(self._validatePath()):
            return
        #TODO: see comment above about local path case!
        if (self._islocalpath):
            path = self._path + "\\" + containerName + "\\"
            print("Clearing files from '%s' correspoding to container '%s'." % (path, containerName))
            self._clearLocalPath(path)

    # ---------------------------------------------------------------------------
    # begin private helper methods

    @staticmethod
    def _buildContainerView(blobService, containerName):
        """Build one ContainerView by listing the container's blobs."""
        blobList = []
        # NOTE(review): list_blobs() is iterated directly here but via
        # `.items` in _buildContainerViews below — one of the two likely
        # targets a different SDK version; confirm which is correct.
        blobGenerator = blobService.list_blobs(containerName)
        for blob in blobGenerator:
            blobList.append(blob.name)
        return ContainerView(containerName, blobList)

    @staticmethod
    def _buildContainerViews(blobService):
        """Build ContainerViews for every container in the account."""
        containers = {}
        containerGenerator = blobService.list_containers().items
        for container in containerGenerator:
            containerName = container.name
            # containers[containerName] = DataCache._buildContainerView(blobService, containerName)
            blobList = []
            blobGenerator = blobService.list_blobs(containerName).items
            for blob in blobGenerator:
                blobList.append(blob.name)
            containers[containerName] = ContainerView(containerName, blobList)
        return containers

    def _validatePath(self):
        """Return True when Path is set; otherwise print an error and return False."""
        if self._path is None:
            #TODO: udpate with proper user exceptions and logging information
            print(TEXTBOLD + TEXTFAIL + "There is no specified path information. Please update 'Path' to specify a location." + TEXTEND)
            return False
        return True

    def _validateContainerName(self, containerName):
        """Return True when the container exists in the cached views."""
        if not(containerName in self._containers):
            #TODO: udpate with proper user exceptions and logging information
            print(TEXTBOLD + TEXTFAIL + "Specified container '%s' does not exist in the account." %containerName + TEXTEND)
            return False
        return True

    def _createLocalPath(self, path):
        """Create the target directory if it does not already exist."""
        if not(os.path.exists(path)):
            os.makedirs(path)

    def _clearLocalPath(self, path):
        """Delete every file in path, then remove the directory itself."""
        if not(os.path.exists(path)):
            print("The specified cache '%s' does not exist." % path)
            return
        files = os.listdir(path)
        for file in files:
            filepath = path + file
            print("Removing file '%s'" % filepath)
            os.remove(filepath)
        os.removedirs(path)

    def _copyBlobs(self, containerName, blobList, path, overWrite = True):
        """Copy the named blobs to path under a container lease."""
        #TODO: acquire container lease, but not sure if i need to do this???
        containerLeaseId = self._blobService.acquire_container_lease(containerName)
        for blobName in blobList:
            print("Copying '%s' to '%s'" % (blobName, path))
            targetfile = path + blobName
            if overWrite or not(os.path.exists(targetfile)):
                self._copyBlob(containerName, blobName, targetfile)
        self._blobService.release_container_lease(containerName, containerLeaseId)

    def _copyBlob(self, containerName, blobName, targetfile):
        """Download one blob's contents to targetfile."""
        #TODO: look into this lease nonsense
        #TODO: blobLeaseId = self._blobService.acquire_blob_lease(containerName,blobName)
        #TODO: self._blobService.get_blob_to_path(containerName, blobName, targetfile, blobLeaseId)
        #TODO: self._blobService.release_blob_lease(containerName, blobName, blobLeaseId)
        return self._blobService.get_blob_to_path(containerName, blobName, targetfile)
# end class DataCache
| {
"repo_name": "andlin666/DataCachePhase1",
"path": "DataCachePhase1/DataCache.py",
"copies": "1",
"size": "11552",
"license": "apache-2.0",
"hash": 8570662567537444000,
"line_mean": 38.4266211604,
"line_max": 136,
"alpha_frac": 0.6270775623,
"autogenerated": false,
"ratio": 4.487956487956488,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5615034050256488,
"avg_score": null,
"num_lines": null
} |
# a + b * c
# ATerm Graph
# ===========
#
# Arithmetic(
# Add
# , Array(){dshape("3, int64"), 45340864}
# , Arithmetic(
# Mul
# , Array(){dshape("3, int64"), 45340792}
# , Array(){dshape("3, int64"), 45341584}
# ){dshape("3, int64"), 45264528}
# ){dshape("3, int64"), 45264432}
# Execution Plan
# ==============
# vars %a %b %c
# %0 := ElemwiseNumpy{np.mul,nogil}(%b, %c)
# %0 := ElemwiseNumpy{np.add,nogil,inplace}(%0, %a)
# Responsibilities
# - allocate memory blocks on Blaze heap for LHS
# - determine whether to do operation inplace or to store the
# output in a temporary
#
# - Later: handle kernel fusion
# - Much Later: handle GPU access & thread control
from blaze.rts.storage import Heap
# =================================
# The main Blaze RTS execution loop
# =================================
# Invokes Executor functions and handles memory management from external
# sources to allocate on, IOPro allocators, SQL Queries, ZeroMQ...
# TOOD: write in Cython
def execplan(context, plan, symbols):
    """Takes a list of of instructions from the Pipeline and
    then allocates the necessary memory needed for the
    intermediates are temporaries.

    Returns the result of the last instruction that produced an lhs,
    or None when no instruction did (or the plan is empty).

    Fix: the original bound `last = plan[-1]` and never used it, which also
    raised IndexError on an empty plan; the unused local is removed.
    """
    h = Heap()
    ret = None
    for instruction in plan:
        # NOTE(review): these look loop-invariant (symbols is not mutated
        # in this loop), but hoisting would change how often asbuflist()/
        # datashape() are called — confirm they are side-effect free before
        # moving them out.
        ops = [symbols[sym] for sym in symbols]
        dds = [op.asbuflist() for op in ops]
        dss = [op.datashape() for op in ops]
        if instruction.lhs:
            # Reserve heap space for the instruction's output, then run it.
            h.allocate(instruction.lhs.size())
            ret = instruction(dds, dss)
        else:
            instruction(dds, dss)
    h.finalize()
    return ret
| {
"repo_name": "davidcoallier/blaze",
"path": "blaze/rts/execution.py",
"copies": "2",
"size": "1625",
"license": "bsd-2-clause",
"hash": -1840576519012141300,
"line_mean": 25.2096774194,
"line_max": 72,
"alpha_frac": 0.5907692308,
"autogenerated": false,
"ratio": 3.276209677419355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4866978908219355,
"avg_score": null,
"num_lines": null
} |
# ABC Parser for ABC Music Notation Files
from __future__ import division
import re
import string
import math
from Preprocess import globalConstant
class TuneBook(object):
    """
    Represents a tunebook with tunes and free text.

    Properties
    ----------
    text
        An array of free text blocks, as strings.
    tune
        An array of tunes, as Tune objects.
    """
    def __init__(self, filename=None):
        """
        Creates a TuneBook object. If a filename is given, the file is opened
        and parsed. If an invalid filename is given, throws IOError.
        """
        self.text = []  # array of text blocks as strings
        self.tune = []  # array of tunes as Tune
        if filename:
            # Fix: use a context manager so the file handle is closed even
            # when parse() raises (the original leaked it on error).
            with open(filename, 'Ur') as f:
                self.parse(f.read())

    def parse(self, str):
        """
        Parses the given input. Blank-line-separated chunks containing an
        'X:' field are parsed as tunes; everything else is kept as free text.
        """
        for lines in str.split('\n\n'):
            if 'x:' in lines.lower():
                tune = Tune()
                tune.parse(lines)
                self.tune.append(tune)
            else:
                self.text.append(lines)
##############################################################################
class Tune(object):
    """
    Represents an entire tune with information fields and music.

    Properties
    ----------
    text
        An array of the lines of the tune, as strings.
    line
        An array of the lines of the tune, as Line objects (see below).
    """
    def __init__(self, filename=None):
        """
        Creates a Tune object. If a filename is given, the file is opened and
        parsed. If an invalid filename is given, throws IOError.
        """
        self._fields = {}  # information fields, keyed by uppercase letter
        self.text = []     # array of tune lines as strings
        self.line = []     # array of tune lines as Line
        if filename:
            # Fix: context manager closes the handle even if parse() raises
            # (the original leaked it on error).
            with open(filename, 'Ur') as f:
                self.parse(f.read())

    def field(self, field):
        """
        Returns an information field (e.g., "T", "X"), or None if the given field
        doesn't exist.
        """
        return self._fields.get(field)

    def parse(self, str):
        """
        Parses the given input ABC string.
        """
        lineBuffer = ''
        lines = str.split('\n')
        for line in lines:
            # Strip comments ('%' to end of line), then surrounding whitespace.
            line = re.sub('%.*$', '', line)
            line = line.strip()
            # Ignore blank lines.
            if not line:
                continue
            # A letter followed by a colon is an information field.
            # (raw string fixes the invalid '\s' escape in the original)
            matches = re.match(r'([A-Za-z]):\s*(.*)', line)
            if matches:
                self._parseInformationField(matches.group(1), matches.group(2))
            elif line[-1] == "\\":
                # Continuation: buffer this line and join it with the next.
                lineBuffer += line.rstrip("\\")
            else:
                # Complete tune line (including any buffered continuations).
                lineBuffer += line
                self.text.append(lineBuffer)  # Store raw tune line.
                self.line.append(Line(lineBuffer))
                lineBuffer = ''

    def _parseInformationField(self, field, data):
        """Store an information field, uppercased; only the first occurrence
        of each field is kept."""
        field = field.upper()
        if field not in self._fields:
            self._fields[field] = data

    def getFields(self):
        """Return the dict of all information fields."""
        return self._fields
##############################################################################
class Line(object):
    """
    Represents one line in a tune.

    Properties
    ----------
    text
        The raw text that was parsed.
    measure
        An array of Measure objects representing the individual measures
        within the line.
    """
    def __init__(self, line=None):
        """
        Takes a text line and parses it.
        """
        self.text = None  # raw text of the line
        self.measure = [] # array of Measures
        if line:
            self.parse(line)

    def parse(self, line):
        """
        Parses a line of ABC.
        """
        self.__init__()
        self.text = line
        # Split the line into measures. Measure symbols are
        # |, |], ||, [|, |:, :|, ::
        measures = re.split(r'\||\|]|\|\||\[\||\|:|:\||::', line)
        # BUG FIX: the original removed items from `measures` while iterating
        # it, which skips consecutive empty measures (typically at line ends);
        # build a filtered list instead.
        measures = [m for m in measures if m.strip()]
        self.measure = []  # array of Measure objects
        for measure in measures:
            newMeasure = Measure()
            newMeasure.parse(measure)
            self.measure.append(newMeasure)

    def __str__(self):
        return self.text
##############################################################################
class Measure(object):
    """
    Represents one measure of a line of music.

    Properties
    ----------
    text
        The raw text of the measure that was parsed.
    item
        The Notes and Chords found in this measure, in order.
    repeat
        The simple repeat number ([1 or [2) for this measure, or None.
    """
    def __init__(self):
        """Build an empty Measure."""
        self._reset()

    def parse(self, text):
        """Parse a string of ABC into Notes and Chords."""
        self._reset()
        self.text = text
        ending = re.search('\[([12])', self.text)
        if ending:
            # First or second repeat marker.
            self.repeat = int(ending.group(1))
            self._pos += len(ending.group(0))
        while self._pos < len(self.text):
            ch = self.text[self._pos]
            if ch == '"':
                # Chords are quoted.
                self._parseChord()
            elif ch in "^=_" or ch == '#' or ch.isalpha():
                # Accidental prefix or note letter starts a note.
                self._parseNote()
            else:
                # Whitespace and anything unrecognized is skipped.
                self._pos += 1

    def _parseChord(self):
        """Parse one chord starting at the current position."""
        chord = Chord()
        body = chord.parse(self.text[self._pos:])
        chord.beat = self._beat
        self._beat += chord.duration
        self.item.append(chord)
        # parse() returns the text without quotes; add 2 to cover them.
        self._pos += len(body) + 2

    def _parseNote(self):
        """Parse one note starting at the current position."""
        note = Note()
        raw, _dur, _plus, _flag = note.parse(self.text[self._pos:])
        note.beat = self._beat
        self._beat += note.duration
        self.item.append(note)
        self._pos += len(raw)

    def _reset(self):
        """Clear all parsing state."""
        self.item = []      # Chords and Notes in this measure
        self.text = None    # raw text of the measure
        self.repeat = None  # repeat number (1 or 2), if any
        self._pos = 0       # parse cursor within the measure
        self._beat = 1      # running beat counter while parsing

    def __str__(self):
        return self.text
##############################################################################
class MusicItem(object):
    """
    Abstract base class for "things" that appear in a line of music:
    notes and chords.

    Properties
    ----------
    duration
        Length of this item as a float, e.g., 0.25, 1, etc.
    beat
        The beat on which this item occurs (float). Starts at 1.
    text
        The raw text of this item.
    """
    def __init__(self):
        # Length of the item (e.g. 0.25, 0.5, 2), the beat it occurs on,
        # and the raw tune text that makes it up.
        self.duration = 0.0
        self.beat = 0.0
        self.text = ''

    def __str__(self):
        return self.text
##############################################################################
class Chord(MusicItem):
    """
    Represents a chord.
    """
    def __init__(self):
        super(Chord, self).__init__()

    def parse(self, str):
        """
        Parses a chord out of the given string. Returns the raw text that
        was parsed from str without the surrounding double quotes.
        """
        if not (str and str[0] == '"'):
            raise RuntimeError('Chord does not begin with ".' + str)
        closing = str.find('"', 1)
        if closing < 0:
            # No terminator: mirror the original, which had consumed the
            # whole string into self.text before raising.
            self.text = str
            raise RuntimeError('Chord does not end with ":' + str)
        # Keep only the text between the double quotes.
        self.text = str[1:closing]
        return self.text
##############################################################################
#get duration information
class Note(MusicItem):
    """
    Represents a single note parsed from ABC notation.
    Properties
    ----------
    prefix
        Optional ^, =, or _ (sharp, natural, or flat).
    note
        The note character itself, A, B, etc.
    suffix
        Optional ' or , (raise or lower an octave).
    length
        Optional note length string, e.g. /4, 2, 3/2, <, >.
    """
    def __init__(self):
        super(Note, self).__init__()
        self.prefix = None # optional ^, =, or _ (accidental)
        self.note = None # note character [A-z]
        self.suffix = None # optional ' or , (octave shift)
        self.length = None # optional length indication (raw string)
        self.nextNoteDurationPlus = 0.0 # duration delta handed to the next note when this note carries < or >
        self.nextNoteDurationFlag = False # whether the next note must apply that delta
    def parse(self, str, nextNoteDurationPlus = 0.0, nextNoteDurationFlag = False):
        """
        Parse one note out of the given string.
        Parameters
        ----------
        str
            The raw token to parse (note: shadows the builtin ``str``).
        nextNoteDurationPlus
            Duration delta carried over from a previous note's < or > marker.
        nextNoteDurationFlag
            True when the previous note carried a < or > marker.
        Returns
        -------
        tuple of (text, duration, nextNoteDurationPlus, nextNoteDurationFlag)
            The raw parsed text, the computed duration (float), and the
            carry-over delta/flag for the *next* note.
        Raises
        ------
        RuntimeError
            If no note letter is found at the expected position.
        """
        # Re-running __init__ resets all fields so one Note instance can be reused.
        self.__init__()
        pos = 0
        # Special sentinel token: an '#ending' marker has no duration of its own.
        if str == '#ending':
            self.text = '#ending'
            self.duration = 0
            self.nextNoteDurationPlus = 0.0
            self.nextNoteDurationFlag = False
            return self.text, self.duration , self.nextNoteDurationPlus, self.nextNoteDurationFlag
        if pos < len(str) and str[pos] in "^=_":
            # Sharp, natural, or flat symbol.
            self.text += str[pos]
            self.prefix = str[pos]
            pos += 1
        if pos < len(str) and str[pos].isalpha():
            # Note letter.
            self.text += str[pos]
            self.note = str[pos]
            pos += 1
        else:
            raise RuntimeError('Note does not contain a character: ' + str.__str__())
        if pos < len(str) and str[pos] in "',":
            # Raise or lower the note an octave.
            self.text += str[pos]
            self.suffix = str[pos]
            pos += 1
        while pos < len(str) and str[pos] in "/0123456789><":
            # Accumulate the note length characters (digits, /, <, >).
            self.text += str[pos]
            if not self.length:
                self.length = ""
            self.length += str[pos]
            pos += 1
        # Turn the note length (string) into a duration (float),
        # assuming all input data is valid.
        # __str__() tolerates self.length being None ('None' contains no / < >).
        slash_count = self.length.__str__().count('/')
        # The dotted-note (< / >) notation is only defined between two notes
        # of equal length.
        left_count = self.length.__str__().count('<')
        right_count = self.length.__str__().count('>')
        self.nextNoteDurationFlag = nextNoteDurationFlag
        self.nextNoteDurationPlus = nextNoteDurationPlus
        #print(self.length)
        # Case 1: a bare note with no length modifier at all.
        if self.length is None:
            # If the previous note carried a < or > suffix, apply its delta.
            if self.nextNoteDurationFlag == True:
                self.duration = globalConstant.nextNoteDurationBase + self.nextNoteDurationPlus
                #print(self.duration)
            # Otherwise use the plain base duration.
            else:
                self.duration = globalConstant.nextNoteDurationBase
                #print(self.duration)
            self.nextNoteDurationPlus = 0.0
            self.nextNoteDurationFlag = False
        # Case 2: a note followed only by a plain number (e.g. "C2").
        elif slash_count ==0 and left_count ==0 and right_count ==0:
            # NOTE(review): re.match('[0-9]') uses only the FIRST digit, so a
            # multi-digit length like 16 is truncated to 1 — verify intended.
            if self.nextNoteDurationFlag:
                self.duration = float(re.match('[0-9]', self.length).group(0)) + self.nextNoteDurationPlus
            else:
                self.duration = float(re.match('[0-9]', self.length).group(0))
            self.nextNoteDurationPlus = 0.0
            self.nextNoteDurationFlag = False
        else:
            # Case 3: length contains exactly one '/'.
            if slash_count == 1:
                # A lone '/' with no digits means a half-length note.
                if re.search('[0-9]', self.length) == None:
                    if self.nextNoteDurationFlag == True:
                        self.duration = 1/2 + self.nextNoteDurationPlus
                    else:
                        self.duration = 1/2
                # '/' with one or two digits.
                else:
                    nums = re.findall('[0-9]', self.length)
                    # Two digits: a full fraction like 3/2.
                    # NOTE(review): eval() on regex-restricted digit/slash text;
                    # fractions.Fraction would be clearer and safer.
                    if len(nums) == 2:
                        if self.nextNoteDurationFlag == True:
                            self.duration = eval(re.match('[0-9]/[0-9]', self.length).group(0)) + self.nextNoteDurationPlus
                        else:
                            self.duration = eval(re.match('[0-9]/[0-9]', self.length).group(0))
                    # One digit: either /N (meaning 1/N) or N/ (meaning N/2).
                    elif len(nums) == 1:
                        # Like /3, which means 1/3.
                        if re.search('[0-9]/', self.length) == None:
                            if self.nextNoteDurationFlag == True:
                                #self.duration = eval('1/' + re.search('/[0-9]', self.length).group(0)) + _nextNoteDurationPlus
                                self.duration = eval('1/' + nums[0]) + self.nextNoteDurationPlus
                            else:
                                #self.duration = eval('1' + re.search('/[0-9]', self.length).group(0))
                                self.duration = eval('1/' + nums[0])
                        # Like 3/, which means 3/2.
                        else:
                            if self.nextNoteDurationFlag == True:
                                self.duration = eval(nums[0] + '/2') + self.nextNoteDurationPlus
                            else:
                                self.duration = eval(nums[0] + '/2')
            # Case 4: multiple slashes — each extra '/' halves the base again.
            elif slash_count > 1:
                if self.nextNoteDurationFlag == True:
                    self.duration = globalConstant.nextNoteDurationBase / math.pow(2, slash_count) + self.nextNoteDurationPlus
                else:
                    self.duration = globalConstant.nextNoteDurationBase / math.pow(2, slash_count)
            # Case 5: no slash — length is only < / > markers, possibly a digit.
            else:
                # No digit at all: base duration (plus any carried delta).
                if re.search('[0-9]', self.length) == None:
                    if self.nextNoteDurationFlag == True:
                        self.duration = globalConstant.nextNoteDurationBase +self.nextNoteDurationPlus
                        #print(self.duration)
                    else:
                        self.duration = globalConstant.nextNoteDurationBase
                # One digit present alongside the markers.
                else:
                    if self.nextNoteDurationFlag == True:
                        self.duration = float(re.search('[0-9]', self.length).group(0)) + self.nextNoteDurationPlus
                    else:
                        self.duration = float(re.search('[0-9]', self.length).group(0))
            # '<': this note is shortened; the removed part is credited to the
            # next note via the carry-over fields.
            if left_count != 0:
                takeaway_part = self.duration / math.pow(2, left_count)
                self.duration = takeaway_part
                self.nextNoteDurationFlag = True
                self.nextNoteDurationPlus = takeaway_part
            # '>': this note is lengthened; the next note is debited.
            elif right_count != 0:
                takeaway_part = self.duration / math.pow(2, right_count)
                self.duration = self.duration + takeaway_part
                self.nextNoteDurationFlag = True
                self.nextNoteDurationPlus = -(takeaway_part)
            # Neither marker: nothing carries over to the next note.
            else:
                self.nextNoteDurationFlag = False
                self.nextNoteDurationPlus = 0.0
        return self.text, self.duration , self.nextNoteDurationPlus, self.nextNoteDurationFlag
| {
"repo_name": "ChyauAng/DNN-Composer",
"path": "src/Preprocess/abcParser.py",
"copies": "1",
"size": "14802",
"license": "mit",
"hash": 4620247068490036000,
"line_mean": 26.8757062147,
"line_max": 111,
"alpha_frac": 0.618294825,
"autogenerated": false,
"ratio": 3.2332896461336826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9011561574924651,
"avg_score": 0.06800457924180627,
"num_lines": 531
} |
# A, B, C
# Random walk over a small directed graph, counting how often each node is
# visited across `ts` walks.
# NOTE(review): Python 2 source (print statement at the bottom) and it indexes
# T1.nodes(data=True) by position, which matches the networkx 1.x list-based
# API — confirm before running on networkx 2+.
# NOTE(review): pylab, pprint and plt are imported but unused in this script.
import pylab
import networkx as nx
import numpy as np
import random as rd
from pprint import pprint
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['text.usetex'] = True
#create the graph
#ex 0->1->2->0 1->3
T1 = nx.DiGraph()
T1.add_edge(0,1)
T1.add_edge(1,2)
T1.add_edge(2,0)
T1.add_edge(1,3)
#set counts at each node to 0 (stored as node attribute 'ct')
for n in range(0, len(T1.nodes())):
    T1.add_node(n, ct =0)
#constants
ts = 10              # number of independent walks
tuple_index = 1      # (node, data) tuples: index 1 is the attribute dict
num_nodes = len(T1.nodes(data=True))
for x in range(0,ts):
    #select a uniformly random starting node
    current_node= np.random.randint(low = 0, high =num_nodes, size = 1)[0]
    visited = set()
    # Walk until every successor of the current node has already been visited;
    # each step increments the visited node's 'ct' counter.
    while True:
        visited.add(current_node)
        current_count = T1.nodes(data = True)[current_node][tuple_index]['ct']
        T1.add_node(current_node, ct = current_count+1)
        current_successors = set(T1.successors(current_node))
        valid_successors = current_successors.difference(visited)
        if not valid_successors:
            break
        # Move to a uniformly random unvisited successor.
        current_node = rd.sample(valid_successors,1)[0]
# Report the accumulated visit counts.
for x in range(0,num_nodes):
    walk_count = T1.nodes(data=True)[x][tuple_index]['ct']
    print "node", x, "has been walked through", walk_count, " times "
| {
"repo_name": "mac389/petulant-network",
"path": "src/randomWalk.py",
"copies": "1",
"size": "1290",
"license": "apache-2.0",
"hash": 7607643401010109000,
"line_mean": 22.8076923077,
"line_max": 79,
"alpha_frac": 0.6240310078,
"autogenerated": false,
"ratio": 2.9119638826185104,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8875757525915441,
"avg_score": 0.03204747290061381,
"num_lines": 52
} |
"""ABCs."""
# Authors: Guillaume Favelier <guillaume.favelier@gmail.com
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from abc import ABC, abstractmethod, abstractclassmethod
from contextlib import nullcontext
import warnings
from ..utils import tight_layout
class _AbstractRenderer(ABC):
    """Abstract interface that every 3D rendering backend must implement.

    All methods here are abstract *instance* methods. The original code
    decorated them with ``abstractclassmethod``, which is deprecated since
    Python 3.3 and wrong for methods that take ``self``; ``abstractmethod``
    (already imported in this module) declares the same contract correctly.
    """

    @abstractmethod
    def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.),
                 name=None, show=False, shape=(1, 1)):
        """Set up the scene."""
        pass

    @abstractmethod
    def subplot(self, x, y):
        """Set the active subplot."""
        pass

    @abstractmethod
    def scene(self):
        """Return scene handle."""
        pass

    @abstractmethod
    def set_interaction(self, interaction):
        """Set interaction mode."""
        pass

    @abstractmethod
    def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
             backface_culling=False, scalars=None, colormap=None,
             vmin=None, vmax=None, interpolate_before_map=True,
             representation='surface', line_width=1., normals=None,
             polygon_offset=None, **kwargs):
        """Add a mesh in the scene.
        Parameters
        ----------
        x : array, shape (n_vertices,)
           The array containing the X component of the vertices.
        y : array, shape (n_vertices,)
           The array containing the Y component of the vertices.
        z : array, shape (n_vertices,)
           The array containing the Z component of the vertices.
        triangles : array, shape (n_polygons, 3)
           The array containing the indices of the polygons.
        color : tuple | str
            The color of the mesh as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (i.e. 'white'
            or 'w').
        opacity : float
            The opacity of the mesh.
        shading : bool
            If True, enable the mesh shading.
        backface_culling : bool
            If True, enable backface culling on the mesh.
        scalars : ndarray, shape (n_vertices,)
            The scalar valued associated to the vertices.
        vmin : float | None
            vmin is used to scale the colormap.
            If None, the min of the data will be used
        vmax : float | None
            vmax is used to scale the colormap.
            If None, the max of the data will be used
        colormap :
            The colormap to use.
        interpolate_before_map :
            Enabling makes for a smoother scalars display. Default is True.
            When False, OpenGL will interpolate the mapped colors which can
            result is showing colors that are not present in the color map.
        representation : str
            The representation of the mesh: either 'surface' or 'wireframe'.
        line_width : int
            The width of the lines when representation='wireframe'.
        normals : array, shape (n_vertices, 3)
            The array containing the normal of each vertex.
        polygon_offset : float
            If not None, the factor used to resolve coincident topology.
        kwargs : args
            The arguments to pass to triangular_mesh
        Returns
        -------
        surface :
            Handle of the mesh in the scene.
        """
        pass

    @abstractmethod
    def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
                vmin=None, vmax=None, colormap=None,
                normalized_colormap=False, kind='line', color=None):
        """Add a contour in the scene.
        Parameters
        ----------
        surface : surface object
            The mesh to use as support for contour.
        scalars : ndarray, shape (n_vertices,)
            The scalar valued associated to the vertices.
        contours : int | list
             Specifying a list of values will only give the requested contours.
        width : float
            The width of the lines or radius of the tubes.
        opacity : float
            The opacity of the contour.
        vmin : float | None
            vmin is used to scale the colormap.
            If None, the min of the data will be used
        vmax : float | None
            vmax is used to scale the colormap.
            If None, the max of the data will be used
        colormap :
            The colormap to use.
        normalized_colormap : bool
            Specify if the values of the colormap are between 0 and 1.
        kind : 'line' | 'tube'
            The type of the primitives to use to display the contours.
        color :
            The color of the mesh as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (i.e. 'white'
            or 'w').
        """
        pass

    @abstractmethod
    def surface(self, surface, color=None, opacity=1.0,
                vmin=None, vmax=None, colormap=None,
                normalized_colormap=False, scalars=None,
                backface_culling=False, polygon_offset=None):
        """Add a surface in the scene.
        Parameters
        ----------
        surface : surface object
            The information describing the surface.
        color : tuple | str
            The color of the surface as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (i.e. 'white'
            or 'w').
        opacity : float
            The opacity of the surface.
        vmin : float | None
            vmin is used to scale the colormap.
            If None, the min of the data will be used
        vmax : float | None
            vmax is used to scale the colormap.
            If None, the max of the data will be used
        colormap :
            The colormap to use.
        scalars : ndarray, shape (n_vertices,)
            The scalar valued associated to the vertices.
        backface_culling : bool
            If True, enable backface culling on the surface.
        polygon_offset : float
            If not None, the factor used to resolve coincident topology.
        """
        pass

    @abstractmethod
    def sphere(self, center, color, scale, opacity=1.0,
               resolution=8, backface_culling=False,
               radius=None):
        """Add sphere in the scene.
        Parameters
        ----------
        center : ndarray, shape(n_center, 3)
            The list of centers to use for the sphere(s).
        color : tuple | str
            The color of the sphere as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (i.e. 'white'
            or 'w').
        scale : float
            The scaling applied to the spheres. The given value specifies
            the maximum size in drawing units.
        opacity : float
            The opacity of the sphere(s).
        resolution : int
            The resolution of the sphere created. This is the number
            of divisions along theta and phi.
        backface_culling : bool
            If True, enable backface culling on the sphere(s).
        radius : float | None
            Replace the glyph scaling by a fixed radius value for each
            sphere (not supported by mayavi).
        """
        pass

    @abstractmethod
    def tube(self, origin, destination, radius=0.001, color='white',
             scalars=None, vmin=None, vmax=None, colormap='RdBu',
             normalized_colormap=False, reverse_lut=False):
        """Add tube in the scene.
        Parameters
        ----------
        origin : array, shape(n_lines, 3)
            The coordinates of the first end of the tube(s).
        destination : array, shape(n_lines, 3)
            The coordinates of the other end of the tube(s).
        radius : float
            The radius of the tube(s).
        color : tuple | str
            The color of the tube as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (i.e. 'white'
            or 'w').
        scalars : array, shape (n_quivers,) | None
            The optional scalar data to use.
        vmin : float | None
            vmin is used to scale the colormap.
            If None, the min of the data will be used
        vmax : float | None
            vmax is used to scale the colormap.
            If None, the max of the data will be used
        colormap :
            The colormap to use.
        opacity : float
            The opacity of the tube(s).
        backface_culling : bool
            If True, enable backface culling on the tube(s).
        reverse_lut : bool
            If True, reverse the lookup table.
        Returns
        -------
        surface :
            Handle of the tube in the scene.
        """
        pass

    @abstractmethod
    def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
                 glyph_height=None, glyph_center=None, glyph_resolution=None,
                 opacity=1.0, scale_mode='none', scalars=None,
                 backface_culling=False, colormap=None, vmin=None, vmax=None,
                 line_width=2., name=None):
        """Add quiver3d in the scene.
        Parameters
        ----------
        x : array, shape (n_quivers,)
            The X component of the position of the quiver.
        y : array, shape (n_quivers,)
            The Y component of the position of the quiver.
        z : array, shape (n_quivers,)
            The Z component of the position of the quiver.
        u : array, shape (n_quivers,)
            The last X component of the quiver.
        v : array, shape (n_quivers,)
            The last Y component of the quiver.
        w : array, shape (n_quivers,)
            The last Z component of the quiver.
        color : tuple | str
            The color of the quiver as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (i.e. 'white'
            or 'w').
        scale : float
            The scaling applied to the glyphs. The size of the glyph
            is by default calculated from the inter-glyph spacing.
            The given value specifies the maximum glyph size in drawing units.
        mode : 'arrow', 'cone' or 'cylinder'
            The type of the quiver.
        resolution : int
            The resolution of the glyph created. Depending on the type of
            glyph, it represents the number of divisions in its geometric
            representation.
        glyph_height : float
            The height of the glyph used with the quiver.
        glyph_center : tuple
            The center of the glyph used with the quiver: (x, y, z).
        glyph_resolution : float
            The resolution of the glyph used with the quiver.
        opacity : float
            The opacity of the quiver.
        scale_mode : 'vector', 'scalar' or 'none'
            The scaling mode for the glyph.
        scalars : array, shape (n_quivers,) | None
            The optional scalar data to use.
        backface_culling : bool
            If True, enable backface culling on the quiver.
        colormap :
            The colormap to use.
        vmin : float | None
            vmin is used to scale the colormap.
            If None, the min of the data will be used
        vmax : float | None
            vmax is used to scale the colormap.
            If None, the max of the data will be used
        line_width : float
            The width of the 2d arrows.
        """
        pass

    @abstractmethod
    def text2d(self, x_window, y_window, text, size=14, color='white'):
        """Add 2d text in the scene.
        Parameters
        ----------
        x : float
            The X component to use as position of the text in the
            window coordinates system (window_width, window_height).
        y : float
            The Y component to use as position of the text in the
            window coordinates system (window_width, window_height).
        text : str
            The content of the text.
        size : int
            The size of the font.
        color : tuple | str
            The color of the text as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (i.e. 'white'
            or 'w').
        """
        pass

    @abstractmethod
    def text3d(self, x, y, z, text, width, color='white'):
        """Add 2d text in the scene.
        Parameters
        ----------
        x : float
            The X component to use as position of the text.
        y : float
            The Y component to use as position of the text.
        z : float
            The Z component to use as position of the text.
        text : str
            The content of the text.
        width : float
            The width of the text.
        color : tuple | str
            The color of the text as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (i.e. 'white'
            or 'w').
        """
        pass

    @abstractmethod
    def scalarbar(self, source, color="white", title=None, n_labels=4,
                  bgcolor=None):
        """Add a scalar bar in the scene.
        Parameters
        ----------
        source :
            The object of the scene used for the colormap.
        color :
            The color of the label text.
        title : str | None
            The title of the scalar bar.
        n_labels : int | None
            The number of labels to display on the scalar bar.
        bgcolor :
            The color of the background when there is transparency.
        """
        pass

    @abstractmethod
    def show(self):
        """Render the scene."""
        pass

    @abstractmethod
    def close(self):
        """Close the scene."""
        pass

    @abstractmethod
    def set_camera(self, azimuth=None, elevation=None, distance=None,
                   focalpoint=None, roll=None, reset_camera=True):
        """Configure the camera of the scene.
        Parameters
        ----------
        azimuth : float
            The azimuthal angle of the camera.
        elevation : float
            The zenith angle of the camera.
        distance : float
            The distance to the focal point.
        focalpoint : tuple
            The focal point of the camera: (x, y, z).
        roll : float
            The rotation of the camera along its axis.
        reset_camera : bool
            If True, reset the camera properties beforehand.
        """
        pass

    @abstractmethod
    def reset_camera(self):
        """Reset the camera properties."""
        pass

    @abstractmethod
    def screenshot(self, mode='rgb', filename=None):
        """Take a screenshot of the scene.
        Parameters
        ----------
        mode : str
            Either 'rgb' or 'rgba' for values to return.
            Default is 'rgb'.
        filename : str | None
            If not None, save the figure to the disk.
        """
        pass

    @abstractmethod
    def project(self, xyz, ch_names):
        """Convert 3d points to a 2d perspective.
        Parameters
        ----------
        xyz : array, shape(n_points, 3)
            The points to project.
        ch_names : array, shape(_n_points,)
            Names of the channels.
        """
        pass

    @abstractmethod
    def enable_depth_peeling(self):
        """Enable depth peeling."""
        pass

    @abstractmethod
    def remove_mesh(self, mesh_data):
        """Remove the given mesh from the scene.
        Parameters
        ----------
        mesh_data : tuple | Surface
            The mesh to remove.
        """
        pass
class _AbstractToolBar(ABC):
    """Abstract interface: tool-bar hooks a GUI backend must implement."""
    @abstractmethod
    def _tool_bar_load_icons(self):
        """Load the icon set used by the tool bar."""
        pass
    @abstractmethod
    def _tool_bar_initialize(self, name="default", window=None):
        """Create the tool bar, optionally attached to *window*."""
        pass
    @abstractmethod
    def _tool_bar_add_button(self, name, desc, func, icon_name=None,
                             shortcut=None):
        """Add a push button that triggers *func*."""
        pass
    @abstractmethod
    def _tool_bar_update_button_icon(self, name, icon_name):
        """Change the icon of an existing button."""
        pass
    @abstractmethod
    def _tool_bar_add_text(self, name, value, placeholder):
        """Add a text-entry widget to the tool bar."""
        pass
    @abstractmethod
    def _tool_bar_add_spacer(self):
        """Add an expanding spacer to the tool bar."""
        pass
    @abstractmethod
    def _tool_bar_add_file_button(self, name, desc, func, shortcut=None):
        """Add a button that opens a file dialog and calls *func*."""
        pass
    @abstractmethod
    def _tool_bar_add_play_button(self, name, desc, func, shortcut=None):
        """Add a play/pause button that triggers *func*."""
        pass
    @abstractmethod
    def _tool_bar_set_theme(self, theme):
        """Apply *theme* to the tool bar."""
        pass
class _AbstractDock(ABC):
    """Abstract interface: dock-panel widget hooks a GUI backend must implement."""
    @abstractmethod
    def _dock_initialize(self, window=None):
        """Create the dock area, optionally attached to *window*."""
        pass
    @abstractmethod
    def _dock_finalize(self):
        """Finish dock construction after all widgets are added."""
        pass
    @abstractmethod
    def _dock_show(self):
        """Make the dock visible."""
        pass
    @abstractmethod
    def _dock_hide(self):
        """Hide the dock."""
        pass
    @abstractmethod
    def _dock_add_stretch(self, layout):
        """Append a stretchable spacer to *layout*."""
        pass
    @abstractmethod
    def _dock_add_layout(self, vertical=True):
        """Create and return a new (vertical by default) sub-layout."""
        pass
    @abstractmethod
    def _dock_add_label(self, value, align=False, layout=None):
        """Add a text label to *layout*."""
        pass
    @abstractmethod
    def _dock_add_button(self, name, callback, layout=None):
        """Add a button wired to *callback*."""
        pass
    @abstractmethod
    def _dock_named_layout(self, name, layout, compact):
        """Return a layout labelled with *name* (compact or not)."""
        pass
    @abstractmethod
    def _dock_add_slider(self, name, value, rng, callback,
                         compact=True, double=False, layout=None):
        """Add a slider over range *rng* wired to *callback*."""
        pass
    @abstractmethod
    def _dock_add_spin_box(self, name, value, rng, callback,
                           compact=True, double=True, layout=None):
        """Add a spin box over range *rng* wired to *callback*."""
        pass
    @abstractmethod
    def _dock_add_combo_box(self, name, value, rng,
                            callback, compact=True, layout=None):
        """Add a combo box with choices *rng* wired to *callback*."""
        pass
    @abstractmethod
    def _dock_add_group_box(self, name, layout=None):
        """Add a named group box container to *layout*."""
        pass
class _AbstractMenuBar(ABC):
    """Abstract interface: menu-bar hooks a GUI backend must implement."""
    @abstractmethod
    def _menu_initialize(self, window=None):
        """Create the menu bar, optionally attached to *window*."""
        pass
    @abstractmethod
    def _menu_add_submenu(self, name, desc):
        """Add a submenu identified by *name* with label *desc*."""
        pass
    @abstractmethod
    def _menu_add_button(self, menu_name, name, desc, func):
        """Add an entry to submenu *menu_name* that triggers *func*."""
        pass
class _AbstractStatusBar(ABC):
    """Abstract interface: status-bar hooks a GUI backend must implement."""
    @abstractmethod
    def _status_bar_initialize(self, window=None):
        """Create the status bar, optionally attached to *window*."""
        pass
    @abstractmethod
    def _status_bar_add_label(self, value, stretch=0):
        """Add a text label with the given stretch factor."""
        pass
    @abstractmethod
    def _status_bar_add_progress_bar(self, stretch=0):
        """Add a progress bar with the given stretch factor."""
        pass
    @abstractmethod
    def _status_bar_update(self):
        """Refresh the status bar contents."""
        pass
class _AbstractPlayback(ABC):
    """Abstract interface: playback/timer hook a GUI backend must implement."""
    @abstractmethod
    def _playback_initialize(self, func, timeout, value, rng,
                             time_widget, play_widget):
        """Set up periodic playback calling *func* every *timeout*."""
        pass
class _AbstractLayout(ABC):
    """Abstract interface: layout-management hooks a GUI backend must implement."""
    @abstractmethod
    def _layout_initialize(self, max_width):
        """Create the top-level layout constrained to *max_width*."""
        pass
    @abstractmethod
    def _layout_add_widget(self, layout, widget, stretch=0):
        """Insert *widget* into *layout* with the given stretch factor."""
        pass
class _AbstractWidget(ABC):
    """Abstract wrapper around a single native GUI widget."""
    def __init__(self, widget):
        # The native backend widget being wrapped (opaque at this level).
        self._widget = widget
    @property
    def widget(self):
        """Return the wrapped native widget."""
        return self._widget
    @abstractmethod
    def set_value(self, value):
        """Set the widget's current value."""
        pass
    @abstractmethod
    def get_value(self):
        """Return the widget's current value."""
        pass
    @abstractmethod
    def set_range(self, rng):
        """Set the widget's allowed value range."""
        pass
    @abstractmethod
    def show(self):
        """Make the widget visible."""
        pass
    @abstractmethod
    def hide(self):
        """Hide the widget."""
        pass
    @abstractmethod
    def update(self, repaint=True):
        """Refresh the widget, optionally forcing a repaint."""
        pass
class _AbstractMplInterface(ABC):
    """Abstract interface: matplotlib-canvas setup hook for a backend."""
    @abstractmethod
    def _mpl_initialize():
        # NOTE(review): declared without `self` in the original — implementers
        # presumably provide it; confirm against the concrete backends.
        pass
class _AbstractMplCanvas(ABC):
    """Base class wrapping a matplotlib Figure for embedding in a backend.

    NOTE(review): ``self.canvas`` is not created here — subclasses/backends
    are expected to attach it before ``_connect``/``show``/``close`` are used
    (``clear`` resets it to None).
    """
    def __init__(self, width, height, dpi):
        """Initialize the MplCanvas."""
        from matplotlib import rc_context
        from matplotlib.figure import Figure
        # prefer constrained layout here but live with tight_layout otherwise
        context = nullcontext
        self._extra_events = ('resize',)
        try:
            # Older matplotlib without this rcParam raises KeyError here,
            # in which case we fall back to nullcontext + resize handling.
            context = rc_context({'figure.constrained_layout.use': True})
            self._extra_events = ()
        except KeyError:
            pass
        with context:
            self.fig = Figure(figsize=(width, height), dpi=dpi)
            self.axes = self.fig.add_subplot(111)
            self.axes.set(xlabel='Time (sec)', ylabel='Activation (AU)')
        self.manager = None
    def _connect(self):
        # Wire matplotlib events to the corresponding on_<event> handlers.
        for event in ('button_press', 'motion_notify') + self._extra_events:
            self.canvas.mpl_connect(
                event + '_event', getattr(self, 'on_' + event))
    def plot(self, x, y, label, update=True, **kwargs):
        """Plot a curve and return its Line2D handle."""
        line, = self.axes.plot(
            x, y, label=label, **kwargs)
        if update:
            self.update_plot()
        return line
    def plot_time_line(self, x, label, update=True, **kwargs):
        """Plot the vertical time line and return its handle."""
        line = self.axes.axvline(x, label=label, **kwargs)
        if update:
            self.update_plot()
        return line
    def update_plot(self):
        """Redraw the canvas, silencing constrained_layout warnings."""
        with warnings.catch_warnings(record=True):
            warnings.filterwarnings('ignore', 'constrained_layout')
            self.canvas.draw()
    def set_color(self, bg_color, fg_color):
        """Set the widget background/foreground colors."""
        self.axes.set_facecolor(bg_color)
        self.axes.xaxis.label.set_color(fg_color)
        self.axes.yaxis.label.set_color(fg_color)
        self.axes.spines['top'].set_color(fg_color)
        self.axes.spines['bottom'].set_color(fg_color)
        self.axes.spines['left'].set_color(fg_color)
        self.axes.spines['right'].set_color(fg_color)
        self.axes.tick_params(axis='x', colors=fg_color)
        self.axes.tick_params(axis='y', colors=fg_color)
        self.fig.patch.set_facecolor(bg_color)
    def show(self):
        """Show the canvas, via its manager when one exists."""
        if self.manager is None:
            self.canvas.show()
        else:
            self.manager.show()
    def close(self):
        """Close the canvas."""
        self.canvas.close()
    def clear(self):
        """Close the canvas and drop all figure/canvas references."""
        self.close()
        self.axes.clear()
        self.fig.clear()
        self.canvas = None
        self.manager = None
    def on_resize(self, event):
        """Handle resize events (only connected when constrained layout is unavailable)."""
        tight_layout(fig=self.axes.figure)
class _AbstractBrainMplCanvas(_AbstractMplCanvas):
    """MplCanvas specialization tied to a Brain instance for time browsing."""
    def __init__(self, brain, width, height, dpi):
        """Initialize the MplCanvas."""
        super().__init__(width, height, dpi)
        self.brain = brain
        # Callback used to move the brain's current time on clicks.
        self.time_func = brain.callbacks["time"]
    def update_plot(self):
        """Update the plot, restyling the legend to match the brain colors."""
        leg = self.axes.legend(
            prop={'family': 'monospace', 'size': 'small'},
            framealpha=0.5, handlelength=1.,
            facecolor=self.brain._bg_color)
        for text in leg.get_texts():
            text.set_color(self.brain._fg_color)
        super().update_plot()
    def on_button_press(self, event):
        """Handle button presses."""
        # left click (and maybe drag) in progress in axes
        if (event.inaxes != self.axes or
                event.button != 1):
            return
        # Jump the brain's time cursor to the clicked x position.
        self.time_func(
            event.xdata, update_widget=True, time_as_index=False)
    on_motion_notify = on_button_press  # for now they can be the same
    def clear(self):
        """Clear internal variables, dropping the brain reference."""
        super().clear()
        self.brain = None
class _AbstractWindow(ABC):
    """Abstract interface for the top-level backend window."""
    def _window_initialize(self):
        """Reset every window-related handle to its unset (None) state."""
        for handle in ('_window', '_interactor', '_mplcanvas', '_show_traces',
                       '_separate_canvas', '_interactor_fraction'):
            setattr(self, handle, None)
    @abstractmethod
    def _window_close_connect(self, func):
        """Register *func* to run when the window closes."""
        pass
    @abstractmethod
    def _window_get_dpi(self):
        """Return the window's dots-per-inch."""
        pass
    @abstractmethod
    def _window_get_size(self):
        """Return the window size as (width, height) in pixels."""
        pass
    def _window_get_mplcanvas_size(self, fraction):
        """Return the canvas size in inches for *fraction* of the window height."""
        # The canvas keeps the full width; its height is the window height
        # divided by the interactor/canvas ratio (1 - fraction) / fraction.
        shrink = (1 - fraction) / fraction
        dpi = self._window_get_dpi()
        width_px, height_px = self._window_get_size()
        height_px /= shrink
        return (width_px / dpi, height_px / dpi)
    @abstractmethod
    def _window_get_simple_canvas(self, width, height, dpi):
        """Create a plain matplotlib canvas of the given size."""
        pass
    @abstractmethod
    def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces,
                              separate_canvas):
        """Create the brain-linked matplotlib canvas."""
        pass
    @abstractmethod
    def _window_adjust_mplcanvas_layout(self):
        """Lay out the matplotlib canvas within the window."""
        pass
    @abstractmethod
    def _window_get_cursor(self):
        """Return the current mouse cursor."""
        pass
    @abstractmethod
    def _window_set_cursor(self, cursor):
        """Set the mouse cursor."""
        pass
    @abstractmethod
    def _window_new_cursor(self, name):
        """Create a cursor identified by *name*."""
        pass
    @abstractmethod
    def _window_ensure_minimum_sizes(self):
        """Enforce the window's minimum widget sizes."""
        pass
    @abstractmethod
    def _window_set_theme(self, theme):
        """Apply *theme* to the window."""
        pass
| {
"repo_name": "rkmaddox/mne-python",
"path": "mne/viz/backends/_abstract.py",
"copies": "4",
"size": "24939",
"license": "bsd-3-clause",
"hash": -4474690987213582000,
"line_mean": 29.826946848,
"line_max": 79,
"alpha_frac": 0.5697902883,
"autogenerated": false,
"ratio": 4.304280289955126,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6874070578255127,
"avg_score": null,
"num_lines": null
} |
"""ABCs."""
# Authors: Guillaume Favelier <guillaume.favelier@gmail.com
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import warnings
from abc import ABC, abstractmethod, abstractclassmethod
from ..utils import tight_layout
from ...fixes import nullcontext
class _AbstractRenderer(ABC):
@abstractclassmethod
def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.),
name=None, show=False, shape=(1, 1)):
"""Set up the scene."""
pass
@abstractclassmethod
def subplot(self, x, y):
"""Set the active subplot."""
pass
@abstractclassmethod
def scene(self):
"""Return scene handle."""
pass
@abstractclassmethod
def set_interaction(self, interaction):
"""Set interaction mode."""
pass
@abstractclassmethod
def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1., normals=None,
polygon_offset=None, **kwargs):
"""Add a mesh in the scene.
Parameters
----------
x : array, shape (n_vertices,)
The array containing the X component of the vertices.
y : array, shape (n_vertices,)
The array containing the Y component of the vertices.
z : array, shape (n_vertices,)
The array containing the Z component of the vertices.
triangles : array, shape (n_polygons, 3)
The array containing the indices of the polygons.
color : tuple | str
The color of the mesh as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
opacity : float
The opacity of the mesh.
shading : bool
If True, enable the mesh shading.
backface_culling : bool
If True, enable backface culling on the mesh.
scalars : ndarray, shape (n_vertices,)
The scalar valued associated to the vertices.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
interpolate_before_map :
Enabling makes for a smoother scalars display. Default is True.
When False, OpenGL will interpolate the mapped colors which can
result is showing colors that are not present in the color map.
representation : str
The representation of the mesh: either 'surface' or 'wireframe'.
line_width : int
The width of the lines when representation='wireframe'.
normals : array, shape (n_vertices, 3)
The array containing the normal of each vertex.
polygon_offset : float
If not None, the factor used to resolve coincident topology.
kwargs : args
The arguments to pass to triangular_mesh
Returns
-------
surface :
Handle of the mesh in the scene.
"""
pass
@abstractclassmethod
def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, kind='line', color=None):
"""Add a contour in the scene.
Parameters
----------
surface : surface object
The mesh to use as support for contour.
scalars : ndarray, shape (n_vertices,)
The scalar valued associated to the vertices.
contours : int | list
Specifying a list of values will only give the requested contours.
width : float
The width of the lines or radius of the tubes.
opacity : float
The opacity of the contour.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
normalized_colormap : bool
Specify if the values of the colormap are between 0 and 1.
kind : 'line' | 'tube'
The type of the primitives to use to display the contours.
color :
The color of the mesh as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def surface(self, surface, color=None, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, scalars=None,
backface_culling=False, polygon_offset=None):
"""Add a surface in the scene.
Parameters
----------
surface : surface object
The information describing the surface.
color : tuple | str
The color of the surface as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
opacity : float
The opacity of the surface.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
scalars : ndarray, shape (n_vertices,)
The scalar valued associated to the vertices.
backface_culling : bool
If True, enable backface culling on the surface.
polygon_offset : float
If not None, the factor used to resolve coincident topology.
"""
pass
@abstractclassmethod
def sphere(self, center, color, scale, opacity=1.0,
resolution=8, backface_culling=False,
radius=None):
"""Add sphere in the scene.
Parameters
----------
center : ndarray, shape(n_center, 3)
The list of centers to use for the sphere(s).
color : tuple | str
The color of the sphere as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scale : float
The scaling applied to the spheres. The given value specifies
the maximum size in drawing units.
opacity : float
The opacity of the sphere(s).
resolution : int
The resolution of the sphere created. This is the number
of divisions along theta and phi.
backface_culling : bool
If True, enable backface culling on the sphere(s).
radius : float | None
Replace the glyph scaling by a fixed radius value for each
sphere (not supported by mayavi).
"""
pass
    @abstractclassmethod
    def tube(self, origin, destination, radius=0.001, color='white',
             scalars=None, vmin=None, vmax=None, colormap='RdBu',
             normalized_colormap=False, reverse_lut=False):
        """Add tube in the scene.

        Parameters
        ----------
        origin : array, shape(n_lines, 3)
            The coordinates of the first end of the tube(s).
        destination : array, shape(n_lines, 3)
            The coordinates of the other end of the tube(s).
        radius : float
            The radius of the tube(s).
        color : tuple | str
            The color of the tube as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (e.g. 'white'
            or 'w').
        scalars : array, shape (n_quivers,) | None
            The optional scalar data to use.
        vmin : float | None
            vmin is used to scale the colormap.
            If None, the min of the data will be used.
        vmax : float | None
            vmax is used to scale the colormap.
            If None, the max of the data will be used.
        colormap :
            The colormap to use.
        normalized_colormap : bool
            If True, the scalars are assumed to be already normalized
            for the colormap — TODO confirm against concrete backends.
        reverse_lut : bool
            If True, reverse the lookup table.

        Returns
        -------
        surface :
            Handle of the tube in the scene.
        """
        pass
    @abstractclassmethod
    def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
                 glyph_height=None, glyph_center=None, glyph_resolution=None,
                 opacity=1.0, scale_mode='none', scalars=None,
                 backface_culling=False, colormap=None, vmin=None, vmax=None,
                 line_width=2., name=None):
        """Add quiver3d in the scene.

        Parameters
        ----------
        x : array, shape (n_quivers,)
            The X component of the position of the quiver.
        y : array, shape (n_quivers,)
            The Y component of the position of the quiver.
        z : array, shape (n_quivers,)
            The Z component of the position of the quiver.
        u : array, shape (n_quivers,)
            The last X component of the quiver.
        v : array, shape (n_quivers,)
            The last Y component of the quiver.
        w : array, shape (n_quivers,)
            The last Z component of the quiver.
        color : tuple | str
            The color of the quiver as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (e.g. 'white'
            or 'w').
        scale : float
            The scaling applied to the glyphs. The size of the glyph
            is by default calculated from the inter-glyph spacing.
            The given value specifies the maximum glyph size in drawing units.
        mode : 'arrow', 'cone' or 'cylinder'
            The type of the quiver.
        resolution : int
            The resolution of the glyph created. Depending on the type of
            glyph, it represents the number of divisions in its geometric
            representation.
        glyph_height : float
            The height of the glyph used with the quiver.
        glyph_center : tuple
            The center of the glyph used with the quiver: (x, y, z).
        glyph_resolution : float
            The resolution of the glyph used with the quiver.
        opacity : float
            The opacity of the quiver.
        scale_mode : 'vector', 'scalar' or 'none'
            The scaling mode for the glyph.
        scalars : array, shape (n_quivers,) | None
            The optional scalar data to use.
        backface_culling : bool
            If True, enable backface culling on the quiver.
        colormap :
            The colormap to use.
        vmin : float | None
            vmin is used to scale the colormap.
            If None, the min of the data will be used.
        vmax : float | None
            vmax is used to scale the colormap.
            If None, the max of the data will be used.
        line_width : float
            The width of the 2d arrows.
        name : str | None
            Presumably an identifier for the created object in the
            scene — confirm semantics against concrete backends.
        """
        pass
    @abstractclassmethod
    def text2d(self, x_window, y_window, text, size=14, color='white'):
        """Add 2d text in the scene.

        Parameters
        ----------
        x_window : float
            The X component to use as position of the text in the
            window coordinates system (window_width, window_height).
        y_window : float
            The Y component to use as position of the text in the
            window coordinates system (window_width, window_height).
        text : str
            The content of the text.
        size : int
            The size of the font.
        color : tuple | str
            The color of the text as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (e.g. 'white'
            or 'w').
        """
        pass
    @abstractclassmethod
    def text3d(self, x, y, z, text, width, color='white'):
        """Add 3d text in the scene.

        Parameters
        ----------
        x : float
            The X component to use as position of the text.
        y : float
            The Y component to use as position of the text.
        z : float
            The Z component to use as position of the text.
        text : str
            The content of the text.
        width : float
            The width of the text.
        color : tuple | str
            The color of the text as a tuple (red, green, blue) of float
            values between 0 and 1 or a valid color name (e.g. 'white'
            or 'w').
        """
        pass
    @abstractclassmethod
    def scalarbar(self, source, color="white", title=None, n_labels=4,
                  bgcolor=None):
        """Add a scalar bar in the scene.

        Parameters
        ----------
        source :
            The object of the scene used for the colormap.
        color : tuple | str
            The color of the label text.
        title : str | None
            The title of the scalar bar.
        n_labels : int | None
            The number of labels to display on the scalar bar.
        bgcolor : tuple | str | None
            The color of the background when there is transparency.
        """
        pass
    @abstractclassmethod
    def show(self):
        """Render the scene.

        Notes
        -----
        Abstract; the concrete renderer backend provides the implementation.
        """
        pass
    @abstractclassmethod
    def close(self):
        """Close the scene.

        Notes
        -----
        Abstract; the concrete renderer backend provides the implementation.
        """
        pass
    @abstractclassmethod
    def set_camera(self, azimuth=None, elevation=None, distance=None,
                   focalpoint=None, roll=None, reset_camera=True):
        """Configure the camera of the scene.

        Parameters
        ----------
        azimuth : float | None
            The azimuthal angle of the camera.
        elevation : float | None
            The zenith angle of the camera.
        distance : float | None
            The distance to the focal point.
        focalpoint : tuple | None
            The focal point of the camera: (x, y, z).
        roll : float | None
            The rotation of the camera along its axis.
        reset_camera : bool
            If True, reset the camera properties beforehand.
        """
        pass
    @abstractclassmethod
    def reset_camera(self):
        """Reset the camera properties.

        Notes
        -----
        Abstract; the concrete renderer backend provides the implementation.
        """
        pass
    @abstractclassmethod
    def screenshot(self, mode='rgb', filename=None):
        """Take a screenshot of the scene.

        Parameters
        ----------
        mode : str
            Either 'rgb' or 'rgba' for values to return.
            Default is 'rgb'.
        filename : str | None
            If not None, save the figure to the disk.
        """
        pass
    @abstractclassmethod
    def project(self, xyz, ch_names):
        """Convert 3d points to a 2d perspective.

        Parameters
        ----------
        xyz : array, shape(n_points, 3)
            The points to project.
        ch_names : array, shape(n_points,)
            Names of the channels.
        """
        pass
    @abstractclassmethod
    def enable_depth_peeling(self):
        """Enable depth peeling.

        Notes
        -----
        Abstract; the concrete renderer backend provides the implementation.
        """
        pass
    @abstractclassmethod
    def remove_mesh(self, mesh_data):
        """Remove the given mesh from the scene.

        Parameters
        ----------
        mesh_data : tuple | Surface
            The mesh to remove.
        """
        pass
class _AbstractToolBar(ABC):
    """Interface for the tool bar of a backend window."""
    @abstractmethod
    def _tool_bar_load_icons(self):
        """Load the icons used by the tool bar."""
        pass
    @abstractmethod
    def _tool_bar_initialize(self, name="default", window=None):
        """Create the tool bar, optionally attached to a window."""
        pass
    @abstractmethod
    def _tool_bar_add_button(self, name, desc, func, icon_name=None,
                             shortcut=None):
        """Add a button calling ``func``, with optional icon and shortcut."""
        pass
    @abstractmethod
    def _tool_bar_update_button_icon(self, name, icon_name):
        """Change the icon of an existing button."""
        pass
    @abstractmethod
    def _tool_bar_add_text(self, name, value, placeholder):
        """Add a text entry widget to the tool bar."""
        pass
    @abstractmethod
    def _tool_bar_add_spacer(self):
        """Add a stretchable spacer to the tool bar."""
        pass
    @abstractmethod
    def _tool_bar_add_screenshot_button(self, name, desc, func):
        """Add a button dedicated to taking screenshots."""
        pass
    @abstractmethod
    def _tool_bar_set_theme(self, theme):
        """Apply the given theme to the tool bar."""
        pass
class _AbstractDock(ABC):
    """Interface for a dock panel holding labeled control widgets."""
    @abstractmethod
    def _dock_initialize(self, window=None):
        """Create the dock, optionally attached to a window."""
        pass
    @abstractmethod
    def _dock_finalize(self):
        """Finish the dock layout after all widgets are added."""
        pass
    @abstractmethod
    def _dock_show(self):
        """Make the dock visible."""
        pass
    @abstractmethod
    def _dock_hide(self):
        """Hide the dock."""
        pass
    @abstractmethod
    def _dock_add_stretch(self, layout):
        """Add a stretchable filler to the given layout."""
        pass
    @abstractmethod
    def _dock_add_layout(self, vertical=True):
        """Create and return a new sub-layout."""
        pass
    @abstractmethod
    def _dock_add_label(self, value, align=False, layout=None):
        """Add a text label to a layout."""
        pass
    @abstractmethod
    def _dock_add_button(self, name, callback, layout=None):
        """Add a push button wired to ``callback``."""
        pass
    @abstractmethod
    def _dock_named_layout(self, name, layout, compact):
        """Return a layout holding a named widget row."""
        pass
    @abstractmethod
    def _dock_add_slider(self, name, value, rng, callback,
                         compact=True, double=False, layout=None):
        """Add a slider over range ``rng`` wired to ``callback``."""
        pass
    @abstractmethod
    def _dock_add_spin_box(self, name, value, rng, callback,
                           compact=True, double=True, layout=None):
        """Add a spin box over range ``rng`` wired to ``callback``."""
        pass
    @abstractmethod
    def _dock_add_combo_box(self, name, value, rng,
                            callback, compact=True, layout=None):
        """Add a combo box with choices ``rng`` wired to ``callback``."""
        pass
    @abstractmethod
    def _dock_add_group_box(self, name, layout=None):
        """Add a named group box and return its layout."""
        pass
class _AbstractMenuBar(ABC):
    """Interface for the menu bar of a backend window."""
    @abstractmethod
    def _menu_initialize(self, window=None):
        """Create the menu bar, optionally attached to a window."""
        pass
    @abstractmethod
    def _menu_add_submenu(self, name, desc):
        """Add a named submenu to the menu bar."""
        pass
    @abstractmethod
    def _menu_add_button(self, menu_name, name, desc, func):
        """Add an entry to submenu ``menu_name`` wired to ``func``."""
        pass
class _AbstractStatusBar(ABC):
    """Interface for the status bar of a backend window."""
    @abstractmethod
    def _status_bar_initialize(self, window=None):
        """Create the status bar, optionally attached to a window."""
        pass
    @abstractmethod
    def _status_bar_add_label(self, value, stretch=0):
        """Add a text label with the given stretch factor."""
        pass
    @abstractmethod
    def _status_bar_add_progress_bar(self, stretch=0):
        """Add a progress bar with the given stretch factor."""
        pass
class _AbstractPlayback(ABC):
    """Interface for periodic playback callbacks."""
    @abstractmethod
    def _playback_initialize(self, func, timeout):
        """Register ``func`` to be called with the given timeout."""
        pass
class _AbstractLayout(ABC):
    """Interface for arranging widgets in a layout."""
    @abstractmethod
    def _layout_initialize(self, max_width):
        """Create the layout with the given maximum width."""
        pass
    @abstractmethod
    def _layout_add_widget(self, layout, widget):
        """Append ``widget`` to ``layout``."""
        pass
class _AbstractWidget(ABC):
    """Thin wrapper around a backend widget with a value accessor pair."""
    def __init__(self, widget):
        # The wrapped backend-specific widget instance.
        self._widget = widget
    @property
    def widget(self):
        """Return the wrapped backend widget."""
        return self._widget
    @abstractmethod
    def set_value(self, value):
        """Set the widget's current value."""
        pass
    @abstractmethod
    def get_value(self):
        """Return the widget's current value."""
        pass
class _AbstractMplInterface(ABC):
    """Interface for wiring a matplotlib canvas into a backend."""
    @abstractmethod
    def _mpl_initialize():
        """Create the matplotlib canvas machinery."""
        pass
class _AbstractMplCanvas(ABC):
    """Base for a matplotlib canvas showing a single time-course axes."""
    def __init__(self, width, height, dpi):
        """Initialize the MplCanvas."""
        from matplotlib import rc_context
        from matplotlib.figure import Figure
        # prefer constrained layout here but live with tight_layout otherwise
        context = nullcontext
        self._extra_events = ('resize',)
        try:
            # Older matplotlib raises KeyError for unknown rc keys; in that
            # case fall back to nullcontext and keep handling resize events
            # (which trigger tight_layout in on_resize).
            context = rc_context({'figure.constrained_layout.use': True})
            self._extra_events = ()
        except KeyError:
            pass
        with context:
            self.fig = Figure(figsize=(width, height), dpi=dpi)
            self.axes = self.fig.add_subplot(111)
        self.axes.set(xlabel='Time (sec)', ylabel='Activation (AU)')
        self.manager = None
    def _connect(self):
        # Hook up mouse events (plus 'resize' when tight_layout is needed)
        # to the on_<event> methods defined by subclasses/this class.
        for event in ('button_press', 'motion_notify') + self._extra_events:
            self.canvas.mpl_connect(
                event + '_event', getattr(self, 'on_' + event))
    def plot(self, x, y, label, **kwargs):
        """Plot a curve."""
        line, = self.axes.plot(
            x, y, label=label, **kwargs)
        self.update_plot()
        return line
    def plot_time_line(self, x, label, **kwargs):
        """Plot the vertical line."""
        line = self.axes.axvline(x, label=label, **kwargs)
        self.update_plot()
        return line
    def update_plot(self):
        """Update the plot."""
        # constrained_layout warnings are expected here; silence them
        # while redrawing.
        with warnings.catch_warnings(record=True):
            warnings.filterwarnings('ignore', 'constrained_layout')
            self.canvas.draw()
    def set_color(self, bg_color, fg_color):
        """Set the widget colors."""
        self.axes.set_facecolor(bg_color)
        self.axes.xaxis.label.set_color(fg_color)
        self.axes.yaxis.label.set_color(fg_color)
        self.axes.spines['top'].set_color(fg_color)
        self.axes.spines['bottom'].set_color(fg_color)
        self.axes.spines['left'].set_color(fg_color)
        self.axes.spines['right'].set_color(fg_color)
        self.axes.tick_params(axis='x', colors=fg_color)
        self.axes.tick_params(axis='y', colors=fg_color)
        self.fig.patch.set_facecolor(bg_color)
    def show(self):
        """Show the canvas."""
        if self.manager is None:
            self.canvas.show()
        else:
            self.manager.show()
    def close(self):
        """Close the canvas."""
        self.canvas.close()
    def clear(self):
        """Clear internal variables."""
        self.close()
        self.axes.clear()
        self.fig.clear()
        self.canvas = None
        self.manager = None
    def on_resize(self, event):
        """Handle resize events."""
        tight_layout(fig=self.axes.figure)
class _AbstractBrainMplCanvas(_AbstractMplCanvas):
    """Matplotlib canvas tied to a Brain instance for time-course display."""
    def __init__(self, brain, width, height, dpi):
        """Initialize the MplCanvas."""
        super().__init__(width, height, dpi)
        self.brain = brain
        # Callback used to move the brain's current time on click/drag.
        self.time_func = brain.callbacks["time"]
    def update_plot(self):
        """Update the plot."""
        # Restyle the legend with the brain's colors before redrawing.
        leg = self.axes.legend(
            prop={'family': 'monospace', 'size': 'small'},
            framealpha=0.5, handlelength=1.,
            facecolor=self.brain._bg_color)
        for text in leg.get_texts():
            text.set_color(self.brain._fg_color)
        super().update_plot()
    def on_button_press(self, event):
        """Handle button presses."""
        # left click (and maybe drag) in progress in axes
        if (event.inaxes != self.axes or
                event.button != 1):
            return
        self.time_func(
            event.xdata, update_widget=True, time_as_index=False)
    on_motion_notify = on_button_press  # for now they can be the same
    def clear(self):
        """Clear internal variables."""
        super().clear()
        self.brain = None
class _AbstractWindow(ABC):
    """Interface for the top-level window hosting the 3D view and canvases."""
    def _window_initialize(self):
        # Reset all window-related state to "not created yet".
        self._window = None
        self._interactor = None
        self._mplcanvas = None
        self._show_traces = None
        self._separate_canvas = None
        self._interactor_fraction = None
    @abstractmethod
    def _window_close_connect(self, func):
        """Call ``func`` when the window is closed."""
        pass
    @abstractmethod
    def _window_get_dpi(self):
        """Return the window's DPI."""
        pass
    @abstractmethod
    def _window_get_size(self):
        """Return the window size as (width, height) in pixels."""
        pass
    def _window_get_mplcanvas_size(self, fraction):
        """Return the matplotlib canvas size in inches for the given
        fraction of the window height."""
        ratio = (1 - fraction) / fraction
        dpi = self._window_get_dpi()
        w, h = self._window_get_size()
        h /= ratio
        return (w / dpi, h / dpi)
    @abstractmethod
    def _window_get_simple_canvas(self, width, height, dpi):
        """Create a standalone matplotlib canvas."""
        pass
    @abstractmethod
    def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces,
                              separate_canvas):
        """Create the matplotlib canvas associated with a brain."""
        pass
    @abstractmethod
    def _window_adjust_mplcanvas_layout(self):
        """Place the matplotlib canvas within the window layout."""
        pass
    @abstractmethod
    def _window_get_cursor(self):
        """Return the current mouse cursor."""
        pass
    @abstractmethod
    def _window_set_cursor(self, cursor):
        """Set the mouse cursor."""
        pass
    @abstractmethod
    def _window_ensure_minimum_sizes(self):
        """Make sure widgets meet their minimum sizes."""
        pass
    @abstractmethod
    def _window_set_theme(self, theme):
        """Apply the given theme to the window."""
        pass
| {
"repo_name": "kambysese/mne-python",
"path": "mne/viz/backends/_abstract.py",
"copies": "3",
"size": "24285",
"license": "bsd-3-clause",
"hash": -4780147132705384000,
"line_mean": 30.2548262548,
"line_max": 79,
"alpha_frac": 0.5698167593,
"autogenerated": false,
"ratio": 4.305851063829787,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 777
} |
"""abd automates the creation and landing of reviews from branches."""
# =============================================================================
# CONTENTS
# -----------------------------------------------------------------------------
# abdi_processrepo
#
# Public Functions:
# create_review
# create_differential_review
# update_review
# update_in_review
# land
# create_failed_review
# try_create_review
# process_updated_branch
# process_abandoned_branch
# process_branches
#
# -----------------------------------------------------------------------------
# (this contents block is generated, edits will be lost)
# =============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import phlcon_differential
import abdcmnt_commenter
import abdt_conduitgit
import abdt_exception
import abdt_git
import abdt_userwarning
_DEFAULT_TEST_PLAN = "I DIDNT TEST"
_LOGGER = logging.getLogger(__name__)
def create_review(conduit, branch):
    """Create a Differential review for the branch's tip commit.

    Parses the tip commit message, degrades gracefully for a missing test
    plan or unknown reviewers (recorded as user warnings on the review),
    and raises for any other parse failure or an empty diff.

    :conduit: the conduit used to talk to the Phabricator instance
    :branch: the review branch to create a review from
    """
    branch.verify_review_branch_base()
    # TODO: we should also cc other users on the branch
    # TODO: if there are emails that don't match up to users then we should
    #       note that on the review and perhaps use the mailer to notify them
    name, email, user, phid = abdt_conduitgit.getPrimaryUserDetailsFromBranch(
        conduit, branch)
    _LOGGER.debug("- author: {}".format(user))
    user_warnings = []
    message = branch.get_commit_message_from_tip()
    try:
        parsed = conduit.parse_commit_message(message)
    except phlcon_differential.UnknownParseCommitMessageResponseError as e:
        raise abdt_exception.CommitMessageParseException(
            errors=[e],
            fields=[],
            digest=message)
    d = phlcon_differential
    if parsed.errors:
        error_list = phlcon_differential.parse_commit_message_errors(
            parsed.errors)
        for error in error_list:
            # A missing test plan is tolerated: substitute the default and
            # warn the user rather than failing the review creation.
            if isinstance(error, d.ParseCommitMessageNoTestPlanFail):
                parsed.fields["testPlan"] = _DEFAULT_TEST_PLAN
                user_warnings.append(
                    abdt_userwarning.UsedDefaultTestPlan(_DEFAULT_TEST_PLAN))
            elif isinstance(error, d.ParseCommitMessageUnknownReviewerFail):
                user_warnings.append(
                    abdt_userwarning.UnknownReviewers(
                        error.user_list, message))
            else:
                # Any other parse error is fatal for review creation.
                raise abdt_exception.CommitMessageParseException(
                    errors=parsed.errors,
                    fields=parsed.fields,
                    digest=branch.make_message_digest())
    # remove the author from reviewer list if present
    reviewer_phids_key = phlcon_differential.MessageFields.reviewer_phids
    if reviewer_phids_key in parsed.fields:
        reviewer_phids = parsed.fields[reviewer_phids_key]
        if phid in reviewer_phids:
            reviewer_phids.remove(phid)
            user_warnings.append(abdt_userwarning.SelfReviewer(user, message))
    diff_result = branch.make_raw_diff()
    raw_diff = diff_result.diff
    if not raw_diff:
        raise abdt_exception.AbdUserException("no difference to review")
    if diff_result.reduction_list:
        # The diff had to be reduced to fit; let the user know.
        user_warnings.append(abdt_userwarning.LargeDiff(diff_result))
    revisionid = create_differential_review(
        conduit, user, parsed, branch, raw_diff)
    commenter = abdcmnt_commenter.Commenter(conduit, revisionid)
    if user_warnings:
        commenter.userWarnings(user_warnings)
def create_differential_review(conduit, user, parsed, branch, raw_diff):
    """Create the Differential revision, mark the branch and comment on it.

    :conduit: the conduit used to talk to the Phabricator instance
    :user: the username to create the revision as
    :parsed: the parsed commit message providing the revision fields
    :branch: the review branch being reviewed
    :raw_diff: the raw diff text for the revision
    :returns: the id of the newly created revision
    """
    _LOGGER.debug("- creating revision")
    revision_id = conduit.create_revision_as_user(
        raw_diff, parsed.fields, user)
    _LOGGER.debug("- created {}".format(revision_id))
    branch.mark_ok_new_review(revision_id)
    _LOGGER.debug("- commenting on {}".format(revision_id))
    commenter = abdcmnt_commenter.Commenter(conduit, revision_id)
    commenter.createdReview(
        branch.get_repo_name(),
        branch.review_branch_hash(),
        branch.review_branch_name(),
        branch.base_branch_name(),
        branch.get_browse_url())
    return revision_id
def update_review(conduit, branch):
    """Advance an existing review based on branch and review state.

    Handles, in priority order: new commits on the branch, a branch flagged
    abandoned, a review abandoned on Phabricator, a previously-bad branch
    worth retrying, and finally landing an accepted review.

    :conduit: the conduit used to talk to the Phabricator instance
    :branch: the review branch associated with the review
    """
    revision_id = branch.review_id_or_none()
    if branch.has_new_commits():
        _LOGGER.debug("changes on branch")
        branch.verify_review_branch_base()
        update_in_review(conduit, branch)
    elif branch.is_status_bad_abandoned():
        if not conduit.is_review_abandoned(revision_id):
            # update the review as the branch may have been bad previously
            # and we'll want to re-assess its status
            update_in_review(conduit, branch)
        elif not conduit.is_review_recently_updated(revision_id):
            # the review is abandoned and stale; remove the branch and
            # leave a comment pointing at the archived ref
            review_name = branch.review_branch_name()
            review_hash = branch.review_branch_hash()
            branch.remove()
            commenter = abdcmnt_commenter.Commenter(conduit, revision_id)
            commenter.abandonedForUser(
                review_name,
                review_hash,
                abdt_git.ARCYD_ABANDONED_REF)
            return
    elif conduit.is_review_abandoned(revision_id):
        raise abdt_exception.ReviewAbandonedException()
    elif branch.is_status_bad() and not branch.is_status_bad_land():
        try:
            _LOGGER.debug("try updating bad branch")
            branch.verify_review_branch_base()
            update_in_review(conduit, branch)
        except abdt_exception.AbdUserException:
            # stay in the bad state; the user must fix the branch
            _LOGGER.debug("still bad")
    if not branch.is_status_bad():
        if conduit.is_review_accepted(revision_id):
            branch.verify_review_branch_base()
            land(conduit, branch)
            # TODO: we probably want to do a better job of cleaning up locally
def update_in_review(conduit, branch):
    """Update the revision with the branch's current diff and comment on it.

    Raises AbdUserException if the branch has no difference to review.

    :conduit: the conduit used to talk to the Phabricator instance
    :branch: the review branch whose review should be updated
    """
    _LOGGER.debug("update_in_review")
    _LOGGER.debug("- creating diff")
    diff_result = branch.make_raw_diff()
    if not diff_result.diff:
        raise abdt_exception.AbdUserException("no difference to review")
    user_warnings = []
    if diff_result.reduction_list:
        # The diff had to be reduced to fit; let the user know.
        user_warnings.append(abdt_userwarning.LargeDiff(diff_result))
    review_id = branch.review_id_or_none()
    review_id_str = str(review_id)
    _LOGGER.debug("- updating revision {}".format(review_id_str))
    conduit.update_revision(
        review_id,
        diff_result.diff,
        'update\n\n``` lang=text\n' + branch.describe_new_commits() + '```')
    branch.mark_ok_in_review()
    _LOGGER.debug("- commenting on revision {}".format(review_id_str))
    commenter = abdcmnt_commenter.Commenter(conduit, review_id)
    commenter.updatedReview(
        branch.review_branch_hash(),
        branch.review_branch_name())
    if user_warnings:
        commenter.userWarnings(user_warnings)
def land(conduit, branch):
    """Land the branch onto its base and close the associated review.

    Raises LandingException if the branch has no commits.

    :conduit: the conduit used to talk to the Phabricator instance
    :branch: the accepted review branch to land
    """
    _LOGGER.debug("landing {}".format(branch.review_branch_name()))
    review_branch_name = branch.review_branch_name()
    base_branch_name = branch.base_branch_name()
    names_emails = branch.get_author_names_emails()
    if not names_emails:
        raise abdt_exception.LandingException(
            "no commits on branch", review_branch_name, base_branch_name)
    # pick the last author as the author for landing
    name, email = names_emails[-1]
    review_id = branch.review_id_or_none()
    # store the branch hash now, the branch will be invalid after landing
    review_branch_hash = branch.review_branch_hash()
    # compose the commit message
    message = conduit.get_commit_message(review_id)
    land_message = branch.land(name, email, message)
    _LOGGER.debug("- commenting on revision {}".format(review_id))
    commenter = abdcmnt_commenter.Commenter(conduit, review_id)
    commenter.landedReview(
        review_branch_hash,
        review_branch_name,
        base_branch_name,
        land_message)
    conduit.close_revision(review_id)
def create_failed_review(conduit, branch, exception):
    """Create an empty placeholder revision recording a failed creation.

    :conduit: the conduit used to talk to the Phabricator instance
    :branch: the branch whose review creation failed
    :exception: the exception that caused the failure, echoed as a comment
    """
    user = abdt_conduitgit.getAnyUserFromBranch(conduit, branch)
    reviewid = conduit.create_empty_revision_as_user(user)
    commenter = abdcmnt_commenter.Commenter(conduit, reviewid)
    commenter.failedCreateReview(
        branch.get_repo_name(),
        branch.review_branch_hash(),
        branch.review_branch_name(),
        branch.get_browse_url(),
        exception)
    branch.mark_new_bad_in_review(reviewid)
def try_create_review(
        mailer, conduit, branch, mail_on_fail):
    """Attempt review creation, falling back to a failed-review placeholder.

    If even the placeholder can't be created (no users on the branch), mark
    the branch bad and optionally email the authors.

    :mailer: used to notify authors when no Phabricator user matches
    :conduit: the conduit used to talk to the Phabricator instance
    :branch: the review branch to create a review from
    :mail_on_fail: if True, email on a no-users failure
    """
    try:
        create_review(conduit, branch)
    except abdt_exception.AbdUserException as e:
        _LOGGER.debug("failed to create: {}".format(e))
        try:
            create_failed_review(conduit, branch, e)
        except abdt_exception.NoUsersOnBranchException as e:
            _LOGGER.debug("failed to create failed review: {}".format(e))
            branch.mark_bad_pre_review()
            if mail_on_fail:
                mailer.noUsersOnBranch(
                    e.review_branch_name, e.base_name, e.emails)
def process_updated_branch(mailer, conduit, branch):
    """Create or update the review for a branch that has changed.

    New branches get a fresh review; branches that previously failed
    pre-review are retried; otherwise the existing review is updated, with
    landing/user errors downgraded to bad-status markers plus comments.

    :mailer: used to notify authors on unrecoverable creation failures
    :conduit: the conduit used to talk to the Phabricator instance
    :branch: the review branch to process
    """
    abdte = abdt_exception
    review_branch_name = branch.review_branch_name()
    if branch.is_new():
        _LOGGER.debug("create review for {}".format(review_branch_name))
        try_create_review(
            mailer,
            conduit,
            branch,
            mail_on_fail=True)
    else:
        review_id = branch.review_id_or_none()
        commenter = abdcmnt_commenter.Commenter(conduit, review_id)
        if branch.is_status_bad_pre_review():
            _LOGGER.debug(
                "try again to create review for {}".format(review_branch_name))
            # only mail again if there's something new to report
            has_new_commits = branch.has_new_commits()
            try_create_review(
                mailer,
                conduit,
                branch,
                mail_on_fail=has_new_commits)
        else:
            try:
                update_review(conduit, branch)
            except abdte.ReviewAbandonedException as e:
                branch.mark_bad_abandoned()
                commenter.exception(e)
            except abdte.LandingException as e:
                _LOGGER.debug("landing exception")
                branch.mark_bad_land()
                commenter.exception(e)
                conduit.set_requires_revision(review_id)
            except abdte.LandingPushBaseException as e:
                _LOGGER.debug("landing push base exception")
                # we don't need to set bad_land here, requiring revision is ok
                commenter.exception(e)
                conduit.set_requires_revision(review_id)
            except abdte.AbdUserException as e:
                _LOGGER.debug("user exception")
                branch.mark_bad_in_review()
                commenter.exception(e)
def process_abandoned_branch(conduit, branch):
    """Stop tracking a branch the user abandoned, commenting if reviewed.

    :conduit: the conduit used to talk to the Phabricator instance
    :branch: the abandoned review branch
    """
    branch_name = branch.review_branch_name()
    _LOGGER.debug(
        "untracking abandoned branch: {}".format(branch_name))
    review_id = branch.review_id_or_none()
    if review_id is not None:
        abdcmnt_commenter.Commenter(
            conduit, review_id).abandonedBranch(branch_name)
        # TODO: abandon the associated revision if not already
    branch.abandon()
def process_branches(branches, conduit, mailer):
    """Dispatch each branch to the abandoned/updated handler as appropriate.

    :branches: iterable of review branches to process
    :conduit: the conduit used to talk to the Phabricator instance
    :mailer: used to notify authors of unrecoverable failures
    """
    for current in branches:
        if current.is_abandoned():
            process_abandoned_branch(conduit, current)
        elif current.is_null():
            # TODO: should handle these
            continue
        else:
            process_updated_branch(mailer, conduit, current)
# -----------------------------------------------------------------------------
# Copyright (C) 2013-2015 Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "kjedruczyk/phabricator-tools",
"path": "py/abd/abdi_processrepo.py",
"copies": "4",
"size": "12398",
"license": "apache-2.0",
"hash": -1841986050498883600,
"line_mean": 34.7291066282,
"line_max": 79,
"alpha_frac": 0.6216325214,
"autogenerated": false,
"ratio": 3.853901150139882,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 347
} |
# abduction.py
# Logical abduction for kb of definite clauses
# Andrew S. Gordon
import parse
import unify
import itertools
def abduction(obs, kb, maxdepth, skolemize = True):
    '''Logical abduction: returns a list of all sets of assumptions that entail the observations given the kb

    obs -- list of observation literals to be entailed
    kb -- list of definite clauses
    maxdepth -- maximum backchaining depth per observation
    skolemize -- if True, replace remaining variables with skolem constants
    '''
    indexed_kb = index_by_consequent_predicate(kb)
    res = []
    # One leaflist collection per observation; their cartesian product gives
    # every way of jointly entailing all observations.
    listoflists = [and_or_leaflists([ob], indexed_kb, maxdepth) for ob in obs]
    for u in itertools.product(*listoflists):
        u = list(itertools.chain.from_iterable(u))
        # crunch merges unifiable assumptions and dedupes the result
        res.extend(crunch(u))
    if skolemize:
        return [unify.skolemize(r) for r in res]
    else:
        return res
def index_by_consequent_predicate(kb):
    '''Group the definite clauses of kb by the predicate of their consequent.'''
    index = {}
    for clause in kb:
        head_predicate = parse.consequent(clause)[0]
        index.setdefault(head_predicate, []).append(clause)
    return index
def and_or_leaflists(remaining, indexed_kb, depth, antecedents=None,
                     assumptions=None):
    '''Returns list of all entailing sets of leafs in the and-or backchaining tree

    remaining -- literals still to process at the current level
    indexed_kb -- clauses indexed by consequent predicate
    depth -- remaining backchaining depth
    antecedents -- literals queued for the next level down
    assumptions -- literals assumed so far (the leaves)

    May return either a list of lists or an itertools.chain over them;
    callers should treat the result as an iterable of assumption lists.
    '''
    # BUGFIX(style): replaced mutable default arguments ([]) with None
    # sentinels; the lists were never mutated in place so behavior is
    # unchanged, but the defaults are no longer shared between calls.
    if antecedents is None:
        antecedents = []
    if assumptions is None:
        assumptions = []
    if depth == 0 and len(antecedents) > 0: # fail
        return [] # (empty) list of lists
    elif len(remaining) == 0: # done with this level
        if len(antecedents) == 0: # found one
            return [assumptions] # list of lists
        else:
            # descend one level: the antecedents become the new goals
            return and_or_leaflists(antecedents, indexed_kb, depth - 1, [], assumptions)
    else: # more to go on this level
        literal = remaining[0] # first of remaining
        predicate = literal[0]
        if predicate not in indexed_kb:
            return and_or_leaflists(remaining[1:], indexed_kb, depth, antecedents, [literal] + assumptions) # shift literal to assumptions
        else:
            revisions = []
            for rule in indexed_kb[predicate]: # indexed by predicate of literal
                theta = unify.unify(literal, parse.consequent(rule))
                if theta != None:
                    if depth == 0: # no depth for revision
                        return [] # (empty) list of lists
                    revisions.append([unify.subst(theta, remaining[1:]), # new remaining with substitutions
                                      indexed_kb,
                                      depth,
                                      unify.standardize(unify.subst(theta, parse.antecedent(rule))) +
                                      unify.subst(theta, antecedents), # new antecedents with substitutions
                                      unify.subst(theta, assumptions)]) # new assumptions with substitutions
            return itertools.chain(*[and_or_leaflists(*rev) for rev in revisions]) # list of lists (if any)
def crunch(conjunction):
    '''Returns all possible ways that literals in a conjunction could be unified'''
    # sorted() places identical solutions next to each other so that
    # groupby collapses duplicates; keep one representative per group.
    unique_solutions = []
    for solution, _dupes in itertools.groupby(sorted(cruncher(conjunction, 0))):
        unique_solutions.append(solution)
    return unique_solutions
def cruncher(conjunction, idx = 0):
    '''Recursively enumerate conjunctions reachable by unifying literal pairs.

    For each literal at or after position idx, tries unifying it with every
    later literal (merging them) as well as leaving it as-is, returning a
    list of resulting conjunctions with duplicate literals removed.
    '''
    if idx >= len(conjunction) - 1: # last one
        return [[k for k,v in itertools.groupby(sorted(conjunction))]] # dedupe literals in solution
    else:
        res = []
        for subsequent in range(idx + 1,len(conjunction)):
            theta = unify.unify(conjunction[idx], conjunction[subsequent])
            if theta:
                # merge the two literals: drop the subsequent one and apply
                # the unifier to the rest of the conjunction
                new_conjunction = unify.subst(theta,
                                              conjunction[0:subsequent] +
                                              conjunction[(subsequent + 1):len(conjunction)])
                res.extend(cruncher(new_conjunction, idx))
        # also consider not unifying the literal at idx with anything
        res.extend(cruncher(conjunction, idx + 1))
        return res
| {
"repo_name": "asgordon/EtcAbductionPy",
"path": "etcabductionpy/abduction.py",
"copies": "1",
"size": "3622",
"license": "bsd-2-clause",
"hash": 3914288927135969000,
"line_mean": 44.275,
"line_max": 138,
"alpha_frac": 0.5877967973,
"autogenerated": false,
"ratio": 4.051454138702461,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5139250936002461,
"avg_score": null,
"num_lines": null
} |
#a beautiful grid pattern on the screen
import pygame
import time
class Player:
    """A player token on the board with position, score and movement state."""
    def __init__(self, player_id, name, score, position = (-1,11), roll = 0):
        self.id = player_id
        self.name = name
        self.score = score
        self.position = position
        self.roll = roll
        self.category = 0
        # NOTE(review): x/y ignore the position argument and always start at
        # (-1, 11) — confirm whether position should initialize them.
        self.x = -1
        self.y = 11
        self.rect = (self.x, self.y)
        self.moved = True
    def relocate(self, c, x, y):
        """Move the player to category c at grid coordinates (x, y)."""
        self.c = c
        self.x = x
        self.y = y
        self.location = (x,y)
    def add_category(self, category):
        """Set the player's current category."""
        self.category = category
    def add_steps(self, steps):
        """Record the number of steps to take."""
        self.steps = steps
    def add_type(self, type):
        """Record the player's type."""
        self.type = type
    def update(self, moves):
        """Apply one tick of arrow-key movement while moves remain.

        NOTE(review): ``moves`` is a local copy, so decrementing it has no
        effect on the caller — confirm whether remaining moves should be
        tracked on the instance instead.
        """
        if moves > 0:
            # renamed from `set`, which shadowed the builtin
            moved_this_tick = False
            keys = pygame.key.get_pressed()
            # BUGFIX: the original bounds checks (e.g. `0 > self.x >= 8`)
            # were chained comparisons that could never be True, so the
            # player could never move. Assumed field is 0..8 on both axes
            # — confirm against the board dimensions.
            if keys[pygame.K_LEFT] and 8 >= self.x > 0:
                self.x -= 1
                moved_this_tick = True
            elif keys[pygame.K_RIGHT] and 0 <= self.x < 8:
                self.x += 1
                moved_this_tick = True
            if keys[pygame.K_UP] and 8 >= self.y > 0:
                self.y -= 1
                moved_this_tick = True
            elif keys[pygame.K_DOWN] and 0 <= self.y < 8:
                self.y += 1
                moved_this_tick = True
            if moved_this_tick:
                moves -= 1  # BUGFIX: was `moves =- 1`, which assigned -1
                time.sleep(0.3)
class Point:
    """A drawable grid cell at (x, y) within one of the colored categories."""
    def __init__(self, x, y, category, highlight):
        self.x = x
        self.y = y
        self.category = category
        # BUGFIX: storing the state as `self.highlight` shadowed the
        # highlight() method on instances, so calling point.highlight()
        # raised TypeError. The state now lives in `_highlight`; the
        # constructor signature is unchanged.
        self._highlight = highlight
    def highlight(self):
        """Toggle the highlight state between 0 and 1."""
        if self._highlight == 0:
            self._highlight = 1
        else:
            self._highlight = 0
    def drawself(self, screen, width, height, grid_height):
        """Draw this point as a rectangle; highlighted points draw larger."""
        if self.x >= 0 and self.y >= 0:
            pygame.draw.rect(screen, (0,0,0), [width/20 + width/4*self.category + width/8*self.x, height/grid_height *self.y + height/50, 8*(1+self._highlight), 8*(1+self._highlight)], 2)
        else:
            print("Player is not in game yet")
class Grid:
    """Holds the grid points and players for the board.

    NOTE(review): drawgrid references self.screen, self.width, self.height
    and drawTextInRect, none of which are defined in this class or file —
    confirm the intended source before use. It also reads player.highlight,
    which Player instances do not define.
    """
    def __init__(self, grid_width=2, grid_height=10):
        self.points =[]
        self.players =[]
        self.grid_width = grid_width
        self.grid_height = grid_height
        # colors are: red, blue, yellow, green
        self.colorlist = ((255,0,0), (0,0,255), (255, 255, 0), (0,255, 0))
    def addplayer(self, player):
        """Track the player if not already tracked."""
        if not self.players.__contains__(player):
            self.players.append(player)
    #draw the grid and update whilst checking if someone wins
    #if someone wins, def returns True
    def drawgrid(self, screen, width, height):
        #draw backgroundcolors
        for c in range(0,4):
            templist = []
            for x in range(0, self.grid_width):
                for y in range(0, self.grid_height):
                    for player in self.players:
                        if player.y < 0:
                            # a player above the top row wins
                            drawTextInRect(self.screen, "Player {} Wins!".format(player.name), (0, 0, 0),
                                           (self.width / 2, self.height / 2), pygame.font.SysFont("Arial", 40))
                            print("Terminate Game")
                            return True
                        else:
                            if player.highlight == 1 and player.x == x and player.y == y and player.category == c:
                                Point(x, y ,c, 1).drawself(screen, width, height, self.grid_height)
                                templist.append(Point(x, y ,c, 1))
                            else:
                                Point(x, y ,c, 0).drawself(screen, width, height, self.grid_height)
                                templist.append(Point(x, y ,c, 0))
                                templist.append(Point(x, y ,c, 1))
            self.points.append(templist)
    #call Sections to draw grid and players
    #
class Sections:
    """Draws the four colored category sections, their grid points and players."""
    def __init__(self, screen, width, height, players, categories=4, grid_width=2, grid_heigth=10):
        self.listc = []
        self.players = players
        self.screen = screen
        self.width = width
        self.height = height
        self.categories = categories
        self.grid_width = grid_width
        self.grid_height = grid_heigth
        #colors are: red, blue, yellow, green
        self.colorlist = ((255,0,0), (0,0,255), (255, 255, 0), (0,255, 0))
        # draw the four colored background bands, one per category
        i = 1
        for counter in range(0, 4):
            pygame.draw.rect(self.screen, self.colorlist[counter], [i, 0, self.width / 4, self.height], 0)
            i += self.width / 4
        # draw and remember every grid point
        for category in range(0, categories):
            for x in range(0, self.grid_width):
                for y in range(0, self.grid_height):
                    Point(x, y, category, 0).drawself(self.screen, self.width, self.height, self.grid_height)
                    self.listc.append(Point(x, y, category, 0))
    def drawplayer(self, player, c, x, y):
        """Move the player to category c at (x, y)."""
        player.relocate(c, x, y)
    def draw(self, player):
        """Redraw all points, highlighting the player's current cell."""
        self.updateplayer(player)
        for category in range(0, self.categories):
            for x in range(0, self.grid_width):
                for y in range(0, self.grid_height):
                    if player.x == x and player.y == y and player.category == category:
                        Point(x, y, category, 2).drawself(self.screen, self.width, self.height, self.grid_height)
                    else:
                        Point(x, y, category, 0).drawself(self.screen, self.width, self.height, self.grid_height)
    def getpoint(self, category, x, y):
        """Return the stored point matching (category, x, y), or None.

        BUGFIX: previously this ignored its arguments and compared each
        point against self.players.x/.y, so lookups never used the
        requested coordinates.
        """
        for item in self.listc:
            if item.category == category and item.x == x and item.y == y:
                return item
        return None
    def updateplayer(self, player):
        """Toggle the highlight of the player's cell, or announce a win.

        NOTE(review): drawTextInRect is not defined in this file — confirm
        its intended source.
        """
        if player.y >= 0:
            self.getpoint(player.category, player.x, player.y).highlight()
        else:
            drawTextInRect(self.screen, "Player {} Wins!".format(player.name), (0,0,0),(self.width/2, self.height/2), pygame.font.SysFont("Arial", 40))
    def addplayer(self, player):
        """Replace the tracked player."""
        self.players = player
class Game:
    """Top-level bootstrap: opens the pygame window and runs the main loop."""
    def __init__(self):
        # starts pygame
        pygame.init()
        self.font = pygame.font.SysFont("Times", 40)
        self.score = 0
        self.width = 800
        self.height = 600
        self.size = (self.width, self.height)
        # NOTE(review): overwrites the "Times" font assigned above —
        # confirm which font is intended.
        self.font = pygame.font.SysFont("Arial", 40)
        self.screen = pygame.display.set_mode(self.size)
        running = True  # NOTE(review): unused local
        # check function
        bob = Player(1, "Bob", 0)
        while process_events():
            # draw logic
            self.screen.fill((0,0,0))
            # NOTE(review): a new Sections is built (and backgrounds redrawn)
            # every frame — confirm this is intended.
            menu = Sections(self.screen, self.width, self.height/2, bob)
            bob.update(20)
            # must also flip backscreen
            pygame.display.flip()
        print(5/2)  # NOTE(review): leftover debug output
def process_events():
for event in pygame.event.get():
if event.type == pygame.QUIT:
return False
return True
game = Game() | {
"repo_name": "daniellinye/HRINFG3",
"path": "Test_Files/location test.py",
"copies": "1",
"size": "6941",
"license": "mit",
"hash": 5548306871896422000,
"line_mean": 28.1680672269,
"line_max": 185,
"alpha_frac": 0.516784325,
"autogenerated": false,
"ratio": 3.7458175930922826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4762601918092283,
"avg_score": null,
"num_lines": null
} |
"""A benchmark for diesel's internal timers.
Try something like:
$ python examples/timer_bench.py 10
$ python examples/timer_bench.py 100
$ python examples/timer_bench.py 1000
The script will output the total time to run with the given number of
producer/consumer pairs and a sample of CPU time while the benchmark was
running.
"""
import os
import subprocess
import sys
import time
import diesel
from diesel.util.event import Countdown
from diesel.util.queue import Queue
# Number of items each producer emits (and each consumer expects to drain).
OPERATIONS = 60
# CPU-usage samples collected by track_cpu_stats(); printed at exit.
cpustats = []
def producer(q):
    """Put OPERATIONS integers on the queue, one every half second."""
    # range() instead of the Python-2-only xrange() keeps this runnable on
    # both Python 2 and Python 3.
    for i in range(OPERATIONS):
        diesel.sleep(0.5)
        q.put(i)
def consumer(q, done):
    """Drain OPERATIONS items from the queue, ticking ``done`` for each.

    The very long sleep acts as a watchdog: if the queue stays empty that
    long, something is wrong and the consumer bails out.
    """
    # range()/parenthesised print work on both Python 2 and Python 3
    # (xrange and the print statement are Python-2-only).
    for i in range(OPERATIONS):
        evt, data = diesel.first(waits=[q], sleep=10000)
        if evt == "sleep":
            print("sleep was triggered!")
            break
        done.tick()
def pair(done):
    """Fork one producer/consumer pair sharing a fresh queue."""
    shared_q = Queue()
    diesel.fork(producer, shared_q)
    diesel.fork(consumer, shared_q, done)
def track_cpu_stats():
    """Sample this process's CPU figure once per second into ``cpustats``."""
    pid = os.getpid()
    def append_stats():
        # Shell out to ps and parse its fixed-format output; index 3 of the
        # whitespace-split `ps -f` data row is the C (CPU utilisation) field.
        # NOTE(review): communicate() returns bytes on Python 3, so the
        # str-based split below assumes Python 2 — confirm target version.
        rawstats = subprocess.Popen(['ps -p %d -f' % pid], shell=True, stdout=subprocess.PIPE).communicate()[0]
        header, data = rawstats.split('\n', 1)
        procstats = [d for d in data.split(' ') if d]
        cpustats.append(int(procstats[3]))
    while True:
        diesel.sleep(1)
        # Run the blocking ps call on a thread so the event loop isn't stalled.
        diesel.thread(append_stats)
def main():
    """Run the benchmark: spawn pairs, wait for completion, report timing."""
    diesel.fork(track_cpu_stats)
    actor_pairs = int(sys.argv[1])
    done = Countdown(actor_pairs)
    # range() instead of Python-2-only xrange().
    for i in range(actor_pairs):
        pair(done)
    start = time.time()
    done.wait()
    # Parenthesised print is valid on both Python 2 and Python 3.
    print("done in %.2f secs" % (time.time() - start))
    # Give forked loops a moment to settle before tearing the app down.
    diesel.sleep(1)
    diesel.quickstop()
if __name__ == '__main__':
    diesel.set_log_level(diesel.loglevels.ERROR)
    diesel.quickstart(main)
    # Runs after quickstart returns (i.e. after quickstop()); the call form
    # of print works on both Python 2 and Python 3.
    print(cpustats)
| {
"repo_name": "dieseldev/diesel",
"path": "examples/timer_bench.py",
"copies": "1",
"size": "1760",
"license": "bsd-3-clause",
"hash": 4817101190680312000,
"line_mean": 23.7887323944,
"line_max": 111,
"alpha_frac": 0.6397727273,
"autogenerated": false,
"ratio": 3.3396584440227706,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9456866232575958,
"avg_score": 0.0045129877493625195,
"num_lines": 71
} |
""" A benchmark utility used in speed/performance tests. """
from os import getpid
from test import pystone # native python-core "PYSTONE" Benchmark Program
from timeit import default_timer as timer
from psutil import Process
# The result is a number of pystones per second the computer is able to perform,
# and the time used to perform the benchmark, result depends on the hardware.
# NOTE: the pystone calibration runs at import time, so importing this
# module takes a moment.
benchtime, pystones = pystone.pystones()
kpystones = pystones / 1000.0
# Shared mapping that @profile writes its measurements into.
stats = {}
# pylint: disable-msg=W0102
def profile(name='stats', _stats=stats):
    """Decorator: record wall-clock time, kpystones and memory delta.

    Measurements are stored under ``_stats[name]`` as a dict with keys
    ``time`` (seconds), ``kstones`` and ``memory`` (bytes).  ``_stats``
    deliberately defaults to the shared module-level ``stats`` mapping.
    """
    def _profile(function):
        from functools import wraps

        # Fix: preserve the wrapped function's name/docstring, which the
        # original decorator discarded.
        @wraps(function)
        def __profile(*args, **kw):
            start_time = timer()
            start_memory = _get_memory_usage()
            try:
                return function(*args, **kw)
            finally:
                # Record stats even when the wrapped call raises.
                total = timer() - start_time
                kstones = _seconds_to_kpystones(total)
                memory = _get_memory_usage() - start_memory
                _stats[name] = {'time': total,
                                'kstones': kstones,
                                'memory': memory}
        return __profile
    return _profile
def _seconds_to_kpystones(seconds):
    """Convert a duration in seconds into the equivalent kpystones amount."""
    return seconds * kpystones
def _get_memory_usage():
    """Return this process's resident set size (top->RES) in bytes."""
    return Process(getpid()).memory_info().rss
| {
"repo_name": "duboviy/pybenchmark",
"path": "pybenchmark/profile.py",
"copies": "1",
"size": "1526",
"license": "mit",
"hash": -3399335632202586600,
"line_mean": 33.6818181818,
"line_max": 86,
"alpha_frac": 0.6107470511,
"autogenerated": false,
"ratio": 4.1808219178082195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008091963129150417,
"num_lines": 44
} |
# Irregular English nouns whose plural cannot be produced by suffix rules;
# pluralize() consults this table before anything else.
ABERRANT_PLURAL_MAP = {
    'appendix': 'appendices',
    'barracks': 'barracks',
    'cactus': 'cacti',
    'child': 'children',
    'criterion': 'criteria',
    'deer': 'deer',
    'echo': 'echoes',
    'elf': 'elves',
    'embargo': 'embargoes',
    'focus': 'foci',
    'fungus': 'fungi',
    'goose': 'geese',
    'hero': 'heroes',
    'hoof': 'hooves',
    'index': 'indices',
    'knife': 'knives',
    'leaf': 'leaves',
    'life': 'lives',
    'man': 'men',
    'mouse': 'mice',
    'nucleus': 'nuclei',
    'person': 'people',
    'phenomenon': 'phenomena',
    'potato': 'potatoes',
    'self': 'selves',
    'syllabus': 'syllabi',
    'tomato': 'tomatoes',
    'torpedo': 'torpedoes',
    'veto': 'vetoes',
    'woman': 'women',
    }
# Used to decide between suffix rules (e.g. 'y' -> 'ies' only after a consonant).
VOWELS = set('aeiou')
def pluralize(singular):
    """
    Taken from ActiveState recipe
    http://code.activestate.com/recipes/577781-pluralize-word-convert-singular-word-to-its-plural/

    Return plural form of given lowercase singular word (English only). Based on
    ActiveState recipe http://code.activestate.com/recipes/413172/

    >>> pluralize('')
    ''
    >>> pluralize('goose')
    'geese'
    >>> pluralize('dolly')
    'dollies'
    >>> pluralize('genius')
    'genii'
    >>> pluralize('jones')
    'joneses'
    >>> pluralize('pass')
    'passes'
    >>> pluralize('zero')
    'zeros'
    >>> pluralize('casino')
    'casinos'
    >>> pluralize('hero')
    'heroes'
    >>> pluralize('church')
    'churches'
    >>> pluralize('x')
    'xs'
    >>> pluralize('car')
    'cars'
    """
    if not singular:
        return ''
    irregular = ABERRANT_PLURAL_MAP.get(singular)
    if irregular:
        return irregular
    stem, ending = singular, 's'
    try:
        if singular[-1] == 'y' and singular[-2] not in VOWELS:
            stem, ending = singular[:-1], 'ies'
        elif singular[-1] == 's':
            if singular[-2] in VOWELS:
                if singular.endswith('ius'):
                    stem, ending = singular[:-2], 'i'
                else:
                    stem, ending = singular[:-1], 'ses'
            else:
                ending = 'es'
        elif singular[-2:] in ('ch', 'sh'):
            ending = 'es'
    except IndexError:
        # One-letter words like 'y' or 's' just take a bare 's' suffix.
        ending = 's'
    return stem + ending
if __name__ == '__main__':
    # Run the doctest examples embedded in pluralize()'s docstring.
    import doctest
    doctest.testmod()
| {
"repo_name": "Govexec/django-odd-utilities",
"path": "odd_utilities/text_utilities.py",
"copies": "1",
"size": "2465",
"license": "mit",
"hash": 6774506253501130000,
"line_mean": 22.932038835,
"line_max": 98,
"alpha_frac": 0.5030425963,
"autogenerated": false,
"ratio": 3.344640434192673,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4347683030492673,
"avg_score": null,
"num_lines": null
} |
"""A big ball of mud to hold common functionality pending a re-org."""
import os
import cv2
import numpy
import mel.lib.datetime
import mel.lib.image
def determine_filename_for_ident(*source_filenames):
    """Pick an ident filename from the latest date guessed from the inputs.

    Falls back to "ident.jpg" when no input path yields a date.
    """
    if not source_filenames:
        raise ValueError(
            "{} is not a valid list of filenames".format(source_filenames)
        )
    guesses = (
        mel.lib.datetime.guess_datetime_from_path(name)
        for name in source_filenames
    )
    valid_dates = [when for when in guesses if when is not None]
    if not valid_dates:
        return "ident.jpg"
    return "{}.jpg".format(max(valid_dates).date().isoformat())
def overwrite_image(directory, filename, image):
    """Write image under directory, creating it and replacing any old file."""
    if not os.path.exists(directory):
        os.makedirs(directory)
    target = os.path.join(directory, filename)
    if os.path.exists(target):
        os.remove(target)
    write_image(target, image)
def write_image(path, image):
    """Write image to path via OpenCV; raise if the write fails."""
    succeeded = cv2.imwrite(str(path), image)
    if not succeeded:
        raise Exception("Was unable to write image to '{}'.".format(path))
def user_mark_moles(window_name, context_image, detail_image, num_moles):
    """Interactively collect num_moles click positions, first on the context
    image and then on the detail image.

    Returns (context_mole_positions, detail_mole_positions), each a list of
    (x, y, radius) tuples.  Raises Exception if the user presses a key.
    """
    display_image = numpy.copy(context_image)
    cv2.imshow(window_name, display_image)
    circle_radius = 50
    context_mole_positions = []
    detail_mole_positions = []
    # Clicks go to whichever list is "current"; we start with the context
    # image and switch to the detail image once it is fully marked.
    current_mole_positions = context_mole_positions
    cv2.setMouseCallback(
        window_name,
        make_mole_capture_callback(
            window_name, display_image, circle_radius, context_mole_positions
        ),
    )
    # main loop
    # NOTE(review): the capture callback fires on LBUTTONDOWN, so a single
    # click marks a mole despite the "double-click" prompt — confirm intent.
    print("Please mark all specified moles, double-click to mark.")
    print("Press any key to abort.")
    is_finished = False
    while not is_finished:
        key = cv2.waitKey(50)
        if key != -1:
            raise Exception("User aborted.")
        if len(current_mole_positions) == num_moles:
            if not detail_mole_positions:
                # Context image complete: swap in the detail image and start
                # collecting clicks into detail_mole_positions instead.
                current_mole_positions = detail_mole_positions
                display_image = numpy.copy(detail_image)
                cv2.setMouseCallback(
                    window_name,
                    make_mole_capture_callback(
                        window_name,
                        display_image,
                        circle_radius,
                        detail_mole_positions,
                    ),
                )
                cv2.imshow(window_name, display_image)
            else:
                # Detail image complete too: report and leave the loop.
                print("context positions:")
                print(context_mole_positions)
                print("detail positions:")
                print(detail_mole_positions)
                is_finished = True
    # stop handling events, or there could be nasty side-effects
    cv2.setMouseCallback(window_name, make_null_mouse_callback())
    return context_mole_positions, detail_mole_positions
def make_mole_capture_callback(window_name, image, radius, mole_positions):
    """Return a mouse callback that records a mole and draws it on left-click."""
    def on_mouse(event, x, y, _flags, _param):
        del _flags, _param
        if event == cv2.EVENT_LBUTTONDOWN:
            mole_positions.append((x, y, radius))
            cv2.circle(image, (x, y), radius, (255, 0, 0), -1)
            cv2.imshow(window_name, image)
    return on_mouse
def make_null_mouse_callback():
    """Return a mouse callback that ignores every event (detaches handling)."""
    def _ignore_event(_event, _x, _y, _flags, _param):
        del _event, _x, _y, _flags, _param
    return _ignore_event
def box_moles(image, mole_positions, thickness):
    """Draw a blue rectangle around all moles, padded by 2x the line thickness."""
    pad = 2 * thickness
    left = min(m[0] - m[2] for m in mole_positions) - pad
    top = min(m[1] - m[2] for m in mole_positions) - pad
    right = max(m[0] + m[2] for m in mole_positions) + pad
    bottom = max(m[1] + m[2] for m in mole_positions) + pad
    blue = (255, 0, 0)
    cv2.rectangle(image, (left, top), (right, bottom), blue, thickness)
def connect_moles(image, mole_positions):
    """Draw blue lines linking each mole to the next one in the list.

    Line endpoints are padded away from the mole centres (along the
    connecting direction) so the lines don't overlap the mole markers.
    """
    for mole_a, mole_b in yield_neighbors(mole_positions):
        thickness = max(mole_a[2], mole_b[2])
        # draw connection
        a = numpy.array(mole_a[:2])
        b = numpy.array(mole_b[:2])
        # Unit vector from a to b; used to pull the endpoints inward.
        a_to_b = b - a
        a_to_b = a_to_b / numpy.linalg.norm(a_to_b)
        padding = (a_to_b * (thickness * 2)).astype(int)
        a += padding
        b -= padding
        blue = (255, 0, 0)
        # Fix: removed a leftover debugging print() of the raw vectors here.
        cv2.line(image, tuple(a.tolist()), tuple(b.tolist()), blue, thickness)
def yield_neighbors(node_list):
    """Yield consecutive (previous, current) pairs from node_list."""
    iterator = iter(node_list)
    try:
        previous = next(iterator)
    except StopIteration:
        return
    for current in iterator:
        yield (previous, current)
        previous = current
def new_image(height, width):
    """Return a black 3-channel image of the given size as a uint8 array."""
    shape = (height, width, 3)
    return numpy.zeros(shape, dtype=numpy.uint8)
def copy_image_into_image(source, dest, y, x):
    """Paste source into dest with its top-left corner at (y, x)."""
    height, width = source.shape[0], source.shape[1]
    dest[y:y + height, x:x + width] = source
def shrink_to_max_dimension(image, max_dimension):
    """May or may not return the original image."""
    height, width = image.shape[0], image.shape[1]
    scale = max_dimension / max(width, height)
    if scale >= 1:
        # Already small enough; hand back the original untouched.
        return image
    resized_size = (int(width * scale), int(height * scale))
    return cv2.resize(image, resized_size)
def indicate_mole(image, mole):
    """Draw four radial tick marks (left/right/down/up) pointing at the mole."""
    pos = mole[:2]
    radius = mole[2]
    for direction in ((-1, 0), (1, 0), (0, 1), (0, -1)):
        draw_radial_line(image, pos, radius * 4, radius * 6, direction, radius)
def draw_radial_line(
    image, origin, inner_radius, outer_radius, direction, thickness
):
    """Draw a blue segment along direction, spanning inner to outer radius."""
    origin = numpy.array(origin)
    direction = numpy.array(direction)
    start = tuple((origin + direction * inner_radius).tolist())
    end = tuple((origin + direction * outer_radius).tolist())
    blue = (255, 0, 0)
    cv2.line(image, start, end, blue, thickness)
def user_review_image(window_name, image):
    """Show image and wait for a keypress; raise if the user presses 'q'."""
    cv2.imshow(window_name, image)
    print("Press 'q' quit, any other key to continue.")
    if cv2.waitKey() == ord("q"):
        raise Exception("User aborted.")
def rotated90(image, times):
    """Return image rotated clockwise by 90 degrees, ``times`` times (mod 4)."""
    for _ in range(times % 4):
        # transpose followed by a horizontal flip is one clockwise quarter-turn
        image = cv2.flip(cv2.transpose(image), 1)
    return image
def add_context_detail_arguments(parser):
    """Register the shared context/detail image CLI options on ``parser``."""
    # Required image paths.
    for name, help_text in (
        ("context", "Path to the context image to add."),
        ("detail", "Path to the detail image to add."),
    ):
        parser.add_argument(name, type=str, default=None, help=help_text)
    # Optional clockwise quarter-turn counts.
    for name, help_text in (
        ("--rot90",
         "Rotate images 90 degrees clockwise this number of times."),
        ("--rot90-context",
         "Rotate context image 90 degrees clockwise this number of times."),
        ("--rot90-detail",
         "Rotate detail image 90 degrees clockwise this number of times."),
    ):
        parser.add_argument(name, type=int, default=None, help=help_text)
    # Optional horizontal mirroring flags.
    for name, help_text in (
        ("--h-mirror", "Mirror both images horizontally."),
        ("--h-mirror-context", "Mirror context image horizontally."),
        ("--h-mirror-detail", "Mirror detail image horizontally."),
    ):
        parser.add_argument(name, action="store_true", help=help_text)
def process_context_detail_args(args):
    """Load, rotate and mirror the context/detail images per the CLI args.

    Returns a (context_image, detail_image) tuple.
    """
    # TODO: validate destination path up-front
    # TODO: validate mole names up-front
    context_image = mel.lib.image.load_image(args.context)
    detail_image = mel.lib.image.load_image(args.detail)
    if args.rot90:
        context_image = rotated90(context_image, args.rot90)
        detail_image = rotated90(detail_image, args.rot90)
    if args.rot90_context:
        context_image = rotated90(context_image, args.rot90_context)
    if args.rot90_detail:
        # Bug fix: the rotated detail image was previously assigned to
        # context_image, clobbering the context and leaving the detail
        # image un-rotated.
        detail_image = rotated90(detail_image, args.rot90_detail)
    if args.h_mirror:
        context_image = cv2.flip(context_image, 1)
        detail_image = cv2.flip(detail_image, 1)
    if args.h_mirror_context:
        context_image = cv2.flip(context_image, 1)
    if args.h_mirror_detail:
        detail_image = cv2.flip(detail_image, 1)
    return context_image, detail_image
# -----------------------------------------------------------------------------
# Copyright (C) 2015-2018 Angelos Evripiotis.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------ END-OF-FILE ----------------------------------
| {
"repo_name": "aevri/mel",
"path": "mel/lib/common.py",
"copies": "1",
"size": "9500",
"license": "apache-2.0",
"hash": -4382820604492715500,
"line_mean": 27.4431137725,
"line_max": 79,
"alpha_frac": 0.5969473684,
"autogenerated": false,
"ratio": 3.572771718691237,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.966920287812985,
"avg_score": 0.00010324179227751394,
"num_lines": 334
} |
""" Abilities, including both positive and negative.
"""
import numbers
class Base:
    """Common interface for card abilities; subclasses override the hooks."""
    # The name of the card.
    name = "base"
    # Whether applying the card's effect is optional.
    optional = True
    # Whether the agent must stop drawing free cards.
    stop_draw = False
    def prepare_battle_effect(self, context, agent):
        """Hook run before the battle; default does nothing."""
    def battle_effect(self, context, agent):
        """Hook run during the battle; default does nothing."""
    def after_battle_effect(self, context, agent):
        """Hook run after the battle; default does nothing."""
class PositiveIntegral(Base):
    """ Base class containing a positive integral num.
    """
    def __init__(self, num):
        # Validate the type first, then the sign, so a non-integral value
        # always reports the type error.
        if not isinstance(num, numbers.Integral):
            raise ValueError("num should be integral.")
        if num <= 0:
            raise ValueError("num should be positive.")
        # The validated positive amount consumed by subclasses' effects.
        self._num = num
class Null(Base):
    """ An ability that does nothing.
    """
    # Inherits all no-op effect hooks from Base; only the metadata differs.
    name = "none"
    optional = False
class BelowThePile(Base):
    """Move one chosen battlefield card to the bottom of the owner's pile."""
    name = "below the pile"
    def prepare_battle_effect(self, context, agent):
        """Ask the agent to pick a battlefield card, then bury it in the pile."""
        chosen = agent.select(context.visible, context.battle_field.cards)
        context.own_pile.put_below(chosen)
class Cards(PositiveIntegral):
    """Grant num extra free card draws."""
    name = "Cards +{}"
    def __init__(self, num):
        super().__init__(num)
        # Specialise the display name with the concrete amount.
        self.name = Cards.name.format(num)
    def prepare_battle_effect(self, context, agent):
        """Raise the battlefield's free-card allowance by num."""
        context.battle_field.free_card_num += self._num
class Copy(Base):
    """Duplicate the effect of one chosen card."""
    name = "copy"
    def prepare_battle_effect(self, context, agent):
        """Let the agent pick a card from this turn and re-run its effect."""
        chosen = agent.select(context.visible, context.turn.cards)
        chosen.effect(context)
class Destroy(Base):
    """Remove one chosen card from play."""
    name = "destroy"
    def prepare_battle_effect(self, context, agent):
        """Let the agent pick a card from this turn and destroy it."""
        chosen = agent.select(context.visible, context.turn.cards)
        context.battle_field.destroy(chosen)
class Double(Base):
    """Double the fighting value of one chosen card."""
    name = "double"
    def battle_effect(self, context, agent):
        """Let the agent pick a card from this turn and double its value."""
        chosen = agent.select(context.visible, context.turn.cards)
        context.battle_field.double(chosen)
class Exchange(PositiveIntegral):
    """Discard 1 card then draw 1 card. Repeat num times."""
    name = "exchange {}"
    def __init__(self, num):
        super().__init__(num)
        self.name = Exchange.name.format(num)
    def prepare_battle_effect(self, context, agent):
        """Swap num battlefield cards for fresh draws from the owner's pile.

        Bug fix: the original iterated ``for _ in self._num`` over a plain
        int, which raises TypeError; it now iterates range(self._num).
        """
        for _ in range(self._num):
            # NOTE(review): sibling abilities call
            # agent.select(context.visible, cards); this call passes only the
            # card list — confirm which signature is intended.
            card = agent.select(context.battle_field.cards)
            context.battle_field.exchange(card, context.own_pile.draw())
class Life(PositiveIntegral):
    """Gain num life."""
    name = "life +{}"
    def __init__(self, num):
        super().__init__(num)
        # Specialise the display name with the concrete amount.
        self.name = Life.name.format(num)
    def prepare_battle_effect(self, context, agent):
        """Increase the player's life total by num."""
        context.life += self._num
class Step(Base):
    """Reduce the battlefield step counter by one."""
    name = "step -1"
    def battle_effect(self, context, agent):
        """Decrement the battlefield's step."""
        context.battle_field.step -= 1
class Sort(Base):
    """ Sort 3 cards / discard 1 of 3
    """
    name = "sort"
    def prepare_battle_effect(self, context, agent):
        # NOTE: incomplete implementation — the drawn cards are never used
        # or returned yet; the TODOs below describe the remaining work.
        sorted_cards = [context.own_pile.draw() for _ in range(3)]
        # TODO optional discard one card.
        # TODO put 2 ~ 3 cards back to the top of pile.
class HighestZero(Base):
    """Force the highest fighting value to zero; never hits the same card twice."""
    name = "highest = 0"
    optional = False
    def battle_effect(self, context, agent):
        """Record one more highest-to-zero effect on the battlefield."""
        context.battle_field.highest_zero += 1
class NegLife(PositiveIntegral):
    """Lose num life after the battle."""
    name = "life -{}"
    optional = False
    def __init__(self, num):
        super().__init__(num)
        # Specialise the display name with the concrete amount.
        self.name = NegLife.name.format(num)
    def after_battle_effect(self, context, agent):
        """Reduce the player's life total by num."""
        context.life -= self._num
class Stop(Base):
    """ Stop drawing free cards immediately.
    """
    name = "stop"
    # Drawing free cards halts as soon as this card appears.
    stop_draw = True
| {
"repo_name": "cwahbong/tgif-py",
"path": "tgif/ability.py",
"copies": "1",
"size": "4254",
"license": "mit",
"hash": 6088259394291484000,
"line_mean": 21.9945945946,
"line_max": 78,
"alpha_frac": 0.5895627645,
"autogenerated": false,
"ratio": 3.77797513321492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48675378977149203,
"avg_score": null,
"num_lines": null
} |
# Ability definitions
class Ability(object):
    """A class to outline abilities"""
    def __init__(self, name, cooldown):
        """
        :type name: string
        :param name: Name of the ability

        :type cooldown: integer
        :param cooldown: How many turns ability is on cooldown
        """
        self.name = name
        self.cooldown = cooldown
        # Set by the ability manager while the ability is cooling down.
        self.on_cd = False

    def __repr__(self):
        return self.name

    def __str__(self):
        return self.name

    def useable(self):
        """
        Return True if the ability is not currently on cooldown.
        """
        # Idiom fix: the original if/else returned literal True/False;
        # the boolean expression itself is the answer.
        return not self.on_cd
class AbilityRawDamage(Ability):
    """
    An ability that deals a flat amount of damage.
    """
    def __init__(self, name, cooldown, damage):
        """
        :type damage: integer
        :param damage: how much damage the attack does
        """
        Ability.__init__(self, name, cooldown)
        self.damage = damage

    def use(self, champion):
        """
        Deal this ability's damage to ``champion``.
        """
        champion.receive_damage(self.damage)
class AbilityOverTime(Ability):
    """
    An ability that deals damage each turn for a number of turns.
    """
    def __init__(self, name, cooldown, turns, damage):
        Ability.__init__(self, name, cooldown)
        self.turns = turns
        self.damage = damage

    def use(self, champion):
        """
        Apply one tick of damage; the manager calls this once per turn.
        """
        champion.receive_damage(self.damage)
class AbilityHeal(Ability):
    """
    An ability that restores health to a champion.
    """
    def __init__(self, name, cooldown, health):
        Ability.__init__(self, name, cooldown)
        self.health = health

    def use(self, champion):
        """
        Heal ``champion`` by this ability's health amount.
        """
        champion.receive_heal(self.health)
| {
"repo_name": "JakeCowton/Pok-e-Lol",
"path": "champion/ability.py",
"copies": "1",
"size": "1765",
"license": "mit",
"hash": -4780608585899015000,
"line_mean": 21.6282051282,
"line_max": 65,
"alpha_frac": 0.5694050992,
"autogenerated": false,
"ratio": 3.8038793103448274,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48732844095448274,
"avg_score": null,
"num_lines": null
} |
# ability_manager.py
class AbilityManager(object):
    """
    Manages ability cooldowns and damage over time
    """
    def __init__(self, interface):
        """
        :type interface: Interface object
        :param interface: The interface used for outputing data
        """
        self.interface = interface
        # [[ability, receiver, turns_remaining]...]
        # NOTE(review): begin_over_time actually appends a 4th element (the
        # giver), so entries are [ability, receiver, turns_remaining, giver].
        self.abilities_over_time = []
        # [[ability, turns_remaining]...]
        self.abilities_on_cd = []
    def turn(self):
        """
        What needs to be done each turn

        Applies one tick of every active over-time ability and advances all
        cooldowns, removing entries whose time has run out.
        """
        # Manager over time abilities
        queue_for_ot_removal = []
        for i in range(len(self.abilities_over_time)):
            # If he over time ability is on the last turn
            if self.abilities_over_time[i][2] == 1:
                # Use the attack once more
                self.abilities_over_time[i][0].use(self.abilities_over_time[i][1])
                # Queue for removal after iteration complete
                # (removing while iterating would skip entries)
                queue_for_ot_removal.append(self.abilities_over_time[i])
            else:
                # Use the attack
                self.abilities_over_time[i][0].use(self.abilities_over_time[i][1])
                # Reduce the number of turns
                self.abilities_over_time[i][2] -= 1
            # Display that the attack was done
            self.interface.over_time(self.abilities_over_time[i])
        # Remove the abilities queued for removal
        for ability in queue_for_ot_removal:
            self.end_over_time(ability)
        # Manage cooldowns
        queue_for_cd_removal = []
        for i in range(len(self.abilities_on_cd)):
            # If the ability is due to end cooldown period
            if self.abilities_on_cd[i][1] == 1:
                # Queue for removal after iteration
                queue_for_cd_removal.append(self.abilities_on_cd[i])
            else:
                # Decrease the amount of time left on cooldown
                self.abilities_on_cd[i][1] -= 1
        # Remove the abilities queued for removal
        for ability in queue_for_cd_removal:
            self.take_off_cd(ability)
    # Cooldown specific functions
    def put_on_cd(self, ability):
        """
        Put an ability on cooldown

        :type ability: Ability object
        :param ability: The ability to put on cooldown
        """
        # Add it to the list of abilties on cooldown
        # (+1 so the ability sits out `cooldown` full turns)
        self.abilities_on_cd.append([ability, ability.cooldown + 1])
        # Set the ability as `on_cd`
        ability.on_cd = True
    def take_off_cd(self, ability):
        """
        Take an ability off cooldown

        :type ability: Ability object
        :param ability: The ability to take of cooldown

        NOTE(review): despite the doc above, this actually receives the
        [ability, turns_remaining] list entry — hence ability[0] below.
        """
        # Take the ability off cooldown
        self.abilities_on_cd.remove(ability)
        # Set the ability as not on_cd
        ability[0].on_cd = False
    # Over time specific functions
    def begin_over_time(self, ability, receiver, giver):
        """
        Add an ability to the list of abilities actively doing something over
        time

        :type ability: Ability object
        :param ability: The ability being added to the list

        :type receiver: champion object
        :param receiver: The champion the ability is being used on

        :type giver: champion object
        :param giver: The champion using the ability
        """
        # Add to the over time list
        # turns - 1 as 1 hit has already been dealt
        self.abilities_over_time.append([ability, receiver, (ability.turns - 1), giver])
    def end_over_time(self, ability):
        """
        End an ability that is doing something over time

        :type ability: Ability object
        :param ability: The over-time list entry to remove
        """
        # Stop the ability from do its over time affects
        self.abilities_over_time.remove(ability)
| {
"repo_name": "JakeCowton/Pok-e-Lol",
"path": "gameplay/ability_manager.py",
"copies": "1",
"size": "3278",
"license": "mit",
"hash": -8299035789375688000,
"line_mean": 28.0088495575,
"line_max": 82,
"alpha_frac": 0.6949359365,
"autogenerated": false,
"ratio": 3.188715953307393,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.916312371919756,
"avg_score": 0.04410563412196665,
"num_lines": 113
} |
from __future__ import print_function
from . import Image, _imagingmorph
import re
# A 3x3 neighbourhood is 9 bits, so the lookup table has 512 entries.
LUT_SIZE = 1 << 9
class LutBuilder(object):
    """A class for building a MorphLut from a descriptive language

    The input patterns is a list of a strings sequences like these::

        4:(...
           .1.
           111)->1

    (whitespaces including linebreaks are ignored). The option 4
    describes a series of symmetry operations (in this case a
    4-rotation), the pattern is described by:

    - . or X - Ignore
    - 1 - Pixel is on
    - 0 - Pixel is off

    The result of the operation is described after "->" string.
    The default is to return the current pixel value, which is
    returned if no other match is found.

    Operations:

    - 4 - 4 way rotation
    - N - Negate
    - 1 - Dummy op for no other operation (an op must always be given)
    - M - Mirroring

    Example::

        lb = LutBuilder(patterns = ["4:(... .1. 111)->1"])
        lut = lb.build_lut()
    """
    def __init__(self, patterns=None, op_name=None):
        # Patterns supplied directly take precedence; op_name selects one
        # of the built-in pattern sets below.
        if patterns is not None:
            self.patterns = patterns
        else:
            self.patterns = []
        # The 512-entry lookup table; built lazily by build_lut().
        self.lut = None
        if op_name is not None:
            known_patterns = {
                'corner': ['1:(... ... ...)->0',
                           '4:(00. 01. ...)->1'],
                'dilation4': ['4:(... .0. .1.)->1'],
                'dilation8': ['4:(... .0. .1.)->1',
                              '4:(... .0. ..1)->1'],
                'erosion4': ['4:(... .1. .0.)->0'],
                'erosion8': ['4:(... .1. .0.)->0',
                             '4:(... .1. ..0)->0'],
                'edge': ['1:(... ... ...)->0',
                         '4:(.0. .1. ...)->1',
                         '4:(01. .1. ...)->1']
            }
            if op_name not in known_patterns:
                raise Exception('Unknown pattern '+op_name+'!')
            self.patterns = known_patterns[op_name]
    def add_patterns(self, patterns):
        """Append additional pattern strings to this builder."""
        self.patterns += patterns
    def build_default_lut(self):
        """Initialise the LUT so each entry returns the centre pixel's value."""
        symbols = [0, 1]
        m = 1 << 4  # pos of current pixel
        self.lut = bytearray(symbols[(i & m) > 0] for i in range(LUT_SIZE))
    def get_lut(self):
        """Return the current LUT (None until build_lut() has been run)."""
        return self.lut
    def _string_permute(self, pattern, permutation):
        """string_permute takes a pattern and a permutation and returns the
        string permuted according to the permutation list.
        """
        assert(len(permutation) == 9)
        return ''.join(pattern[p] for p in permutation)
    def _pattern_permute(self, basic_pattern, options, basic_result):
        """pattern_permute takes a basic pattern and its result and clones
        the pattern according to the modifications described in the $options
        parameter. It returns a list of all cloned patterns."""
        patterns = [(basic_pattern, basic_result)]
        # rotations
        # (the permutation below is a 90-degree rotation of the 3x3 grid,
        # applied to the last generated pattern four times)
        if '4' in options:
            res = patterns[-1][1]
            for i in range(4):
                patterns.append(
                    (self._string_permute(patterns[-1][0], [6, 3, 0,
                                                            7, 4, 1,
                                                            8, 5, 2]), res))
        # mirror
        if 'M' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                patterns.append(
                    (self._string_permute(pattern, [2, 1, 0,
                                                    5, 4, 3,
                                                    8, 7, 6]), res))
        # negate
        if 'N' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                # Swap 0 and 1
                pattern = (pattern
                           .replace('0', 'Z')
                           .replace('1', '0')
                           .replace('Z', '1'))
                # NOTE(review): res becomes a *string* here, but build_lut()
                # indexes `[0, 1][r]` which needs an int — the N option looks
                # broken (no built-in pattern set uses it); confirm upstream.
                res = '%d' % (1-int(res))
                patterns.append((pattern, res))
        return patterns
    def build_lut(self):
        """Compile all patterns into a morphology lut.

        TBD :Build based on (file) morphlut:modify_lut
        """
        self.build_default_lut()
        patterns = []
        # Parse and create symmetries of the patterns strings
        for p in self.patterns:
            m = re.search(
                r'(\w*):?\s*\((.+?)\)\s*->\s*(\d)', p.replace('\n', ''))
            if not m:
                raise Exception('Syntax error in pattern "'+p+'"')
            options = m.group(1)
            pattern = m.group(2)
            result = int(m.group(3))
            # Get rid of spaces
            pattern = pattern.replace(' ', '').replace('\n', '')
            patterns += self._pattern_permute(pattern, options, result)
        # # Debugging
        # for p,r in patterns:
        #     print(p,r)
        # print('--')
        # compile the patterns into regular expressions for speed
        # ('.'/'X' wildcards both become the [01] character class)
        for i, pattern in enumerate(patterns):
            p = pattern[0].replace('.', 'X').replace('X', '[01]')
            p = re.compile(p)
            patterns[i] = (p, pattern[1])
        # Step through table and find patterns that match.
        # Note that all the patterns are searched. The last one
        # caught overrides
        for i in range(LUT_SIZE):
            # Build the bit pattern
            bitpattern = bin(i)[2:]
            # Left-pad to 9 bits, then reverse so string index 0 is bit 0
            # (top-left neighbour) and index 4 is the centre pixel.
            bitpattern = ('0'*(9-len(bitpattern)) + bitpattern)[::-1]
            for p, r in patterns:
                if p.match(bitpattern):
                    self.lut[i] = [0, 1][r]
        return self.lut
class MorphOp(object):
    """Apply binary morphological operations driven by a 512-entry LUT."""

    def __init__(self, lut=None, op_name=None, patterns=None):
        """Create an operator from an explicit lut, a named op, or patterns."""
        if op_name is not None:
            lut = LutBuilder(op_name=op_name).build_lut()
        elif patterns is not None:
            lut = LutBuilder(patterns=patterns).build_lut()
        self.lut = lut

    def _require_lut(self):
        # Shared guard: every operation needs a loaded LUT.
        if self.lut is None:
            raise Exception('No operator loaded')

    @staticmethod
    def _require_binary(image):
        # Shared guard: operations only work on mode-L (binary) images.
        if image.mode != 'L':
            raise Exception('Image must be binary, meaning it must use mode L')

    def apply(self, image):
        """Run a single morphological operation on an image.

        Returns a tuple of the number of changed pixels and the
        morphed image."""
        self._require_lut()
        self._require_binary(image)
        outimage = Image.new(image.mode, image.size, None)
        count = _imagingmorph.apply(
            bytes(self.lut), image.im.id, outimage.im.id)
        return count, outimage

    def match(self, image):
        """Return a list of (x, y) coordinates matching the operation."""
        self._require_lut()
        self._require_binary(image)
        return _imagingmorph.match(bytes(self.lut), image.im.id)

    def get_on_pixels(self, image):
        """Return a list of (x, y) coordinates of all turned-on pixels."""
        self._require_binary(image)
        return _imagingmorph.get_on_pixels(image.im.id)

    def load_lut(self, filename):
        """Load an operator from an mrl file."""
        with open(filename, 'rb') as f:
            self.lut = bytearray(f.read())
        if len(self.lut) != 8192:
            self.lut = None
            raise Exception('Wrong size operator file!')

    def save_lut(self, filename):
        """Save an operator to an mrl file."""
        self._require_lut()
        with open(filename, 'wb') as f:
            f.write(self.lut)

    def set_lut(self, lut):
        """Set the lut from an external source."""
        self.lut = lut
| {
"repo_name": "ossdemura/django-miniblog",
"path": "Lib/site-packages/PIL/ImageMorph.py",
"copies": "8",
"size": "8313",
"license": "mit",
"hash": -4762195770560904000,
"line_mean": 32.252,
"line_max": 79,
"alpha_frac": 0.5066762901,
"autogenerated": false,
"ratio": 4.076998528690535,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8583674818790535,
"avg_score": null,
"num_lines": null
} |
from __future__ import print_function
from PIL import Image
from PIL import _imagingmorph
import re
LUT_SIZE = 1 << 9
class LutBuilder(object):
    """A class for building a MorphLut from a descriptive language
    The input patterns is a list of a strings sequences like these::
    4:(...
    .1.
    111)->1
    (whitespaces including linebreaks are ignored). The option 4
    describes a series of symmetry operations (in this case a
    4-rotation), the pattern is described by:
    - . or X - Ignore
    - 1 - Pixel is on
    - 0 - Pixel is off
    The result of the operation is described after "->" string.
    The default is to return the current pixel value, which is
    returned if no other match is found.
    Operations:
    - 4 - 4 way rotation
    - N - Negate
    - 1 - Dummy op for no other operation (an op must always be given)
    - M - Mirroring
    Example::
    lb = LutBuilder(patterns = ["4:(... .1. 111)->1"])
    lut = lb.build_lut()
    """
    def __init__(self, patterns=None, op_name=None):
        """Store *patterns*, or look up the pattern list of a named operator."""
        if patterns is not None:
            self.patterns = patterns
        else:
            self.patterns = []
        self.lut = None
        if op_name is not None:
            # Pattern sets for the operators this module knows by name.
            known_patterns = {
                'corner': ['1:(... ... ...)->0',
                           '4:(00. 01. ...)->1'],
                'dilation4': ['4:(... .0. .1.)->1'],
                'dilation8': ['4:(... .0. .1.)->1',
                              '4:(... .0. ..1)->1'],
                'erosion4': ['4:(... .1. .0.)->0'],
                'erosion8': ['4:(... .1. .0.)->0',
                             '4:(... .1. ..0)->0'],
                'edge': ['1:(... ... ...)->0',
                         '4:(.0. .1. ...)->1',
                         '4:(01. .1. ...)->1']
            }
            if op_name not in known_patterns:
                raise Exception('Unknown pattern '+op_name+'!')
            self.patterns = known_patterns[op_name]
    def add_patterns(self, patterns):
        """Append more pattern strings to the current list."""
        self.patterns += patterns
    def build_default_lut(self):
        """Fill self.lut so every entry reproduces the centre pixel unchanged."""
        symbols = [0, 1]
        m = 1 << 4  # pos of current pixel
        self.lut = bytearray(symbols[(i & m) > 0] for i in range(LUT_SIZE))
    def get_lut(self):
        """Return the lut built so far (None until a build method has run)."""
        return self.lut
    def _string_permute(self, pattern, permutation):
        """string_permute takes a pattern and a permutation and returns the
        string permuted according to the permutation list.
        """
        assert(len(permutation) == 9)
        return ''.join(pattern[p] for p in permutation)
    def _pattern_permute(self, basic_pattern, options, basic_result):
        """pattern_permute takes a basic pattern and its result and clones
        the pattern according to the modifications described in the $options
        parameter. It returns a list of all cloned patterns."""
        patterns = [(basic_pattern, basic_result)]
        # rotations
        if '4' in options:
            res = patterns[-1][1]
            # Four successive 90-degree rotations; the fourth reproduces the
            # original pattern, which is harmless for matching.
            for i in range(4):
                patterns.append(
                    (self._string_permute(patterns[-1][0], [6, 3, 0,
                                                            7, 4, 1,
                                                            8, 5, 2]), res))
        # mirror (snapshot the length so clones are not re-mirrored)
        if 'M' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                patterns.append(
                    (self._string_permute(pattern, [2, 1, 0,
                                                    5, 4, 3,
                                                    8, 7, 6]), res))
        # negate
        if 'N' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                # Swap 0 and 1
                pattern = (pattern
                           .replace('0', 'Z')
                           .replace('1', '0')
                           .replace('Z', '1'))
                # Bug fix: the result must stay an int.  It used to be
                # stored as a string ('%d' % ...), which made
                # build_lut()'s `[0, 1][r]` raise TypeError whenever a
                # negated clone matched.
                res = 1 - int(res)
                patterns.append((pattern, res))
        return patterns
    def build_lut(self):
        """Compile all patterns into a morphology lut.
        TBD :Build based on (file) morphlut:modify_lut
        """
        self.build_default_lut()
        patterns = []
        # Parse and create symmetries of the patterns strings
        for p in self.patterns:
            # group(1)=symmetry options, group(2)=3x3 pattern, group(3)=result
            m = re.search(
                r'(\w*):?\s*\((.+?)\)\s*->\s*(\d)', p.replace('\n', ''))
            if not m:
                raise Exception('Syntax error in pattern "'+p+'"')
            options = m.group(1)
            pattern = m.group(2)
            result = int(m.group(3))
            # Get rid of spaces
            pattern = pattern.replace(' ', '').replace('\n', '')
            patterns += self._pattern_permute(pattern, options, result)
        # compile the patterns into regular expressions for speed
        for i, pattern in enumerate(patterns):
            # '.' is folded into 'X' first, then every wildcard becomes [01].
            p = pattern[0].replace('.', 'X').replace('X', '[01]')
            p = re.compile(p)
            patterns[i] = (p, pattern[1])
        # Step through table and find patterns that match.
        # Note that all the patterns are searched. The last one
        # caught overrides
        for i in range(LUT_SIZE):
            # Build the bit pattern: zero-pad to 9 bits and reverse so the
            # least-significant bit becomes the first character.
            bitpattern = bin(i)[2:]
            bitpattern = ('0'*(9-len(bitpattern)) + bitpattern)[::-1]
            for p, r in patterns:
                if p.match(bitpattern):
                    self.lut[i] = [0, 1][r]
        return self.lut
class MorphOp(object):
    """A class for binary morphological operators.

    The operator is a 512-entry lookup table (one byte per 3x3
    neighbourhood) supplied directly, built from a named operator, or
    compiled from pattern strings via LutBuilder.
    """
    def __init__(self,
                 lut=None,
                 op_name=None,
                 patterns=None):
        """Create a binary morphological operator.

        Give either *lut* (a ready-made table), *op_name* (a known
        operator name) or *patterns* (LutBuilder pattern strings);
        *op_name* takes precedence over *patterns*.
        """
        self.lut = lut
        if op_name is not None:
            self.lut = LutBuilder(op_name=op_name).build_lut()
        elif patterns is not None:
            self.lut = LutBuilder(patterns=patterns).build_lut()
    def apply(self, image):
        """Run a single morphological operation on an image.

        Returns a tuple of the number of changed pixels and the
        morphed image."""
        if self.lut is None:
            raise Exception('No operator loaded')
        if image.mode != 'L':
            raise Exception('Image must be binary, meaning it must use mode L')
        outimage = Image.new(image.mode, image.size, None)
        count = _imagingmorph.apply(
            bytes(self.lut), image.im.id, outimage.im.id)
        return count, outimage
    def match(self, image):
        """Get a list of coordinates matching the morphological operation on
        an image.

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels."""
        if self.lut is None:
            raise Exception('No operator loaded')
        if image.mode != 'L':
            raise Exception('Image must be binary, meaning it must use mode L')
        return _imagingmorph.match(bytes(self.lut), image.im.id)
    def get_on_pixels(self, image):
        """Get a list of all turned on pixels in a binary image.

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels."""
        if image.mode != 'L':
            raise Exception('Image must be binary, meaning it must use mode L')
        return _imagingmorph.get_on_pixels(image.im.id)
    def load_lut(self, filename):
        """Load an operator from an mrl file.

        Clears the stored lut and raises if the file is not exactly one
        byte per 3x3 neighbourhood (1 << 9 == 512 bytes).
        """
        with open(filename, 'rb') as f:
            self.lut = bytearray(f.read())
        # Bug fix: the check used to compare against 8192, which rejected
        # every 512-byte table produced by build_lut()/save_lut().
        if len(self.lut) != (1 << 9):  # LUT_SIZE
            self.lut = None
            raise Exception('Wrong size operator file!')
    def save_lut(self, filename):
        """Save an operator to an mrl file"""
        if self.lut is None:
            raise Exception('No operator loaded')
        with open(filename, 'wb') as f:
            f.write(self.lut)
    def set_lut(self, lut):
        """Set the lut from an external source"""
        self.lut = lut
| {
"repo_name": "ryfeus/lambda-packs",
"path": "Pdf_docx_pptx_xlsx_epub_png/source/PIL/ImageMorph.py",
"copies": "14",
"size": "8330",
"license": "mit",
"hash": 7620133925840861000,
"line_mean": 32.187250996,
"line_max": 79,
"alpha_frac": 0.5075630252,
"autogenerated": false,
"ratio": 4.079333986287953,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
from PIL import Image
from PIL import _imagingmorph
import re
LUT_SIZE = 1 << 9  # one lut entry per 3x3 binary neighbourhood (512)
class LutBuilder:
    """A class for building a MorphLut from a descriptive language
    The input patterns is a list of a strings sequences like these:
    4:(...
    .1.
    111)->1
    (whitespaces including linebreaks are ignored). The option 4
    describes a series of symmetry operations (in this case a
    4-rotation), the pattern is described by:
    . or X - Ignore
    1 - Pixel is on
    0 - Pixel is off
    The result of the operation is described after "->" string.
    The default is to return the current pixel value, which is
    returned if no other match is found.
    Operations:
    4 - 4 way rotation
    N - Negate
    1 - Dummy op for no other operation (an op must always be given)
    M - Mirroring
    Example:
    lb = LutBuilder(patterns = ["4:(... .1. 111)->1"])
    lut = lb.build_lut()
    """
    def __init__(self, patterns=None, op_name=None):
        """Store *patterns*, or look up the pattern list of a named operator."""
        if patterns is not None:
            self.patterns = patterns
        else:
            self.patterns = []
        self.lut = None
        if op_name is not None:
            # Pattern sets for the operators this module knows by name.
            known_patterns = {
                'corner': ['1:(... ... ...)->0',
                           '4:(00. 01. ...)->1'],
                'dilation4': ['4:(... .0. .1.)->1'],
                'dilation8': ['4:(... .0. .1.)->1',
                              '4:(... .0. ..1)->1'],
                'erosion4': ['4:(... .1. .0.)->0'],
                'erosion8': ['4:(... .1. .0.)->0',
                             '4:(... .1. ..0)->0'],
                'edge': ['1:(... ... ...)->0',
                         '4:(.0. .1. ...)->1',
                         '4:(01. .1. ...)->1']
            }
            if op_name not in known_patterns:
                raise Exception('Unknown pattern '+op_name+'!')
            self.patterns = known_patterns[op_name]
    def add_patterns(self, patterns):
        """Append more pattern strings to the current list."""
        self.patterns += patterns
    def build_default_lut(self):
        """Fill self.lut so every entry reproduces the centre pixel unchanged."""
        symbols = [0, 1]
        m = 1 << 4  # pos of current pixel
        self.lut = bytearray([symbols[(i & m) > 0] for i in range(LUT_SIZE)])
    def get_lut(self):
        """Return the lut built so far (None until a build method has run)."""
        return self.lut
    def _string_permute(self, pattern, permutation):
        """string_permute takes a pattern and a permutation and returns the
        string permuted according to the permutation list.
        """
        assert(len(permutation) == 9)
        return ''.join([pattern[p] for p in permutation])
    def _pattern_permute(self, basic_pattern, options, basic_result):
        """pattern_permute takes a basic pattern and its result and clones
        the pattern according to the modifications described in the $options
        parameter. It returns a list of all cloned patterns."""
        patterns = [(basic_pattern, basic_result)]
        # rotations
        if '4' in options:
            res = patterns[-1][1]
            # Four successive 90-degree rotations; the fourth reproduces the
            # original pattern, which is harmless for matching.
            for i in range(4):
                patterns.append(
                    (self._string_permute(patterns[-1][0], [6, 3, 0,
                                                            7, 4, 1,
                                                            8, 5, 2]), res))
        # mirror (snapshot the length so clones are not re-mirrored)
        if 'M' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                patterns.append(
                    (self._string_permute(pattern, [2, 1, 0,
                                                    5, 4, 3,
                                                    8, 7, 6]), res))
        # negate
        if 'N' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                # Swap 0 and 1
                pattern = (pattern
                           .replace('0', 'Z')
                           .replace('1', '0')
                           .replace('Z', '1'))
                # Bug fix: keep the result an int; it used to be stored as
                # a string ('%d' % ...), which made build_lut()'s
                # `[0, 1][r]` raise TypeError when a negated clone matched.
                res = 1 - int(res)
                patterns.append((pattern, res))
        return patterns
    def build_lut(self):
        """Compile all patterns into a morphology lut.
        TBD :Build based on (file) morphlut:modify_lut
        """
        self.build_default_lut()
        patterns = []
        # Parse and create symmetries of the patterns strings
        for p in self.patterns:
            # group(1)=symmetry options, group(2)=3x3 pattern, group(3)=result
            m = re.search(
                r'(\w*):?\s*\((.+?)\)\s*->\s*(\d)', p.replace('\n', ''))
            if not m:
                raise Exception('Syntax error in pattern "'+p+'"')
            options = m.group(1)
            pattern = m.group(2)
            result = int(m.group(3))
            # Get rid of spaces
            pattern = pattern.replace(' ', '').replace('\n', '')
            patterns += self._pattern_permute(pattern, options, result)
        # compile the patterns into regular expressions for speed
        for i in range(len(patterns)):
            # '.' is folded into 'X' first, then every wildcard becomes [01].
            p = patterns[i][0].replace('.', 'X').replace('X', '[01]')
            p = re.compile(p)
            patterns[i] = (p, patterns[i][1])
        # Step through table and find patterns that match.
        # Note that all the patterns are searched. The last one
        # caught overrides
        for i in range(LUT_SIZE):
            # Build the bit pattern: zero-pad to 9 bits and reverse so the
            # least-significant bit becomes the first character.
            bitpattern = bin(i)[2:]
            bitpattern = ('0'*(9-len(bitpattern)) + bitpattern)[::-1]
            for p, r in patterns:
                if p.match(bitpattern):
                    self.lut[i] = [0, 1][r]
        return self.lut
class MorphOp:
    """A class for binary morphological operators.

    The operator is a 512-entry lookup table (one byte per 3x3
    neighbourhood) supplied directly, built from a named operator, or
    compiled from pattern strings via LutBuilder.
    """
    def __init__(self,
                 lut=None,
                 op_name=None,
                 patterns=None):
        """Create a binary morphological operator.

        Give either *lut* (a ready-made table), *op_name* (a known
        operator name) or *patterns* (LutBuilder pattern strings);
        *op_name* takes precedence over *patterns*.
        """
        self.lut = lut
        if op_name is not None:
            self.lut = LutBuilder(op_name=op_name).build_lut()
        elif patterns is not None:
            self.lut = LutBuilder(patterns=patterns).build_lut()
    def apply(self, image):
        """Run a single morphological operation on an image
        Returns a tuple of the number of changed pixels and the
        morphed image"""
        if self.lut is None:
            raise Exception('No operator loaded')
        outimage = Image.new(image.mode, image.size, None)
        count = _imagingmorph.apply(
            bytes(self.lut), image.im.id, outimage.im.id)
        return count, outimage
    def match(self, image):
        """Get a list of coordinates matching the morphological operation on
        an image.
        Returns a list of tuples of (x,y) coordinates
        of all matching pixels."""
        if self.lut is None:
            raise Exception('No operator loaded')
        return _imagingmorph.match(bytes(self.lut), image.im.id)
    def get_on_pixels(self, image):
        """Get a list of all turned on pixels in a binary image
        Returns a list of tuples of (x,y) coordinates
        of all matching pixels."""
        return _imagingmorph.get_on_pixels(image.im.id)
    def load_lut(self, filename):
        """Load an operator from an mrl file.

        Clears the stored lut and raises if the file is not exactly one
        byte per 3x3 neighbourhood (1 << 9 == 512 bytes).
        """
        with open(filename, 'rb') as f:
            self.lut = bytearray(f.read())
        # Bug fix: the check used to compare against 8192, which rejected
        # every 512-byte table produced by build_lut()/save_lut().
        if len(self.lut) != (1 << 9):  # LUT_SIZE
            self.lut = None
            raise Exception('Wrong size operator file!')
    def save_lut(self, filename):
        """Save an operator to an mrl file"""
        if self.lut is None:
            raise Exception('No operator loaded')
        with open(filename, 'wb') as f:
            f.write(self.lut)
    def set_lut(self, lut):
        """Set the lut from an external source"""
        self.lut = lut
# End of file
| {
"repo_name": "rec/echomesh",
"path": "lib/darwin/PIL/ImageMorph.py",
"copies": "4",
"size": "7946",
"license": "mit",
"hash": 3513508162133477000,
"line_mean": 31.5655737705,
"line_max": 77,
"alpha_frac": 0.5033979361,
"autogenerated": false,
"ratio": 4.066530194472876,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 244
} |
from PIL import Image
from PIL import _imagingmorph
import re
LUT_SIZE = 1 << 9  # one lut entry per 3x3 binary neighbourhood (512)
class LutBuilder(object):
    """A class for building a MorphLut from a descriptive language
    The input patterns is a list of a strings sequences like these::
    4:(...
    .1.
    111)->1
    (whitespaces including linebreaks are ignored). The option 4
    describes a series of symmetry operations (in this case a
    4-rotation), the pattern is described by:
    - . or X - Ignore
    - 1 - Pixel is on
    - 0 - Pixel is off
    The result of the operation is described after "->" string.
    The default is to return the current pixel value, which is
    returned if no other match is found.
    Operations:
    - 4 - 4 way rotation
    - N - Negate
    - 1 - Dummy op for no other operation (an op must always be given)
    - M - Mirroring
    Example::
    lb = LutBuilder(patterns = ["4:(... .1. 111)->1"])
    lut = lb.build_lut()
    """
    def __init__(self, patterns=None, op_name=None):
        """Store *patterns*, or look up the pattern list of a named operator."""
        if patterns is not None:
            self.patterns = patterns
        else:
            self.patterns = []
        self.lut = None
        if op_name is not None:
            # Pattern sets for the operators this module knows by name.
            known_patterns = {
                'corner': ['1:(... ... ...)->0',
                           '4:(00. 01. ...)->1'],
                'dilation4': ['4:(... .0. .1.)->1'],
                'dilation8': ['4:(... .0. .1.)->1',
                              '4:(... .0. ..1)->1'],
                'erosion4': ['4:(... .1. .0.)->0'],
                'erosion8': ['4:(... .1. .0.)->0',
                             '4:(... .1. ..0)->0'],
                'edge': ['1:(... ... ...)->0',
                         '4:(.0. .1. ...)->1',
                         '4:(01. .1. ...)->1']
            }
            if op_name not in known_patterns:
                raise Exception('Unknown pattern '+op_name+'!')
            self.patterns = known_patterns[op_name]
    def add_patterns(self, patterns):
        """Append more pattern strings to the current list."""
        self.patterns += patterns
    def build_default_lut(self):
        """Fill self.lut so every entry reproduces the centre pixel unchanged."""
        symbols = [0, 1]
        m = 1 << 4  # pos of current pixel
        self.lut = bytearray([symbols[(i & m) > 0] for i in range(LUT_SIZE)])
    def get_lut(self):
        """Return the lut built so far (None until a build method has run)."""
        return self.lut
    def _string_permute(self, pattern, permutation):
        """string_permute takes a pattern and a permutation and returns the
        string permuted according to the permutation list.
        """
        assert(len(permutation) == 9)
        return ''.join([pattern[p] for p in permutation])
    def _pattern_permute(self, basic_pattern, options, basic_result):
        """pattern_permute takes a basic pattern and its result and clones
        the pattern according to the modifications described in the $options
        parameter. It returns a list of all cloned patterns."""
        patterns = [(basic_pattern, basic_result)]
        # rotations
        if '4' in options:
            res = patterns[-1][1]
            # Four successive 90-degree rotations; the fourth reproduces the
            # original pattern, which is harmless for matching.
            for i in range(4):
                patterns.append(
                    (self._string_permute(patterns[-1][0], [6, 3, 0,
                                                            7, 4, 1,
                                                            8, 5, 2]), res))
        # mirror (snapshot the length so clones are not re-mirrored)
        if 'M' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                patterns.append(
                    (self._string_permute(pattern, [2, 1, 0,
                                                    5, 4, 3,
                                                    8, 7, 6]), res))
        # negate
        if 'N' in options:
            n = len(patterns)
            for pattern, res in patterns[0:n]:
                # Swap 0 and 1
                pattern = (pattern
                           .replace('0', 'Z')
                           .replace('1', '0')
                           .replace('Z', '1'))
                # Bug fix: keep the result an int; it used to be stored as
                # a string ('%d' % ...), which made build_lut()'s
                # `[0, 1][r]` raise TypeError when a negated clone matched.
                res = 1 - int(res)
                patterns.append((pattern, res))
        return patterns
    def build_lut(self):
        """Compile all patterns into a morphology lut.
        TBD :Build based on (file) morphlut:modify_lut
        """
        self.build_default_lut()
        patterns = []
        # Parse and create symmetries of the patterns strings
        for p in self.patterns:
            # group(1)=symmetry options, group(2)=3x3 pattern, group(3)=result
            m = re.search(
                r'(\w*):?\s*\((.+?)\)\s*->\s*(\d)', p.replace('\n', ''))
            if not m:
                raise Exception('Syntax error in pattern "'+p+'"')
            options = m.group(1)
            pattern = m.group(2)
            result = int(m.group(3))
            # Get rid of spaces
            pattern = pattern.replace(' ', '').replace('\n', '')
            patterns += self._pattern_permute(pattern, options, result)
        # compile the patterns into regular expressions for speed
        for i in range(len(patterns)):
            # '.' is folded into 'X' first, then every wildcard becomes [01].
            p = patterns[i][0].replace('.', 'X').replace('X', '[01]')
            p = re.compile(p)
            patterns[i] = (p, patterns[i][1])
        # Step through table and find patterns that match.
        # Note that all the patterns are searched. The last one
        # caught overrides
        for i in range(LUT_SIZE):
            # Build the bit pattern: zero-pad to 9 bits and reverse so the
            # least-significant bit becomes the first character.
            bitpattern = bin(i)[2:]
            bitpattern = ('0'*(9-len(bitpattern)) + bitpattern)[::-1]
            for p, r in patterns:
                if p.match(bitpattern):
                    self.lut[i] = [0, 1][r]
        return self.lut
class MorphOp(object):
    """A class for binary morphological operators.

    The operator is a 512-entry lookup table (one byte per 3x3
    neighbourhood) supplied directly, built from a named operator, or
    compiled from pattern strings via LutBuilder.
    """
    def __init__(self,
                 lut=None,
                 op_name=None,
                 patterns=None):
        """Create a binary morphological operator.

        Give either *lut* (a ready-made table), *op_name* (a known
        operator name) or *patterns* (LutBuilder pattern strings);
        *op_name* takes precedence over *patterns*.
        """
        self.lut = lut
        if op_name is not None:
            self.lut = LutBuilder(op_name=op_name).build_lut()
        elif patterns is not None:
            self.lut = LutBuilder(patterns=patterns).build_lut()
    def apply(self, image):
        """Run a single morphological operation on an image.

        Returns a tuple of the number of changed pixels and the
        morphed image"""
        if self.lut is None:
            raise Exception('No operator loaded')
        if image.mode != 'L':
            raise Exception('Image must be binary, meaning it must use mode L')
        outimage = Image.new(image.mode, image.size, None)
        count = _imagingmorph.apply(
            bytes(self.lut), image.im.id, outimage.im.id)
        return count, outimage
    def match(self, image):
        """Get a list of coordinates matching the morphological operation on
        an image.

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels."""
        if self.lut is None:
            raise Exception('No operator loaded')
        if image.mode != 'L':
            raise Exception('Image must be binary, meaning it must use mode L')
        return _imagingmorph.match(bytes(self.lut), image.im.id)
    def get_on_pixels(self, image):
        """Get a list of all turned on pixels in a binary image.

        Returns a list of tuples of (x,y) coordinates
        of all matching pixels."""
        if image.mode != 'L':
            raise Exception('Image must be binary, meaning it must use mode L')
        return _imagingmorph.get_on_pixels(image.im.id)
    def load_lut(self, filename):
        """Load an operator from an mrl file.

        Clears the stored lut and raises if the file is not exactly one
        byte per 3x3 neighbourhood (1 << 9 == 512 bytes).
        """
        with open(filename, 'rb') as f:
            self.lut = bytearray(f.read())
        # Bug fix: the check used to compare against 8192, which rejected
        # every 512-byte table produced by build_lut()/save_lut().
        if len(self.lut) != (1 << 9):  # LUT_SIZE
            self.lut = None
            raise Exception('Wrong size operator file!')
    def save_lut(self, filename):
        """Save an operator to an mrl file"""
        if self.lut is None:
            raise Exception('No operator loaded')
        with open(filename, 'wb') as f:
            f.write(self.lut)
    def set_lut(self, lut):
        """Set the lut from an external source"""
        self.lut = lut
# End of file
| {
"repo_name": "BaichuanWu/Blog_on_django",
"path": "site-packages/PIL/ImageMorph.py",
"copies": "19",
"size": "8308",
"license": "mit",
"hash": 3173045108083631600,
"line_mean": 32.0996015936,
"line_max": 79,
"alpha_frac": 0.5060182956,
"autogenerated": false,
"ratio": 4.072549019607843,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 251
} |
import re
from . import Image, _imagingmorph
LUT_SIZE = 1 << 9  # one lut entry per 3x3 binary neighbourhood (512)
# fmt: off
# Index permutation that rotates a flattened 3x3 pattern by 90 degrees.
ROTATION_MATRIX = [
    6, 3, 0,
    7, 4, 1,
    8, 5, 2,
]
# Index permutation that mirrors a flattened 3x3 pattern left/right.
MIRROR_MATRIX = [
    2, 1, 0,
    5, 4, 3,
    8, 7, 6,
]
# fmt: on
class LutBuilder:
    """Build a 512-entry morphology lut from a small pattern language.

    Each pattern string looks like ``"4:(... .1. 111)->1"``: an optional
    symmetry prefix (``4`` rotate, ``M`` mirror, ``N`` negate, ``1``
    no-op), a 3x3 pattern where ``1``/``0`` test a pixel and ``.``/``X``
    ignore it, and the value the centre pixel should take on a match.
    Whitespace (including line breaks) inside the pattern is ignored.
    Table entries matched by no pattern keep the current pixel value.

    Example::

        lb = LutBuilder(patterns=["4:(... .1. 111)->1"])
        lut = lb.build_lut()
    """

    def __init__(self, patterns=None, op_name=None):
        """Remember *patterns*, or fetch the pattern set of a named operator."""
        self.patterns = [] if patterns is None else patterns
        self.lut = None
        if op_name is not None:
            known_patterns = {
                "corner": ["1:(... ... ...)->0", "4:(00. 01. ...)->1"],
                "dilation4": ["4:(... .0. .1.)->1"],
                "dilation8": ["4:(... .0. .1.)->1", "4:(... .0. ..1)->1"],
                "erosion4": ["4:(... .1. .0.)->0"],
                "erosion8": ["4:(... .1. .0.)->0", "4:(... .1. ..0)->0"],
                "edge": [
                    "1:(... ... ...)->0",
                    "4:(.0. .1. ...)->1",
                    "4:(01. .1. ...)->1",
                ],
            }
            if op_name not in known_patterns:
                raise Exception("Unknown pattern " + op_name + "!")
            self.patterns = known_patterns[op_name]

    def add_patterns(self, patterns):
        """Append more pattern strings to the current list."""
        self.patterns.extend(patterns)

    def build_default_lut(self):
        """Reset the lut so every entry just echoes the centre pixel."""
        center = 1 << 4  # bit position of the centre pixel in the 3x3 patch
        self.lut = bytearray(1 if i & center else 0 for i in range(LUT_SIZE))

    def get_lut(self):
        """Return the lut built so far (None before any build)."""
        return self.lut

    def _string_permute(self, pattern, permutation):
        """Reorder the 9 characters of *pattern* by the given index list."""
        assert len(permutation) == 9
        return "".join([pattern[index] for index in permutation])

    def _pattern_permute(self, basic_pattern, options, basic_result):
        """Clone (pattern, result) under the symmetry ops named in *options*.

        Returns the list of all clones, the original pair first."""
        variants = [(basic_pattern, basic_result)]
        if "4" in options:
            result = variants[-1][1]
            # four quarter-turns; each rotates the previous variant
            for _ in range(4):
                rotated = self._string_permute(variants[-1][0], ROTATION_MATRIX)
                variants.append((rotated, result))
        if "M" in options:
            # snapshot so the mirrored clones are not mirrored again
            for source, result in list(variants):
                variants.append(
                    (self._string_permute(source, MIRROR_MATRIX), result)
                )
        if "N" in options:
            for source, result in list(variants):
                # exchange the 0 and 1 symbols via a scratch character
                flipped = (
                    source.replace("0", "Z").replace("1", "0").replace("Z", "1")
                )
                variants.append((flipped, 1 - int(result)))
        return variants

    def build_lut(self):
        """Compile all patterns into a morphology lut.

        TBD :Build based on (file) morphlut:modify_lut
        """
        self.build_default_lut()
        expanded = []
        # parse each pattern string and expand its symmetries
        for raw in self.patterns:
            parsed = re.search(
                r"(\w*):?\s*\((.+?)\)\s*->\s*(\d)", raw.replace("\n", "")
            )
            if not parsed:
                raise Exception('Syntax error in pattern "' + raw + '"')
            options = parsed.group(1)
            body = parsed.group(2).replace(" ", "").replace("\n", "")
            expanded += self._pattern_permute(body, options, int(parsed.group(3)))
        # turn each pattern into a regex: '.' and 'X' both become [01]
        compiled = [
            (re.compile(body.replace(".", "X").replace("X", "[01]")), result)
            for body, result in expanded
        ]
        # walk every 3x3 neighbourhood; the last matching pattern wins
        for index in range(LUT_SIZE):
            # 9 bits, zero-padded, reversed so the LSB comes first
            bits = bin(index)[2:].zfill(9)[::-1]
            for regex, result in compiled:
                if regex.match(bits):
                    self.lut[index] = [0, 1][result]
        return self.lut
class MorphOp:
    """Binary morphological operator driven by a 512-entry lookup table."""

    def __init__(self, lut=None, op_name=None, patterns=None):
        """Create the operator from a raw lut, a named operator or patterns."""
        if op_name is not None:
            self.lut = LutBuilder(op_name=op_name).build_lut()
        elif patterns is not None:
            self.lut = LutBuilder(patterns=patterns).build_lut()
        else:
            self.lut = lut

    def apply(self, image):
        """Apply the operator to *image* once.

        Returns ``(changed_pixel_count, morphed_image)``."""
        if self.lut is None:
            raise Exception("No operator loaded")
        if image.mode != "L":
            raise Exception("Image must be binary, meaning it must use mode L")
        result = Image.new(image.mode, image.size, None)
        changed = _imagingmorph.apply(bytes(self.lut), image.im.id, result.im.id)
        return changed, result

    def match(self, image):
        """List the (x, y) coordinates where the operator matches *image*.

        See :ref:`coordinate-system`."""
        if self.lut is None:
            raise Exception("No operator loaded")
        if image.mode != "L":
            raise Exception("Image must be binary, meaning it must use mode L")
        return _imagingmorph.match(bytes(self.lut), image.im.id)

    def get_on_pixels(self, image):
        """List the (x, y) coordinates of all switched-on pixels.

        See :ref:`coordinate-system`."""
        if image.mode != "L":
            raise Exception("Image must be binary, meaning it must use mode L")
        return _imagingmorph.get_on_pixels(image.im.id)

    def load_lut(self, filename):
        """Read an operator table from an mrl file."""
        with open(filename, "rb") as fp:
            self.lut = bytearray(fp.read())
        if len(self.lut) != LUT_SIZE:
            self.lut = None
            raise Exception("Wrong size operator file!")

    def save_lut(self, filename):
        """Write the current operator table to an mrl file."""
        if self.lut is None:
            raise Exception("No operator loaded")
        with open(filename, "wb") as fp:
            fp.write(self.lut)

    def set_lut(self, lut):
        """Install *lut* as the current operator table."""
        self.lut = lut
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/PIL/ImageMorph.py",
"copies": "1",
"size": "7896",
"license": "mit",
"hash": -5161624055058367000,
"line_mean": 31.2285714286,
"line_max": 87,
"alpha_frac": 0.5374873354,
"autogenerated": false,
"ratio": 3.863013698630137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9899285385480758,
"avg_score": 0.0002431297098755619,
"num_lines": 245
} |
# A binary ordered tree example
class CNode:
    """A single node of a binary ordered tree."""
    # Class-level defaults; every instance shadows them in __init__.
    left , right, data = None, None, 0
    def __init__(self, data):
        # initializes the data members: empty children, given payload
        self.left = None
        self.right = None
        self.data = data
class CBOrdTree:
    """A binary ordered (search) tree built from CNode objects.

    NOTE(review): printTree/printRevTree use Python 2 print statements;
    this recipe is Python 2 code.
    """
    def __init__(self):
        # initializes the root member
        self.root = None
    def addNode(self, data):
        # creates a new node and returns it
        return CNode(data)
    def insert(self, root, data):
        # inserts a new data value below *root*; duplicates go left
        if root == None:
            # it there isn't any data
            # adds it and returns
            return self.addNode(data)
        else:
            # enters into the tree
            if data <= root.data:
                # if the data is less than the stored one
                # goes into the left-sub-tree
                root.left = self.insert(root.left, data)
            else:
                # processes the right-sub-tree
                root.right = self.insert(root.right, data)
            return root
    def lookup(self, root, target):
        # looks for a value into the tree; returns 1 if found, else 0
        if root == None:
            return 0
        else:
            # if it has found it...
            if target == root.data:
                return 1
            else:
                if target < root.data:
                    # left side
                    return self.lookup(root.left, target)
                else:
                    # right side
                    return self.lookup(root.right, target)
    def minValue(self, root):
        # goes down into the left
        # arm and returns the last value (the tree minimum)
        while(root.left != None):
            root = root.left
        return root.data
    def maxDepth(self, root):
        # height of the subtree: longest root-to-leaf path, 0 when empty
        if root == None:
            return 0
        else:
            # computes the two depths
            ldepth = self.maxDepth(root.left)
            rdepth = self.maxDepth(root.right)
            # returns the appropriate depth
            return max(ldepth, rdepth) + 1
    def size(self, root):
        # number of nodes in the subtree rooted at *root*
        if root == None:
            return 0
        else:
            return self.size(root.left) + 1 + self.size(root.right)
    def printTree(self, root):
        # prints the tree path (in-order traversal: ascending values)
        if root == None:
            pass
        else:
            self.printTree(root.left)
            print root.data,
            self.printTree(root.right)
    def printRevTree(self, root):
        # prints the tree path in reverse
        # order (descending values)
        if root == None:
            pass
        else:
            self.printRevTree(root.right)
            print root.data,
            self.printRevTree(root.left)
if __name__ == "__main__":
    # Interactive demo (Python 2: raw_input and print statements).
    # create the binary tree
    BTree = CBOrdTree()
    # add the root node
    root = BTree.addNode(0)
    # ask the user to insert values
    for i in range(0, 5):
        data = int(raw_input("insert the node value nr %d: " % i))
        # insert values
        BTree.insert(root, data)
    print
    BTree.printTree(root)
    print
    BTree.printRevTree(root)
    print
    data = int(raw_input("insert a value to find: "))
    # report whether the value is stored in the tree
    if BTree.lookup(root, data):
        print "found"
    else:
        print "not found"
    # smallest value, height and node count of the whole tree
    print BTree.minValue(root)
    print BTree.maxDepth(root)
    print BTree.size(root)
| {
"repo_name": "ActiveState/code",
"path": "recipes/Python/286239_Binary_ordered_tree/recipe-286239.py",
"copies": "1",
"size": "3357",
"license": "mit",
"hash": -1125595048848715900,
"line_mean": 26.975,
"line_max": 67,
"alpha_frac": 0.5111706881,
"autogenerated": false,
"ratio": 4.206766917293233,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5217937605393232,
"avg_score": null,
"num_lines": null
} |
# A binary search number guesser
# Uses Python3
from math import ceil, log

lower_bound = 0     # smallest value still possible
upper_bound = 1000  # largest value still possible
guesses_made = 0    # questions asked so far
# Binary search needs at most ceil(log2(range)) questions.
max_guesses = ceil(log(upper_bound - lower_bound, 2))
reply = 'h'         # pretend a guess of the upper bound was "too high"
midpoint = upper_bound

print("Think of a number between {0} and {1} and I will try to guess it in "
      "{2} guesses or less".format(lower_bound, upper_bound, max_guesses))
print("If I guess too high, let me know by pressing the 'h' key.")
print("If I guess too low, let me know by pressing the 'l' key.")
print("But if I guess correctly, let me know by pressing the 'y' key.")

while lower_bound < upper_bound:
    if reply == 'y':  # previous guess was correct
        print("Yay! I guessed it in {0} guesses!".format(guesses_made))
        break
    if reply == 'h':  # previous guess was too high
        upper_bound = midpoint
    if reply == 'l':  # previous guess was too low
        lower_bound = midpoint
    midpoint = ceil((upper_bound + lower_bound)/2)  # halve the interval
    reply = input("Is your number {0}? ".format(midpoint))
    guesses_made += 1
| {
"repo_name": "ericpoe/pyNumGuesser",
"path": "numGuesser.py",
"copies": "1",
"size": "1236",
"license": "mit",
"hash": -6089007737824386000,
"line_mean": 40.2,
"line_max": 78,
"alpha_frac": 0.6593851133,
"autogenerated": false,
"ratio": 3.501416430594901,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4660801543894901,
"avg_score": null,
"num_lines": null
} |
"""A binary to train Adience using a single GPU.
Accuracy:
Speed: With batch_size 128.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
#from tensorflow.models.image.cifar10 import cifar10
import adience
FLAGS = tf.app.flags.FLAGS
# Command-line flags: checkpoint/event directory, training length and
# optional device-placement logging.
tf.app.flags.DEFINE_string('train_dir', '../../MLtrained',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 100000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
def train(train_continue):
  """Train Adience for a number of steps.

  Args:
    train_continue: when truthy, restore the latest checkpoint found in
      FLAGS.train_dir (moving-average variable names) and resume from its
      step count; otherwise train from scratch with a plain Saver.
  """
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False)
    # Get images and labels for Adience.
    images, labels = adience.distorted_inputs()
    print("distorted images")
    #print(labels)
    # Build a Graph that computes the logits predictions from the
    # inference model.
    print('call inference')
    logits = adience.inference(images)
    # Calculate loss.
    print('call loss')
    loss = adience.loss(logits, labels)
    # Build a Graph that trains the model with one batch of examples and
    # updates the model parameters.
    print('train_op')
    train_op = adience.train(loss, global_step)
    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()
    # Build an initialization operation to run below.
    init = tf.initialize_all_variables()
    # Start running operations on the Graph.
    sess = tf.Session(config=tf.ConfigProto(
        log_device_placement=FLAGS.log_device_placement))
    sess.run(init)
    # Create a saver.
    if not train_continue:
      saver = tf.train.Saver(tf.all_variables())
      load_step = 0
    else:
      # Restore the moving average version of the learned variables for eval.
      variable_averages = tf.train.ExponentialMovingAverage(
          adience.MOVING_AVERAGE_DECAY)
      variables_to_restore = {}
      for v in tf.all_variables():
        if v in tf.trainable_variables():
          # Trainable variables were saved under their shadow (average) name.
          restore_name = variable_averages.average_name(v)
        else:
          restore_name = v.op.name
        variables_to_restore[restore_name] = v
      saver = tf.train.Saver(variables_to_restore)
      ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
      if ckpt and ckpt.model_checkpoint_path:
        print("Checkpoint found")
        # Restores from checkpoint
        saver.restore(sess, ckpt.model_checkpoint_path)
        # Assuming model_checkpoint_path looks something like:
        # /my-favorite-path/cifar10_train/model.ckpt-0,
        # extract global_step from it.
        load_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]) + 1
        print("Start from step: {}".format(load_step))
      else:
        print('No checkpoint file found')
        # NOTE(review): load_step is not assigned on this path, so the
        # xrange() below would raise NameError — confirm intent.
    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, graph_def=sess.graph_def)
    for step in xrange(FLAGS.max_steps - load_step):
      # continue
      step += load_step
      start_time = time.time()
      _, loss_value = sess.run([train_op, loss])
      duration = time.time() - start_time
      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
      if step % 10 == 0:
        num_examples_per_step = FLAGS.batch_size
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = float(duration)
        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print (format_str % (datetime.now(), step, loss_value,
                             examples_per_sec, sec_per_batch))
      if step % 100 == 0:
        summary_str = sess.run(summary_op)
        summary_writer.add_summary(summary_str, step)
      # Save the model checkpoint periodically.
      if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
    # NOTE(review): this is a for/else — it runs whenever the loop finishes
    # without `break`, i.e. also after a normal full training run, not only
    # when the step range is empty. Confirm that this is the intent.
    else:
      print("Step already over limit: {}".format(FLAGS.max_steps))
def main(argv=None):  # pylint: disable=unused-argument
    """Decide interactively whether to resume or restart training.

    If FLAGS.train_dir exists the user is asked whether to continue from the
    checkpoints in it; answering no deletes and recreates the directory.  If
    it does not exist, it is created and training starts from scratch.
    """
    # cifar10.maybe_download_and_extract()
    # Continue training or remove current training data if existing data
    if gfile.Exists(FLAGS.train_dir):
        print("Train data found")
        train_continue = None
        while train_continue is None:
            # BUG FIX: str.lower() returns a new string; the original code
            # discarded the result, so 'Y'/'Yes' were rejected as wrong input.
            input_continue = raw_input("Continue training? (y/n): ").lower()
            if input_continue in ('y', 'yes'):
                train_continue = True
            elif input_continue in ('n', 'no'):
                train_continue = False
            else:
                print("Wrong input, please type y or n.")
        if train_continue:
            print("Continue True\n")
            train(True)
        else:
            print("Continue False, delete data\n")
            gfile.DeleteRecursively(FLAGS.train_dir)
            gfile.MakeDirs(FLAGS.train_dir)
            train(False)
    else:
        # No previous training data at all.
        print("No trainings data found\n")
        gfile.MakeDirs(FLAGS.train_dir)
        train(False)


if __name__ == '__main__':
    tf.app.run()
| {
"repo_name": "NumesSanguis/MLTensor",
"path": "adience/adience_train.py",
"copies": "1",
"size": "6324",
"license": "apache-2.0",
"hash": -9160484344937814000,
"line_mean": 33.7472527473,
"line_max": 93,
"alpha_frac": 0.5725806452,
"autogenerated": false,
"ratio": 4.1936339522546415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5266214597454641,
"avg_score": null,
"num_lines": null
} |
"""A binary to train BiLSTM on the KTH data set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import video_train
import tensorflow as tf
from data.kth_data import KTHData
from data.lca_data import LCAData
# Command-line flags.  BUG FIX: image_height/image_width/channels were
# declared with DEFINE_string but given integer defaults; DEFINE_integer
# keeps the in-code defaults identical while also parsing command-line
# overrides as ints.
tf.app.flags.DEFINE_string("data_path", None,
                           "Where the training/validation data is stored.")
tf.app.flags.DEFINE_string("save_path", 'result',
                           "Model output directory.")
tf.app.flags.DEFINE_string("dataset", 'KTH',
                           "Select the dataset, default is KTH dataset, choice between (KTH, LCA)")
tf.app.flags.DEFINE_integer("image_height", 120,
                            "Image height")
tf.app.flags.DEFINE_integer("image_width", 160,
                            "Image width")
tf.app.flags.DEFINE_integer("channels", 1,
                            "Number of image channels")  # help text previously said "Image width" (copy-paste error)
FLAGS = tf.app.flags.FLAGS
# Hyper-parameters consumed by video_train.train(); 'num_classes' is added
# at runtime in main() from the selected dataset.
config = {
    'epoch' : 6,
    'lr_decay' : 0.8,
    'keep_prob' : 0.8,
    'init_scale' : 0.1, # weight initialization value (-init_scale, init_scale)
    'batch_size' : 20,
    'learning_rate' : 0.5,
    'max_grad_norm' : 5,
    'decay_begin_epoch' : 2,
    'examples_per_shard' : 23,
    'input_queue_memory_factor' : 2,
    'num_layers' : 2,
    # num_steps: This value must be the same as the sequence_length value,
    # inside the data/convert_to_records.py when you generate the data.
    'num_steps' : 16,
    'hidden_size' : 200,
}
def main(_):
    """Validate flags, build the selected training dataset, and launch training.

    Raises:
        ValueError: if --data_path is unset, the dataset name is unknown,
            or no data files are found.
    """
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to KTH data directory")
    # Select the dataset
    train_data = None
    if FLAGS.dataset == 'KTH':
        train_data = KTHData('train')
    elif FLAGS.dataset == 'LCA':
        train_data = LCAData('train')
    # Fail fast with real exceptions instead of `assert`, which is stripped
    # when Python runs with -O.
    if train_data is None:
        raise ValueError("Unknown --dataset %r, expected 'KTH' or 'LCA'" % FLAGS.dataset)
    if not train_data.data_files():
        raise ValueError("No data files found under --data_path")
    config['num_classes'] = train_data.num_classes()
    # Start training
    video_train.train(config, train_data)


if __name__ == '__main__':
    tf.app.run()
| {
"repo_name": "frankgu/tensorflow_video_rnn",
"path": "main.py",
"copies": "1",
"size": "1977",
"license": "mit",
"hash": -3745662834997159400,
"line_mean": 28.9545454545,
"line_max": 93,
"alpha_frac": 0.6130500759,
"autogenerated": false,
"ratio": 3.300500834724541,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44135509106245413,
"avg_score": null,
"num_lines": null
} |
"""A binary to train CIFAR-10 using a single GPU.
Accuracy:
cifar10_train.py achieves ~86% accuracy after 100K steps (256 epochs of
data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import time
import tensorflow.python.platform
from tensorflow.python.platform import gfile
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.models.image.cifar10 import cifar10
# Command-line flags for the single-GPU CIFAR-10 trainer.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', './cifar10_train', # '/tmp/cifar10_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 40000, # 1000000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
def train():
    """Train CIFAR-10 for a number of steps, resuming from a checkpoint if one exists."""
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)
        # Get images and labels for CIFAR-10.
        images, labels = cifar10.distorted_inputs()
        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = cifar10.inference(images)
        # Calculate loss.
        loss = cifar10.loss(logits, labels)
        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        train_op = cifar10.train(loss, global_step)
        # Create a saver.
        saver = tf.train.Saver(tf.all_variables())
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        # Build an initialization operation to run below.
        init = tf.initialize_all_variables()
        # Start running operations on the Graph.
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))
        ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
        # NOTE: the Python int below shadows the `global_step` Variable above;
        # from here on it holds the step index to resume the training loop at.
        global_step = 0
        if ckpt and gfile.Exists(ckpt.model_checkpoint_path):
            print("Reading model parameters from %s" %
                  ckpt.model_checkpoint_path)
            # Restore weights; variable initialization is skipped on purpose
            # (the restore provides all variable values).
            saver.restore(sess, ckpt.model_checkpoint_path)
            # Checkpoint paths look like ".../model.ckpt-1234"; the trailing
            # number is the step at which the checkpoint was written.
            global_step = int(ckpt.model_checkpoint_path.split(
                '/')[-1].split('-')[-1])
        else:
            sess.run(init)
        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                                graph_def=sess.graph_def)
        for step in xrange(global_step, FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time
            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
            # Log throughput every 10 steps.
            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                              'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))
            # Write summaries every 100 steps.
            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
            # Save the model checkpoint periodically.
            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None):  # pylint: disable=unused-argument
    """Fetch the CIFAR-10 data if needed, ensure the log dir exists, then train."""
    cifar10.maybe_download_and_extract()
    # An existing train_dir is kept so that train() can resume from the
    # checkpoints inside it (the multi-GPU sibling script wipes it instead).
    if not gfile.Exists(FLAGS.train_dir):
        gfile.MakeDirs(FLAGS.train_dir)
    train()


if __name__ == '__main__':
    tf.app.run()
| {
"repo_name": "dnlcrl/TensorFlow-Playground",
"path": "1.tutorials/4.Convolutional Neural Networks/cifar10_train.py",
"copies": "1",
"size": "4763",
"license": "mit",
"hash": 7451763699984025000,
"line_mean": 34.5447761194,
"line_max": 83,
"alpha_frac": 0.5918538736,
"autogenerated": false,
"ratio": 3.8411290322580647,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4932982905858065,
"avg_score": null,
"num_lines": null
} |
"""A binary to train CIFAR-10 using multiple GPU's with synchronous updates.
Accuracy:
cifar10_multi_gpu_train.py achieves ~86% accuracy after 100K steps (256
epochs of data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
--------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
2 Tesla K20m | 0.13-0.20 | ~84% at 30K steps (2.5 hours)
3 Tesla K20m | 0.13-0.18 | ~84% at 30K steps
4 Tesla K20m | ~0.10 | ~84% at 30K steps
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import yarntf
import cifar10
# Command-line flags for the multi-GPU CIFAR-10 trainer.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 1,
                            """How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
def tower_loss(scope):
    """Build the full CIFAR model on one tower and return its total loss.

    Args:
        scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'

    Returns:
        0-D tensor containing the summed loss for one batch of data.
    """
    # Input pipeline for this tower.
    images, labels = cifar10.distorted_inputs()
    # Forward pass.
    logits = cifar10.inference(images)
    # Registers the per-term losses in the 'losses' collection; the returned
    # value itself is not needed here.
    _ = cifar10.loss(logits, labels)
    # Gather every loss term registered for this tower and sum them.
    tower_losses = tf.get_collection('losses', scope)
    total_loss = tf.add_n(tower_losses, name='total_loss')
    # Summarize each individual term plus the total.
    for loss_tensor in tower_losses + [total_loss]:
        # Strip the 'tower_N/' prefix so multi-GPU runs chart cleanly on
        # tensorboard.
        summary_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', loss_tensor.op.name)
        tf.summary.scalar(summary_name, loss_tensor)
    return total_loss
def average_gradients(tower_grads):
    """Average each shared variable's gradient across all towers.

    This function is the synchronization point between towers.

    Args:
        tower_grads: list (one entry per tower) of lists of
            (gradient, variable) tuples.

    Returns:
        List of (gradient, variable) pairs where the gradient is the mean
        over all towers.
    """
    averaged = []
    # zip(*...) regroups the per-tower lists into per-variable tuples:
    # ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN))
    for grads_and_vars in zip(*tower_grads):
        # Stack the per-tower gradients along a new leading 'tower' axis,
        # then reduce that axis away with a mean.
        stacked = tf.concat(
            axis=0,
            values=[tf.expand_dims(g, 0) for g, _ in grads_and_vars])
        mean_grad = tf.reduce_mean(stacked, 0)
        # The variables are shared across towers, so the first tower's
        # pointer stands in for all of them.
        averaged.append((mean_grad, grads_and_vars[0][1]))
    return averaged
def train():
    """Train CIFAR-10 for a number of steps across FLAGS.num_gpus tower replicas."""
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Create a variable to count the number of train() calls. This equals the
        # number of batches processed * FLAGS.num_gpus.
        global_step = tf.get_variable(
            'global_step', [],
            initializer=tf.constant_initializer(0), trainable=False)
        # Calculate the learning rate schedule.
        num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
                                 FLAGS.batch_size)
        decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
        # Decay the learning rate exponentially based on the number of steps.
        lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
                                        global_step,
                                        decay_steps,
                                        cifar10.LEARNING_RATE_DECAY_FACTOR,
                                        staircase=True)
        # Create an optimizer that performs gradient descent.
        opt = tf.train.GradientDescentOptimizer(lr)
        # Calculate the gradients for each model tower.
        tower_grads = []
        with tf.variable_scope(tf.get_variable_scope()):
            for i in xrange(FLAGS.num_gpus):
                with tf.device('/gpu:%d' % i):
                    with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
                        # Build one tower of the CIFAR model.  This constructs
                        # the entire model but shares the variables across all
                        # towers.
                        loss = tower_loss(scope)
                        # Reuse variables for the next tower — must happen
                        # after the first tower has created them.
                        tf.get_variable_scope().reuse_variables()
                        # Retain the summaries from the final tower only.
                        summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
                        # Gradients for this tower's batch of data.
                        grads = opt.compute_gradients(loss)
                        # Keep track of the gradients across all towers.
                        tower_grads.append(grads)
        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers.
        grads = average_gradients(tower_grads)
        # Add a summary to track the learning rate.
        summaries.append(tf.summary.scalar('learning_rate', lr))
        # Add histograms for gradients.
        for grad, var in grads:
            if grad is not None:
                summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))
        # Apply the gradients to adjust the shared variables.
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        # Add histograms for trainable variables.
        for var in tf.trainable_variables():
            summaries.append(tf.summary.histogram(var.op.name, var))
        # Track the moving averages of all trainable variables.
        variable_averages = tf.train.ExponentialMovingAverage(
            cifar10.MOVING_AVERAGE_DECAY, global_step)
        variables_averages_op = variable_averages.apply(tf.trainable_variables())
        # Group all updates into a single train op.
        train_op = tf.group(apply_gradient_op, variables_averages_op)
        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())
        # Build the summary operation from the last tower summaries.
        summary_op = tf.summary.merge(summaries)
        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()
        # Start running operations on the Graph. allow_soft_placement must be set to
        # True to build towers on GPU, as some of the ops do not have GPU
        # implementations.
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)
        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)
        # NOTE(review): summaries go to the YARNTF_TB_DIR directory (for
        # TensorBoard under YARN), while checkpoints below go to
        # FLAGS.train_dir — confirm this split is intended.
        summary_writer = tf.summary.FileWriter(os.environ["YARNTF_TB_DIR"], sess.graph)
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time
            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
            # Log throughput every 10 steps.
            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = duration / FLAGS.num_gpus
                format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                              'sec/batch)')
                print (format_str % (datetime.now(), step, loss_value,
                                     examples_per_sec, sec_per_batch))
            # Write summaries every 100 steps.
            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
            # Save the model checkpoint periodically.
            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None):  # pylint: disable=unused-argument
    """Wipe any previous run directory, join the YARN cluster, and train."""
    train_dir = FLAGS.train_dir
    if tf.gfile.Exists(train_dir):
        tf.gfile.DeleteRecursively(train_dir)
    tf.gfile.MakeDirs(train_dir)
    # The returned (cluster, server) pair is not needed here.
    _, _ = yarntf.createClusterServer()
    train()


if __name__ == '__main__':
    tf.app.run()
| {
"repo_name": "tobiajo/hops-tensorflow",
"path": "yarntf/examples/cifar10/cifar10_multi_gpu_train.py",
"copies": "2",
"size": "9627",
"license": "apache-2.0",
"hash": 2211993407219763500,
"line_mean": 36.3139534884,
"line_max": 83,
"alpha_frac": 0.6462033863,
"autogenerated": false,
"ratio": 3.6899195093905712,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5336122895690572,
"avg_score": null,
"num_lines": null
} |
"""A binary to train eye using CPU or a single GPU.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import time
from datetime import datetime
import numpy as np
import tensorflow as tf
import eye_model
# Command-line flags for the eye trainer.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/eye_train',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 10000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
def train():
    """Train eye for FLAGS.max_steps steps, always starting from scratch."""
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)
        # Get images and labels of eye.
        images, labels = eye_model.distorted_inputs()
        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = eye_model.inference(images)
        # Calculate loss.
        loss = eye_model.loss(logits, labels)
        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        train_op = eye_model.train(loss, global_step)
        # Create a saver.
        saver = tf.train.Saver(tf.all_variables())
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()
        # Build an initialization operation to run below.
        init = tf.initialize_all_variables()
        # Start running operations on the Graph.
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)
        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time
            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
            # Log throughput every 10 steps.
            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                              'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))
            # NOTE(review): summaries AND checkpoints are written every 10
            # steps here; the sibling scripts in this codebase use 100/1000.
            # Confirm the aggressive cadence is intended — it is I/O heavy.
            if step % 10 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)
            # Save the model checkpoint periodically.
            if step % 10 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
def main(argv=None):  # pylint: disable=unused-argument
    """Reset the training directory and launch training."""
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    # BUG FIX: the directory was deleted but never recreated, unlike the
    # sibling training scripts in this codebase; the SummaryWriter/saver in
    # train() need the directory to exist before they write into it.
    tf.gfile.MakeDirs(FLAGS.train_dir)
    train()


if __name__ == '__main__':
    tf.app.run()
| {
"repo_name": "callofdutyops/YXH2016724098982",
"path": "eye_train.py",
"copies": "1",
"size": "3366",
"license": "mit",
"hash": -5805734470023242000,
"line_mean": 33.3469387755,
"line_max": 82,
"alpha_frac": 0.5855614973,
"autogenerated": false,
"ratio": 4.0359712230215825,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00012294074256208508,
"num_lines": 98
} |
"""A binary to train ocr using a single GPU."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import tensorflow as tf
import ocr
import ocr_input
import os
# Command-line flags for the OCR trainer.  Note device placement logging
# defaults to True here (unlike the other trainers in this codebase).
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', 'train_logs',
                           """Directory where to write event logs """
                           """and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
                            """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', True,
                            """Whether to log device placement.""")
def train():
    """Train the OCR model until the queue coordinator requests a stop.

    Uses a plain tf.Session + Coordinator loop and reports/saves every
    `print_every_n` steps.
    """
    with tf.Graph().as_default():
        global_step = tf.contrib.framework.get_or_create_global_step()
        # Get images and labels for ocr.
        print("Preparing input")
        # with tf.device('/cpu:0'):
        images, labels, seq_lengths = ocr.distorted_inputs()
        # Build a Graph that computes the logits predictions from the
        # inference model.
        print("Building graph")
        logits, timesteps = ocr.inference(images, FLAGS.batch_size, train=True)
        # CTC loss plus two evaluation ops: the label error rate and a
        # decoder (fetched below, currently only for optional debugging).
        print("Creating loss")
        loss = ocr.create_ctc_loss(logits, labels, timesteps, seq_lengths)
        print("Creating LER")
        ler = ocr.create_label_error_rate(logits, labels, timesteps)
        print("Creating decoder")
        decoded = ocr.check_decoder(logits, labels, timesteps)
        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        print("Creating train OP")
        train_op, lr = ocr.train_simple(loss, global_step)
        print("Creating init OP")
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess = tf.Session()
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        train_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                             sess.graph)
        saver = tf.train.Saver()
        summary_op = tf.summary.merge_all()
        print("Starting training")
        print_every_n = 1000
        start_time = time.time()
        mean_ler = 0
        while not coord.should_stop():
            try:
                _, loss_res, lr_res, ler_res, summary_op_result, global_step_result, decoded_res = sess.run([train_op, loss, lr, ler, summary_op, global_step, decoded])
                mean_ler += ler_res
                if global_step_result % print_every_n == 0 or global_step_result == 1:
                    # NOTE(review): the division by print_every_n is wrong for
                    # the very first report at step 1, and `mean_ler` is
                    # computed but never displayed — the status line prints
                    # the instantaneous ler_res.  Confirm which was intended.
                    mean_steps_time = (time.time() - start_time) / print_every_n
                    mean_ler = mean_ler / print_every_n
                    status_string = "Step: {} Loss: {:.4f} LR: {:.6f} LER: {:.4f} Step time: {:.3f} sec"
                    print(status_string.format(global_step_result, loss_res, lr_res, ler_res, mean_steps_time))
                    # print("Decoded:")
                    # print(str(decoded_res))
                    train_writer.add_summary(summary_op_result, global_step=global_step_result)
                    # NOTE(review): the global_step *tensor* is passed here;
                    # tf.train.Saver.save accepts a tensor for global_step,
                    # but the already-fetched int global_step_result would
                    # avoid an extra evaluation — confirm.
                    saver.save(sess, os.path.join(FLAGS.train_dir, 'checkpoint'), global_step=global_step)
                    start_time = time.time()
                    mean_ler = 0
            except Exception as e:
                # Any failure (including end-of-input OutOfRange) is printed
                # and forwarded to the coordinator to stop the loop cleanly.
                print(e)
                coord.request_stop(e)
        # (A large commented-out MonitoredTrainingSession-based variant of
        # this loop previously lived here; removed as dead code — see
        # version-control history if it is ever needed again.)
def write_empty_inference_graph():
    """Export an untrained inference graph (binary proto + text) to FLAGS.train_dir.

    The graph takes a single-image placeholder and exposes outputs named
    'decoded_indexes' and 'decoded_log_prob' so they can be fetched by name
    after freezing/serving.
    """
    with tf.Graph().as_default():
        print("Preparing input")
        # Batch of exactly one image; dimensions come from the input module.
        images = tf.placeholder(tf.float32, [1, ocr_input.IMAGE_WIDTH, ocr_input.IMAGE_HEIGHT, ocr_input.IMAGE_DEPTH])
        # NOTE(review): train=True while building an inference graph looks
        # suspicious (training-only behavior such as dropout would remain
        # enabled) — confirm against ocr.inference.
        logits, timesteps = ocr.inference(images, 1, train=True)
        decoded, log_prob = tf.nn.ctc_greedy_decoder(logits, timesteps)
        # Give the outputs stable names for later retrieval from the proto.
        log_prob = tf.identity(log_prob, name="decoded_log_prob")
        decoded = tf.cast(decoded[0], tf.int32, name="decoded_indexes")
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess = tf.Session()
        sess.run(init_op)
        # Write both the binary and a human-readable copy of the graph.
        tf.train.write_graph(sess.graph_def, FLAGS.train_dir, 'minimal_graph.proto', as_text=False)
        tf.train.write_graph(sess.graph_def, FLAGS.train_dir, 'minimal_graph.txt', as_text=True)
def main(argv=None):  # pylint: disable=unused-argument
    """Recreate the train dir, export an empty inference graph, then train."""
    train_dir = FLAGS.train_dir
    if tf.gfile.Exists(train_dir):
        tf.gfile.DeleteRecursively(train_dir)
    tf.gfile.MakeDirs(train_dir)
    write_empty_inference_graph()
    train()


if __name__ == '__main__':
    tf.app.run()
| {
"repo_name": "Luonic/tf-cnn-lstm-ocr-captcha",
"path": "ocr_train.py",
"copies": "1",
"size": "6646",
"license": "mit",
"hash": -7630216789593668000,
"line_mean": 40.5375,
"line_max": 168,
"alpha_frac": 0.551609991,
"autogenerated": false,
"ratio": 3.8195402298850576,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48711502208850577,
"avg_score": null,
"num_lines": null
} |
""" A binary tree implementation.
"""
class Node(object):
    """A binary tree node holding `data` and optional left/right children."""

    def __init__(self, data, left=None, right=None):
        self.data = data
        self.left = left
        self.right = right

    def __str__(self):
        return str(self.data)


class BinaryTree(object):
    """An unbalanced binary search tree (no rebalancing on insert)."""

    def __init__(self, root_node=None):
        self.root_node = root_node

    def insert(self, data):
        """Insert `data` keeping BST order; duplicate values are ignored.

        BUG FIX: the original equal-key branch did `cursor = node`, which
        neither advanced the walk nor terminated the loop, so inserting a
        value already in the tree spun forever.
        """
        node = Node(data)
        if self.root_node is None:
            self.root_node = node
            return
        # Walk down the tree to the insertion point.
        cursor = self.root_node
        while True:
            if node.data < cursor.data:
                if cursor.left:
                    cursor = cursor.left
                    continue
                cursor.left = node
                return
            elif node.data > cursor.data:
                if cursor.right:
                    cursor = cursor.right
                    continue
                cursor.right = node
                return
            else:
                # Equal key already present: nothing to do.
                return

    def populate(self, data_items):
        """Rebuild the tree from scratch out of `data_items`."""
        self.root_node = None
        for data in data_items:
            self.insert(data)

    def inorder(self, node, result=None):
        """Recursive in-order traversal; returns data in sorted order."""
        if result is None:
            result = []
        if node:
            self.inorder(node.left, result)
            result.append(node.data)
            self.inorder(node.right, result)
        return result

    def __str__(self):
        """Return a space-separated in-order representation of the tree.

        BUG FIX: each item is passed through str() before joining — the
        original `' '.join(result)` raised TypeError for any non-string
        data (e.g. ints).
        """
        return ' '.join(str(data) for data in self.inorder(self.root_node))
| {
"repo_name": "thisismyrobot/dsa",
"path": "src/binary_tree.py",
"copies": "1",
"size": "1762",
"license": "unlicense",
"hash": 7445221678168279000,
"line_mean": 23.1714285714,
"line_max": 57,
"alpha_frac": 0.4750283768,
"autogenerated": false,
"ratio": 4.588541666666667,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5563570043466667,
"avg_score": null,
"num_lines": null
} |
#A binary watch has 4 LEDs on the top which represent the hours (0-11), and the 6 LEDs on the bottom represent the minutes (0-59).
#
#Each LED represents a zero or one, with the least significant bit on the right.
#
#
#For example, the above binary watch reads "3:25".
#
#Given a non-negative integer n which represents the number of LEDs that are currently on, return all possible times the watch could represent.
#
#Example:
#
#Input: n = 1
#Return: ["1:00", "2:00", "4:00", "8:00", "0:01", "0:02", "0:04", "0:08", "0:16", "0:32"]
#Note:
#The order of output does not matter.
#The hour must not contain a leading zero, for example "01:00" is not valid, it should be "1:00".
#The minute must be consist of two digits and may contain a leading zero, for example "10:2" is not valid, it should be "10:02".
class Solution(object):
    def readBinaryWatch(self, num):
        """Return every time whose hour (0-11) and minute (0-59) LEDs light
        exactly `num` LEDs in total, formatted 'h:mm'.

        :type num: int
        :rtype: List[str]
        """
        def popcount(x):
            # Kernighan's trick: each `x &= x - 1` clears the lowest set bit.
            count = 0
            while x:
                x &= x - 1
                count += 1
            return count

        # BUG FIX: `range` replaces the Python-2-only `xrange`, so this now
        # runs on both Python 2 and Python 3 with identical behavior.
        times = []
        for hour in range(12):
            for minute in range(60):
                if popcount(hour) + popcount(minute) == num:
                    # Hour without leading zero; minute zero-padded to 2 digits.
                    times.append('%d:%02d' % (hour, minute))
        return times
"repo_name": "95subodh/Leetcode",
"path": "401. Binary Watch.py",
"copies": "1",
"size": "1129",
"license": "mit",
"hash": -1925867035701905000,
"line_mean": 30.3888888889,
"line_max": 143,
"alpha_frac": 0.6536758193,
"autogenerated": false,
"ratio": 2.872773536895674,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8755832570661716,
"avg_score": 0.05412335710679167,
"num_lines": 36
} |
#A 'Binney' quasi-isothermal DF
import math
import warnings
import numpy
from scipy import optimize, interpolate, integrate
from galpy import potential
from galpy import actionAngle
from galpy.actionAngle import actionAngleIsochrone
from galpy.potential import IsochronePotential
from galpy.orbit import Orbit
from galpy.util import galpyWarning
# Module-level defaults.  _DEFAULTNGL/_DEFAULTNGL2 are Gauss-Legendre node
# counts used when pre-computing quadrature nodes in __init__; _NSIGMA is
# presumably the +/- sigma integration range used elsewhere in this module
# (not visible in this chunk).
_NSIGMA=4
_DEFAULTNGL=10
_DEFAULTNGL2=20
class quasiisothermaldf(object):
"""Class that represents a 'Binney' quasi-isothermal DF"""
    def __init__(self,hr,sr,sz,hsr,hsz,pot=None,aA=None,
                 cutcounter=False,
                 _precomputerg=True,_precomputergrmax=None,
                 _precomputergnLz=51,
                 ro=1.,lo=10./220./8.):
        """
        NAME:
           __init__
        PURPOSE:
           Initialize a quasi-isothermal DF
        INPUT:
           hr - radial scale length
           sr - radial velocity dispersion at the solar radius
           sz - vertical velocity dispersion at the solar radius
           hsr - radial-velocity-dispersion scale length
           hsz - vertical-velocity-dispersion scale length
           pot= Potential instance or list thereof (required)
           aA= actionAngle instance used to convert (x,v) to actions
               (required; must be consistent with pot)
           cutcounter= if True, set counter-rotating stars' DF to zero
           ro= reference radius for surface mass and sigmas
           lo= reference angular momentum below where there are significant numbers of retrograde stars
        OTHER INPUTS:
           _precomputerg= if True (default), pre-compute the rL(L)
           _precomputergrmax= if set, this is the maximum R for which to pre-compute rg (default: 5*hr)
           _precomputergnLz if set, number of Lz to pre-compute rg for (default: 51)
        OUTPUT:
           object
        HISTORY:
           2012-07-25 - Started - Bovy (IAS@MPIA)
        """
        # Scale lengths and dispersions defining the DF.
        self._hr= hr
        self._sr= sr
        self._sz= sz
        self._hsr= hsr
        self._hsz= hsz
        self._ro= ro
        self._lo= lo
        # Cache the log dispersions for re-use when evaluating the DF.
        self._lnsr= math.log(self._sr)
        self._lnsz= math.log(self._sz)
        if pot is None:
            raise IOError("pot= must be set")
        self._pot= pot
        if aA is None:
            raise IOError("aA= must be set")
        self._aA= aA
        # Sanity-check that the actionAngle object was built for the same
        # potential; an isochrone actionAngle gets a dedicated check on its
        # b and amp parameters instead of object identity.
        if not self._aA._pot == self._pot:
            if not isinstance(self._aA,actionAngleIsochrone):
                raise IOError("Potential in aA does not appear to be the same as given potential pot")
            elif isinstance(self._pot,IsochronePotential) and \
                    not self._aA.b == self._pot.b and \
                    not self._aA.amp == self._pot._amp:
                # NOTE(review): as written this raises only when BOTH b and
                # amp differ (the two `not`s are and-ed) — confirm intent.
                raise IOError("Potential in aA does not appear to be the same as given potential pot")
        self._cutcounter= cutcounter
        if _precomputerg:
            # Pre-compute rl(Lz) on a grid of Lz and build a cubic spline so
            # later rg lookups are cheap interpolations.
            if _precomputergrmax is None:
                _precomputergrmax= 5*self._hr
            self._precomputergrmax= _precomputergrmax
            self._precomputergnLz= _precomputergnLz
            self._precomputergLzmin= 0.01
            # Largest Lz on the grid: circular angular momentum at rmax.
            self._precomputergLzmax= self._precomputergrmax\
                *potential.vcirc(self._pot,self._precomputergrmax)
            self._precomputergLzgrid= numpy.linspace(self._precomputergLzmin,self._precomputergLzmax,self._precomputergnLz)
            self._rls= numpy.array([potential.rl(self._pot,l) for l in self._precomputergLzgrid])
            #Spline interpolate
            self._rgInterp= interpolate.InterpolatedUnivariateSpline(self._precomputergLzgrid,self._rls,k=3)
        else:
            # No pre-computation: use sentinel bounds so every Lz falls
            # outside the (empty) pre-computed range.
            self._precomputergrmax= 0.
            self._rgInterp= None
            self._rls= None
            self._precomputergnr= None
            self._precomputergLzgrid= None
            self._precomputergLzmin= \
                numpy.finfo(numpy.dtype(numpy.float64)).max
            self._precomputergLzmax= \
                numpy.finfo(numpy.dtype(numpy.float64)).min
        self._precomputerg= _precomputerg
        # Cache Gauss-Legendre nodes/weights at the default resolutions for
        # later quadratures.
        self._glxdef, self._glwdef= \
            numpy.polynomial.legendre.leggauss(_DEFAULTNGL)
        self._glxdef2, self._glwdef2= \
            numpy.polynomial.legendre.leggauss(_DEFAULTNGL2)
        self._glxdef12, self._glwdef12= \
            numpy.polynomial.legendre.leggauss(_DEFAULTNGL//2)
        return None
    def __call__(self,*args,**kwargs):
        """
        NAME:
           __call__
        PURPOSE:
           return the DF evaluated at the given phase-space point or actions
        INPUT:
           Either:
              a)(jr,lz,jz) tuple
                 where:
                    jr - radial action
                    lz - z-component of angular momentum
                    jz - vertical action
              b) R,vR,vT,z,vz
              c) Orbit instance: initial condition used if that's it, orbit(t)
                 if there is a time given as well
           log= if True, return the natural log
           +scipy.integrate.quadrature kwargs
           func= function of (jr,lz,jz) to multiply f with (useful for moments)
        OUTPUT:
           value of DF (or tuple including actions/frequencies when the
           private _return_actions/_return_freqs flags are set)
        HISTORY:
           2012-07-25 - Written - Bovy (IAS@MPIA)
        NOTE:
           For Miyamoto-Nagai/adiabatic approximation this seems to take
           about 30 ms / evaluation in the extended Solar neighborhood
           For a MWPotential/adiabatic approximation this takes about
           50 ms / evaluation in the extended Solar neighborhood
           For adiabatic-approximation grid this seems to take
           about 0.67 to 0.75 ms / evaluation in the extended Solar
           neighborhood (includes some out of the grid)
           up to 200x faster when called with vector R,vR,vT,z,vz
        """
        #First parse log
        log= kwargs.pop('log',False)
        _return_actions= kwargs.pop('_return_actions',False)
        _return_freqs= kwargs.pop('_return_freqs',False)
        # If 'rg' is supplied, 'kappa', 'nu', and 'Omega' must be supplied
        # too: they are popped unconditionally and a missing one raises
        # KeyError (internal fast path used by vmomentdensity)
        if 'rg' in kwargs:
            thisrg= kwargs.pop('rg')
            kappa= kwargs.pop('kappa')
            nu= kwargs.pop('nu')
            Omega= kwargs.pop('Omega')
        else:
            thisrg= None
            kappa= None
            nu= None
            Omega= None
        #First parse args
        if len(args) == 1 and not isinstance(args[0],Orbit): #(jr,lz,jz)
            jr,lz,jz= args[0]
        else:
            #Use self._aA to calculate the actions
            try:
                jr,lz,jz= self._aA(*args,**kwargs)
            except actionAngle.UnboundError:
                # Unbound orbits carry zero DF weight
                if log: return -numpy.finfo(numpy.dtype(numpy.float64)).max
                else: return 0.
        #if isinstance(jr,(list,numpy.ndarray)) and len(jr) > 1: jr= jr[0]
        #if isinstance(jz,(list,numpy.ndarray)) and len(jz) > 1: jz= jz[0]
        # Scalar counter-rotating orbits are cut here; array input is
        # handled after evaluation below
        if not isinstance(lz,numpy.ndarray) and self._cutcounter and lz < 0.:
            if log: return -numpy.finfo(numpy.dtype(numpy.float64)).max
            else: return 0.
        #First calculate rg
        if thisrg is None:
            thisrg= self.rg(lz)
            #Then calculate the epicycle and vertical frequencies
            kappa, nu= self._calc_epifreq(thisrg), self._calc_verticalfreq(thisrg)
            Omega= numpy.fabs(lz)/thisrg/thisrg
        #calculate surface-densities and sigmas
        # Exponential radial profiles evaluated at the guiding radius,
        # normalized to the values at self._ro
        lnsurfmass= (self._ro-thisrg)/self._hr
        lnsr= self._lnsr+(self._ro-thisrg)/self._hsr
        lnsz= self._lnsz+(self._ro-thisrg)/self._hsz
        #Calculate func
        if 'func' in kwargs:
            if log:
                funcTerm= numpy.log(kwargs['func'](jr,lz,jz))
            else:
                funcFactor= kwargs['func'](jr,lz,jz)
        #Calculate fsr
        else:
            if log:
                funcTerm= 0.
            else:
                funcFactor= 1.
        if log:
            # log of the radial and vertical parts of the quasi-isothermal DF
            lnfsr= numpy.log(Omega)+lnsurfmass-2.*lnsr-math.log(math.pi)\
                -numpy.log(kappa)\
                +numpy.log(1.+numpy.tanh(lz/self._lo))\
                -kappa*jr*numpy.exp(-2.*lnsr)
            lnfsz= numpy.log(nu)-math.log(2.*math.pi)\
                -2.*lnsz-nu*jz*numpy.exp(-2.*lnsz)
            out= lnfsr+lnfsz+funcTerm
            # NaNs (e.g. from pathological actions) are mapped to the
            # smallest representable log-DF value
            if isinstance(lz,numpy.ndarray):
                out[numpy.isnan(out)]= -numpy.finfo(numpy.dtype(numpy.float64)).max
                if self._cutcounter: out[(lz < 0.)]= -numpy.finfo(numpy.dtype(numpy.float64)).max
            elif numpy.isnan(out): out= -numpy.finfo(numpy.dtype(numpy.float64)).max
        else:
            srm2= numpy.exp(-2.*lnsr)
            fsr= Omega*numpy.exp(lnsurfmass)*srm2/math.pi/kappa\
                *(1.+numpy.tanh(lz/self._lo))\
                *numpy.exp(-kappa*jr*srm2)
            szm2= numpy.exp(-2.*lnsz)
            fsz= nu/2./math.pi*szm2*numpy.exp(-nu*jz*szm2)
            out= fsr*fsz*funcFactor
            if isinstance(lz,numpy.ndarray):
                out[numpy.isnan(out)]= 0.
                if self._cutcounter: out[(lz < 0.)]= 0.
            elif numpy.isnan(out): out= 0.
        if _return_actions and _return_freqs:
            return (out,jr,lz,jz,thisrg,kappa,nu,Omega)
        elif _return_actions:
            return (out,jr,lz,jz)
        elif _return_freqs:
            return (out,thisrg,kappa,nu,Omega)
        else:
            return out
def estimate_hr(self,R,z=0.,dR=10.**-8.,**kwargs):
"""
NAME:
estimate_hr
PURPOSE:
estimate the exponential scale length at R
INPUT:
R - Galactocentric radius
z= height (default: 0 pc)
dR- range in R to use
density kwargs
OUTPUT:
estimated hR
HISTORY:
2012-09-11 - Written - Bovy (IAS)
2013-01-28 - Re-written - Bovy
"""
Rs= [R-dR/2.,R+dR/2.]
if z is None:
sf= numpy.array([self.surfacemass_z(r,**kwargs) for r in Rs])
else:
sf= numpy.array([self.density(r,z,**kwargs) for r in Rs])
lsf= numpy.log(sf)
return -dR/(lsf[1]-lsf[0])
def estimate_hz(self,R,z,dz=10.**-8.,**kwargs):
"""
NAME:
estimate_hz
PURPOSE:
estimate the exponential scale height at R
INPUT:
R - Galactocentric radius
dz - z range to use
density kwargs
OUTPUT:
estimated hz
HISTORY:
2012-08-30 - Written - Bovy (IAS)
2013-01-28 - Re-written - Bovy
"""
if z == 0.:
zs= [z,z+dz]
else:
zs= [z-dz/2.,z+dz/2.]
sf= numpy.array([self.density(R,zz,**kwargs) for zz in zs])
lsf= numpy.log(sf)
return -dz/(lsf[1]-lsf[0])
def estimate_hsr(self,R,z=0.,dR=10.**-8.,**kwargs):
"""
NAME:
estimate_hsr
PURPOSE:
estimate the exponential scale length of the radial dispersion at R
INPUT:
R - Galactocentric radius
z= height (default: 0 pc)
dR- range in R to use
density kwargs
OUTPUT:
estimated hsR
HISTORY:
2013-03-08 - Written - Bovy (IAS)
"""
Rs= [R-dR/2.,R+dR/2.]
sf= numpy.array([self.sigmaR2(r,z,**kwargs) for r in Rs])
lsf= numpy.log(sf)/2.
return -dR/(lsf[1]-lsf[0])
def estimate_hsz(self,R,z=0.,dR=10.**-8.,**kwargs):
"""
NAME:
estimate_hsz
PURPOSE:
estimate the exponential scale length of the vertical dispersion at R
INPUT:
R - Galactocentric radius
z= height (default: 0 pc)
dR- range in R to use
density kwargs
OUTPUT:
estimated hsz
HISTORY:
2013-03-08 - Written - Bovy (IAS)
"""
Rs= [R-dR/2.,R+dR/2.]
sf= numpy.array([self.sigmaz2(r,z,**kwargs) for r in Rs])
lsf= numpy.log(sf)/2.
return -dR/(lsf[1]-lsf[0])
    def surfacemass_z(self,R,nz=7,zmax=1.,fixed_quad=True,fixed_order=8,
                      **kwargs):
        """
        NAME:
           surfacemass_z
        PURPOSE:
           calculate the vertically-integrated surface density
        INPUT:
           R - Galactocentric radius
           fixed_quad= if True (default), use Gauss-Legendre integration
           fixed_order= (8), order of GL integration to use
           nz= number of zs to use to estimate (non-fixed_quad path only)
           zmax= maximum z to use (non-fixed_quad path only)
           density kwargs
        OUTPUT:
           \Sigma(R)
        HISTORY:
           2012-08-30 - Written - Bovy (IAS)
        """
        if fixed_quad:
            # NOTE(review): this branch integrates z over [0, 0.5]
            # regardless of zmax and does not forward **kwargs to
            # density -- confirm whether this is intentional
            return 2.*integrate.fixed_quad(lambda x: self.density(R*numpy.ones(fixed_order),x),
                                           0.,.5,n=fixed_order)[0]
        # Sample ln(density) on a vertical grid and integrate a spline of it
        zs= numpy.linspace(0.,zmax,nz)
        sf= numpy.array([self.density(R,z,**kwargs) for z in zs])
        lsf= numpy.log(sf)
        #Interpolate
        lsfInterp= interpolate.UnivariateSpline(zs,
                                                lsf,
                                                k=3)
        #Integrate
        # NOTE(review): upper limit is hard-coded to 1., not zmax; for
        # zmax != 1. the spline is extrapolated or truncated -- confirm
        return 2.*integrate.quad((lambda x: numpy.exp(lsfInterp(x))),
                                 0.,1.)[0]
    def vmomentdensity(self,R,z,n,m,o,nsigma=None,mc=False,nmc=10000,
                       _returnmc=False,_vrs=None,_vts=None,_vzs=None,
                       _rawgausssamples=False,
                       gl=False,ngl=_DEFAULTNGL,_returngl=False,_glqeval=None,
                       _return_actions=False,_jr=None,_lz=None,_jz=None,
                       _return_freqs=False,
                       _rg=None,_kappa=None,_nu=None,_Omega=None,
                       _sigmaR1=None,_sigmaz1=None,
                       **kwargs):
        """
        NAME:
           vmomentdensity
        PURPOSE:
           calculate the an arbitrary moment of the velocity distribution
           at R times the density
        INPUT:
           R - radius at which to calculate the moment(/ro)
           z - height at which to calculate the moment(/ro)
           n - vR^n
           m - vT^m
           o - vz^o
        OPTIONAL INPUT:
           nsigma - number of sigma to integrate the velocities over (when doing explicit numerical integral)
           mc= if True, calculate using Monte Carlo integration
           nmc= if mc, use nmc samples
           gl= use Gauss-Legendre
           _returngl= if True, return the evaluated DF
           _return_actions= if True, return the evaluated actions (does not work with _returngl currently)
           _return_freqs= if True, return the evaluated frequencies and rg (does not work with _returngl currently)
        OUTPUT:
           <vR^n vT^m x density> at R,z
        HISTORY:
           2012-08-06 - Written - Bovy (IAS@MPIA)
        """
        # Vector input: recurse element-wise (note: the private _return*
        # / _v*s kwargs are not forwarded on this path)
        if isinstance(R,numpy.ndarray):
            return numpy.array([self.vmomentdensity(r,zz,n,m,o,nsigma=nsigma,
                                                    mc=mc,nmc=nmc,
                                                    gl=gl,ngl=ngl,**kwargs) for r,zz in zip(R,z)])
        # For the adiabatic approximation the DF is even in vR and vz, so
        # odd vR or vz moments vanish identically
        if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
                                actionAngle.actionAngleAdiabaticGrid)):
            if n % 2 == 1. or o % 2 == 1.:
                return 0. #we know this must be the case
        if nsigma == None:
            nsigma= _NSIGMA
        # Local (exponential) dispersions at R, unless precomputed values
        # are handed in
        if _sigmaR1 is None:
            sigmaR1= self._sr*numpy.exp((self._ro-R)/self._hsr)
        else:
            sigmaR1= _sigmaR1
        if _sigmaz1 is None:
            sigmaz1= self._sz*numpy.exp((self._ro-R)/self._hsz)
        else:
            sigmaz1= _sigmaz1
        thisvc= potential.vcirc(self._pot,R)
        #Use the asymmetric drift equation to estimate va
        gamma= numpy.sqrt(0.5)
        va= sigmaR1**2./2./thisvc\
            *(gamma**2.-1. #Assume close to flat rotation curve, sigphi2/sigR2 =~ 0.5
              +R*(1./self._hr+2./self._hsr))
        if math.fabs(va) > sigmaR1: va = 0.#To avoid craziness near the center
        if gl:
            if ngl % 2 == 1:
                raise ValueError("ngl must be even")
            # A cached DF evaluation is only reusable at the same ngl
            if not _glqeval is None and ngl != _glqeval.shape[0]:
                _glqeval= None
            #Use Gauss-Legendre integration for all
            if ngl == _DEFAULTNGL:
                glx, glw= self._glxdef, self._glwdef
                glx12, glw12= self._glxdef12, self._glwdef12
            elif ngl == _DEFAULTNGL2:
                glx, glw= self._glxdef2, self._glwdef2
                glx12, glw12= self._glxdef, self._glwdef
            else:
                glx, glw= numpy.polynomial.legendre.leggauss(ngl)
                glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
            #Evaluate everywhere
            # Adiabatic: DF even in vR,vz, so integrate vR,vz >= 0 only;
            # otherwise build a symmetric +/- grid from the half-order nodes
            if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
                                    actionAngle.actionAngleAdiabaticGrid)):
                vRgl= nsigma*sigmaR1/2.*(glx+1.)
                vzgl= nsigma*sigmaz1/2.*(glx+1.)
                vRglw= glw
                vzglw= glw
            else:
                vRgl= nsigma*sigmaR1/2.*(glx12+1.)
                #vRgl= 1.5/2.*(glx12+1.)
                vRgl= list(vRgl)
                vRgl.extend(-nsigma*sigmaR1/2.*(glx12+1.))
                #vRgl.extend(-1.5/2.*(glx12+1.))
                vRgl= numpy.array(vRgl)
                vzgl= nsigma*sigmaz1/2.*(glx12+1.)
                #vzgl= 1.5/2.*(glx12+1.)
                vzgl= list(vzgl)
                vzgl.extend(-nsigma*sigmaz1/2.*(glx12+1.))
                #vzgl.extend(-1.5/2.*(glx12+1.))
                vzgl= numpy.array(vzgl)
                vRglw= glw12
                vRglw= list(vRglw)
                vRglw.extend(glw12)
                vRglw= numpy.array(vRglw)
                vzglw= glw12
                vzglw= list(vzglw)
                vzglw.extend(glw12)
                vzglw= numpy.array(vzglw)
            # vT nodes span [0, 1.5] (in units of the circular velocity)
            vTgl= 1.5/2.*(glx+1.)
            #Tile everything
            vTgl= numpy.tile(vTgl,(ngl,ngl,1)).T
            vRgl= numpy.tile(numpy.reshape(vRgl,(1,ngl)).T,(ngl,1,ngl))
            vzgl= numpy.tile(vzgl,(ngl,ngl,1))
            vTglw= numpy.tile(glw,(ngl,ngl,1)).T #also tile weights
            vRglw= numpy.tile(numpy.reshape(vRglw,(1,ngl)).T,(ngl,1,ngl))
            vzglw= numpy.tile(vzglw,(ngl,ngl,1))
            #evaluate
            # Three fast paths: fresh evaluation, precomputed actions,
            # precomputed actions+frequencies; else reuse cached _glqeval
            if _glqeval is None and _jr is None:
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self(R+numpy.zeros(ngl*ngl*ngl),
                                                                 vRgl.flatten(),
                                                                 vTgl.flatten(),
                                                                 z+numpy.zeros(ngl*ngl*ngl),
                                                                 vzgl.flatten(),
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True)
                logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
            elif not _jr is None and _rg is None:
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True)
                logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
            elif not _jr is None and not _rg is None:
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
                                                                 rg=_rg,kappa=_kappa,nu=_nu,
                                                                 Omega=_Omega,
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True)
                logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
            else:
                logqeval= _glqeval
            # 0.1875 = (nsigma/2)*(nsigma/2)*(1.5/2) Jacobian factors / nsigma^2
            if _returngl:
                return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
                                  *vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.1875*nsigma**2,
                        logqeval)
            elif _return_actions and _return_freqs:
                return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
                                  *vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.1875*nsigma**2,
                        jr,lz,jz,
                        rg,kappa,nu,Omega)
            elif _return_actions:
                return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
                                  *vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.1875*nsigma**2,
                        jr,lz,jz)
            else:
                return numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
                                 *vTglw*vRglw*vzglw*sigmaR1*sigmaz1*0.1875*nsigma**2)
        elif mc:
            # Importance sampling with Gaussian proposals centered on the
            # estimated mean rotation (thisvc-va)
            mvT= (thisvc-va)/gamma/sigmaR1
            if _vrs is None:
                vrs= numpy.random.normal(size=nmc)
            else:
                vrs= _vrs
            if _vts is None:
                vts= numpy.random.normal(size=nmc)+mvT
            else:
                if _rawgausssamples:
                    vts= _vts+mvT
                else:
                    vts= _vts
            if _vzs is None:
                vzs= numpy.random.normal(size=nmc)
            else:
                vzs= _vzs
            Is= _vmomentsurfaceMCIntegrand(vzs,vrs,vts,numpy.ones(nmc)*R,
                                           numpy.ones(nmc)*z,
                                           self,sigmaR1,gamma,sigmaz1,mvT,
                                           n,m,o)
            if _returnmc:
                if _rawgausssamples:
                    return (numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o),
                            vrs,vts-mvT,vzs)
                else:
                    return (numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o),
                            vrs,vts,vzs)
            else:
                return numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o)
        else: #pragma: no cover because this is too slow; a warning is shown
            warnings.warn("Calculations using direct numerical integration using tplquad is not recommended and extremely slow; it has also not been carefully tested",galpyWarning)
            return integrate.tplquad(_vmomentsurfaceIntegrand,
                                     1./gamma*(thisvc-va)/sigmaR1-nsigma,
                                     1./gamma*(thisvc-va)/sigmaR1+nsigma,
                                     lambda x: 0., lambda x: nsigma,
                                     lambda x,y: 0., lambda x,y: nsigma,
                                     (R,z,self,sigmaR1,gamma,sigmaz1,n,m,o),
                                     **kwargs)[0]*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o)
    def jmomentdensity(self,R,z,n,m,o,nsigma=None,mc=True,nmc=10000,
                       _returnmc=False,_vrs=None,_vts=None,_vzs=None,
                       **kwargs):
        """
        NAME:
           jmomentdensity
        PURPOSE:
           calculate the an arbitrary moment of an action
           of the velocity distribution
           at R times the surfacmass
        INPUT:
           R - radius at which to calculate the moment(/ro)
           z - height at which to calculate the moment(/ro)
           n - jr^n
           m - lz^m
           o - jz^o
        OPTIONAL INPUT:
           nsigma - number of sigma to integrate the velocities over (when doing explicit numerical integral)
           mc= if True (default), calculate using Monte Carlo integration
           nmc= if mc, use nmc samples
           _vrs,_vts,_vzs= reuse a previously drawn MC velocity sample
        OUTPUT:
           <jr^n lz^m jz^o x density> at R
        HISTORY:
           2012-08-09 - Written - Bovy (IAS@MPIA)
        """
        if nsigma == None:
            nsigma= _NSIGMA
        # Local (exponential) dispersions at R
        sigmaR1= self._sr*numpy.exp((self._ro-R)/self._hsr)
        sigmaz1= self._sz*numpy.exp((self._ro-R)/self._hsz)
        thisvc= potential.vcirc(self._pot,R)
        #Use the asymmetric drift equation to estimate va
        gamma= numpy.sqrt(0.5)
        va= sigmaR1**2./2./thisvc\
            *(gamma**2.-1. #Assume close to flat rotation curve, sigphi2/sigR2 =~ 0.5
              +R*(1./self._hr+2./self._hsr))
        if math.fabs(va) > sigmaR1: va = 0.#To avoid craziness near the center
        if mc:
            # Gaussian proposal centered on the estimated mean rotation
            mvT= (thisvc-va)/gamma/sigmaR1
            if _vrs is None:
                vrs= numpy.random.normal(size=nmc)
            else:
                vrs= _vrs
            if _vts is None:
                vts= numpy.random.normal(size=nmc)+mvT
            else:
                vts= _vts
            if _vzs is None:
                vzs= numpy.random.normal(size=nmc)
            else:
                vzs= _vzs
            Is= _jmomentsurfaceMCIntegrand(vzs,vrs,vts,numpy.ones(nmc)*R,numpy.ones(nmc)*z,self,sigmaR1,gamma,sigmaz1,mvT,n,m,o)
            if _returnmc:
                return (numpy.mean(Is)*sigmaR1**2.*gamma*sigmaz1,
                        vrs,vts,vzs)
            else:
                return numpy.mean(Is)*sigmaR1**2.*gamma*sigmaz1
        else: #pragma: no cover because this is too slow; a warning is shown
            warnings.warn("Calculations using direct numerical integration using tplquad is not recommended and extremely slow; it has also not been carefully tested",galpyWarning)
            return integrate.tplquad(_jmomentsurfaceIntegrand,
                                     1./gamma*(thisvc-va)/sigmaR1-nsigma,
                                     1./gamma*(thisvc-va)/sigmaR1+nsigma,
                                     lambda x: 0., lambda x: nsigma,
                                     lambda x,y: 0., lambda x,y: nsigma,
                                     (R,z,self,sigmaR1,gamma,sigmaz1,n,m,o),
                                     **kwargs)[0]*sigmaR1**2.*gamma*sigmaz1
def density(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
density
PURPOSE:
calculate the density at R,z by marginalizing over velocity
INPUT:
R - radius at which to calculate the density
z - height at which to calculate the density
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
density at (R,z)
HISTORY:
2012-07-26 - Written - Bovy (IAS@MPIA)
"""
return self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
gl=gl,ngl=ngl,
**kwargs)
def sigmaR2(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
sigmaR2
PURPOSE:
calculate sigma_R^2 by marginalizing over velocity
INPUT:
R - radius at which to calculate this
z - height at which to calculate this
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
sigma_R^2
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self.vmomentdensity(R,z,2.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self.vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self.vmomentdensity(R,z,2.,0.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self.vmomentdensity(R,z,2.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
def sigmaRz(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
sigmaRz
PURPOSE:
calculate sigma_RZ^2 by marginalizing over velocity
INPUT:
R - radius at which to calculate this
z - height at which to calculate this
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
sigma_Rz^2
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self.vmomentdensity(R,z,1.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self.vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self.vmomentdensity(R,z,1.,0.,1.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self.vmomentdensity(R,z,1.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
def tilt(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
tilt
PURPOSE:
calculate the tilt of the velocity ellipsoid by marginalizing over velocity
INPUT:
R - radius at which to calculate this
z - height at which to calculate this
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
tilt in degree
HISTORY:
2012-12-23 - Written - Bovy (IAS)
"""
if mc:
surfmass, vrs, vts, vzs= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
tsigmar2= self.vmomentdensity(R,z,2.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
tsigmaz2= self.vmomentdensity(R,z,0.,0.,2.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
tsigmarz= self.vmomentdensity(R,z,1.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
return 0.5*numpy.arctan(2.*tsigmarz/(tsigmar2-tsigmaz2))/numpy.pi*180.
elif gl:
surfmass, glqeval= self.vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
tsigmar2= self.vmomentdensity(R,z,2.,0.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
tsigmaz2= self.vmomentdensity(R,z,0.,0.,2.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
tsigmarz= self.vmomentdensity(R,z,1.,0.,1.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
return 0.5*numpy.arctan(2.*tsigmarz/(tsigmar2-tsigmaz2))/numpy.pi*180.
else:
raise NotImplementedError("Use either mc=True or gl=True")
def sigmaz2(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
sigmaz2
PURPOSE:
calculate sigma_z^2 by marginalizing over velocity
INPUT:
R - radius at which to calculate this
z - height at which to calculate this
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
sigma_z^2
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self.vmomentdensity(R,z,0.,0.,2.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self.vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self.vmomentdensity(R,z,0.,0.,2.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self.vmomentdensity(R,z,0.,0.,2.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
def meanvT(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
meanvT
PURPOSE:
calculate the mean rotational velocity by marginalizing over velocity
INPUT:
R - radius at which to calculate this
z - height at which to calculate this
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
meanvT
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self.vmomentdensity(R,z,0.,1.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self.vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self.vmomentdensity(R,z,0.,1.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self.vmomentdensity(R,z,0.,1.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
def meanvR(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
meanvR
PURPOSE:
calculate the mean radial velocity by marginalizing over velocity
INPUT:
R - radius at which to calculate this
z - height at which to calculate this
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
meanvR
HISTORY:
2012-12-23 - Written - Bovy (IAS)
"""
if mc:
surfmass, vrs, vts, vzs= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self.vmomentdensity(R,z,1.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self.vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self.vmomentdensity(R,z,1.,0.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self.vmomentdensity(R,z,1.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
def meanvz(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
meanvz
PURPOSE:
calculate the mean vertical velocity by marginalizing over velocity
INPUT:
R - radius at which to calculate this
z - height at which to calculate this
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
meanvz
HISTORY:
2012-12-23 - Written - Bovy (IAS)
"""
if mc:
surfmass, vrs, vts, vzs= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self.vmomentdensity(R,z,0.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self.vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self.vmomentdensity(R,z,0.,0.,1.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self.vmomentdensity(R,z,0.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
def sigmaT2(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
sigmaT2
PURPOSE:
calculate sigma_T^2 by marginalizing over velocity
INPUT:
R - radius at which to calculate this
z - height at which to calculate this
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
sigma_T^2
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
mvt= self.vmomentdensity(R,z,0.,1.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
return self.vmomentdensity(R,z,0.,2.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass\
-mvt**2.
elif gl:
surfmass, glqeval= self.vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
mvt= self.vmomentdensity(R,z,0.,1.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
return self.vmomentdensity(R,z,0.,2.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass-mvt**2.
else: #pragma: no cover because this is too slow; a warning is shown
surfmass= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)
return (self.vmomentdensity(R,z,0.,2.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/surfmass\
-(self.vmomentdensity(R,z,0.,2.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/surfmass)**2.)
def meanjr(self,R,z,nsigma=None,mc=True,nmc=10000,**kwargs):
"""
NAME:
meanjr
PURPOSE:
calculate the mean radial action by marginalizing over velocity
INPUT:
R - radius at which to calculate this
z - height at which to calculate this
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
OUTPUT:
meanjr
HISTORY:
2012-08-09 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self.jmomentdensity(R,z,1.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self.jmomentdensity(R,z,1.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
def meanlz(self,R,z,nsigma=None,mc=True,nmc=10000,**kwargs):
"""
NAME:
meanlz
PURPOSE:
calculate the mean angular momemtum by marginalizing over velocity
INPUT:
R - radius at which to calculate this
z - height at which to calculate this
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
OUTPUT:
meanlz
HISTORY:
2012-08-09 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self.jmomentdensity(R,z,0.,1.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self.jmomentdensity(R,z,0.,1.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
def meanjz(self,R,z,nsigma=None,mc=True,nmc=10000,**kwargs):
"""
NAME:
meanjz
PURPOSE:
calculate the mean vertical action by marginalizing over velocity
INPUT:
R - radius at which to calculate this
z - height at which to calculate this
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
OUTPUT:
meanjz
HISTORY:
2012-08-09 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self.jmomentdensity(R,z,0.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self.jmomentdensity(R,z,0.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self.vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
    def sampleV(self,R,z,n=1):
        """
        NAME:
           sampleV
        PURPOSE:
           sample a radial, azimuthal, and vertical velocity at R,z
           via rejection sampling against a Gaussian proposal
        INPUT:
           R - Galactocentric distance
           z - height
           n= number of distances to sample
        OUTPUT:
           list of samples (n x 3 array of [vR,vT,vz])
        HISTORY:
           2012-12-17 - Written - Bovy (IAS)
        """
        #Determine the maximum of the velocity distribution
        # assumes the DF peaks at vR=0 and vz=0; only vT is optimized over
        maxVR= 0.
        maxVz= 0.
        maxVT= optimize.fmin_powell((lambda x: -self(R,0.,x,z,0.,log=True)),
                                    1.)
        logmaxVD= self(R,maxVR,maxVT,z,maxVz,log=True)
        #Now rejection-sample
        vRs= []
        vTs= []
        vzs= []
        while len(vRs) < n:
            nmore= n-len(vRs)+1
            #sample
            # Gaussian proposals with width 2*sigma (vT centered on the peak)
            propvR= numpy.random.normal(size=nmore)*2.*self._sr
            propvT= numpy.random.normal(size=nmore)*2.*self._sr+maxVT
            propvz= numpy.random.normal(size=nmore)*2.*self._sz
            VDatprop= self(R+numpy.zeros(nmore),
                           propvR,propvT,z+numpy.zeros(nmore),
                           propvz,log=True)-logmaxVD
            # Divide out the proposal density (log-space subtraction)
            VDatprop-= -0.5*(propvR**2./4./self._sr**2.+propvz**2./4./self._sz**2.\
                                 +(propvT-maxVT)**2./4./self._sr**2.)
            VDatprop= numpy.reshape(VDatprop,(nmore))
            indx= (VDatprop > numpy.log(numpy.random.random(size=nmore))) #accept
            vRs.extend(list(propvR[indx]))
            vTs.extend(list(propvT[indx]))
            vzs.extend(list(propvz[indx]))
        # Any surplus accepted samples beyond n are discarded
        out= numpy.empty((n,3))
        out[:,0]= vRs[0:n]
        out[:,1]= vTs[0:n]
        out[:,2]= vzs[0:n]
        return out
    def pvR(self,vR,R,z,gl=True,ngl=_DEFAULTNGL2):
        """
        NAME:
           pvR
        PURPOSE:
           calculate the marginalized vR probability at this location (NOT normalized by the density)
        INPUT:
           vR - radial velocity (/vo)
           R - radius (/ro)
           z - height (/ro)
           gl - use Gauss-Legendre integration (True, currently the only option)
           ngl - order of Gauss-Legendre integration
        OUTPUT:
           p(vR,R,z)
        HISTORY:
           2012-12-22 - Written - Bovy (IAS)
        """
        # Local vertical dispersion sets the vz integration range
        sigmaz1= self._sz*numpy.exp((self._ro-R)/self._hsz)
        if gl:
            if ngl % 2 == 1:
                raise ValueError("ngl must be even")
            #Use Gauss-Legendre integration for all
            if ngl == _DEFAULTNGL:
                glx, glw= self._glxdef, self._glwdef
                glx12, glw12= self._glxdef12, self._glwdef12
            elif ngl == _DEFAULTNGL2:
                glx, glw= self._glxdef2, self._glwdef2
                glx12, glw12= self._glxdef, self._glwdef
            else:
                glx, glw= numpy.polynomial.legendre.leggauss(ngl)
                glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
            #Evaluate everywhere
            # Adiabatic: DF even in vz, so integrate vz >= 0 only;
            # otherwise build a symmetric +/- grid from half-order nodes
            if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
                                    actionAngle.actionAngleAdiabaticGrid)):
                vzgl= 4.*sigmaz1/2.*(glx+1.)
                vzglw= glw
            else:
                vzgl= 4.*sigmaz1/2.*(glx12+1.)
                vzgl= list(vzgl)
                vzgl.extend(-4.*sigmaz1/2.*(glx12+1.))
                vzgl= numpy.array(vzgl)
                vzglw= glw12
                vzglw= list(vzglw)
                vzglw.extend(glw12)
                vzglw= numpy.array(vzglw)
            # vT nodes span [0, 1.5] (in units of the circular velocity)
            vTgl= 1.5/2.*(glx+1.)
            #Tile everything
            vTgl= numpy.tile(vTgl,(ngl,1)).T
            vzgl= numpy.tile(vzgl,(ngl,1))
            vTglw= numpy.tile(glw,(ngl,1)).T #also tile weights
            vzglw= numpy.tile(vzglw,(ngl,1))
            #evaluate
            logqeval= numpy.reshape(self(R+numpy.zeros(ngl*ngl),
                                         vR+numpy.zeros(ngl*ngl),
                                         vTgl.flatten(),
                                         z+numpy.zeros(ngl*ngl),
                                         vzgl.flatten(),
                                         log=True),
                                    (ngl,ngl))
            return numpy.sum(numpy.exp(logqeval)*vTglw*vzglw*sigmaz1)*1.5
    def pvT(self,vT,R,z,gl=True,ngl=_DEFAULTNGL2):
        """
        NAME:
           pvT
        PURPOSE:
           calculate the marginalized vT probability at this location (NOT normalized by the density)
        INPUT:
           vT - tangential velocity (/vo)
           R - radius (/ro)
           z - height (/ro)
           gl - use Gauss-Legendre integration (True, currently the only option)
           ngl - order of Gauss-Legendre integration
        OUTPUT:
           p(vT,R,z)
        HISTORY:
           2012-12-22 - Written - Bovy (IAS)
        """
        # Local radial and vertical dispersions set the integration ranges
        sigmaR1= self._sr*numpy.exp((self._ro-R)/self._hsr)
        sigmaz1= self._sz*numpy.exp((self._ro-R)/self._hsz)
        if gl:
            if ngl % 2 == 1:
                raise ValueError("ngl must be even")
            #Use Gauss-Legendre integration for all
            if ngl == _DEFAULTNGL:
                glx, glw= self._glxdef, self._glwdef
                glx12, glw12= self._glxdef12, self._glwdef12
            elif ngl == _DEFAULTNGL2:
                glx, glw= self._glxdef2, self._glwdef2
                glx12, glw12= self._glxdef, self._glwdef
            else:
                glx, glw= numpy.polynomial.legendre.leggauss(ngl)
                glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
            #Evaluate everywhere
            if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
                                    actionAngle.actionAngleAdiabaticGrid)):
                # Adiabatic DF is even in vR and vz: positive nodes suffice
                vRgl= 4.*sigmaR1/2.*(glx+1.)
                vzgl= 4.*sigmaz1/2.*(glx+1.)
                vRglw= glw
                vzglw= glw
            else:
                # Mirror half-order nodes to cover negative velocities too
                vRgl= 4.*sigmaR1/2.*(glx12+1.)
                vRgl= list(vRgl)
                vRgl.extend(-4.*sigmaR1/2.*(glx12+1.))
                vRgl= numpy.array(vRgl)
                vzgl= 4.*sigmaz1/2.*(glx12+1.)
                vzgl= list(vzgl)
                vzgl.extend(-4.*sigmaz1/2.*(glx12+1.))
                vzgl= numpy.array(vzgl)
                vRglw= glw12
                vRglw= list(vRglw)
                vRglw.extend(glw12)
                vRglw= numpy.array(vRglw)
                vzglw= glw12
                vzglw= list(vzglw)
                vzglw.extend(glw12)
                vzglw= numpy.array(vzglw)
            #Tile everything
            # 2D (vR,vz) grid: vR varies along rows, vz along columns
            vRgl= numpy.tile(vRgl,(ngl,1)).T
            vzgl= numpy.tile(vzgl,(ngl,1))
            vRglw= numpy.tile(vRglw,(ngl,1)).T #also tile weights
            vzglw= numpy.tile(vzglw,(ngl,1))
            #evaluate
            logqeval= numpy.reshape(self(R+numpy.zeros(ngl*ngl),
                                         vRgl.flatten(),
                                         vT+numpy.zeros(ngl*ngl),
                                         z+numpy.zeros(ngl*ngl),
                                         vzgl.flatten(),
                                         log=True),
                                    (ngl,ngl))
            # sigmaR1*sigmaz1 are the Jacobians of the node-range mappings
            return numpy.sum(numpy.exp(logqeval)*vRglw*vzglw*sigmaR1*sigmaz1)
    def pvz(self,vz,R,z,gl=True,ngl=_DEFAULTNGL2,
            _return_actions=False,_jr=None,_lz=None,_jz=None,
            _return_freqs=False,
            _rg=None,_kappa=None,_nu=None,_Omega=None,
            _sigmaR1=None):
        """
        NAME:
           pvz
        PURPOSE:
           calculate the marginalized vz probability at this location (NOT normalized by the density)
        INPUT:
           vz - vertical velocity (/vo)
           R - radius (/ro)
           z - height (/ro)
           gl - use Gauss-Legendre integration (True, currently the only option)
           ngl - order of Gauss-Legendre integration
        OUTPUT:
           p(vz,R,z)
        HISTORY:
           2012-12-22 - Written - Bovy (IAS)
        """
        # _sigmaR1/_jr/_rg etc. are private fast paths that let a caller supply
        # quantities it has already computed
        if _sigmaR1 is None:
            sigmaR1= self._sr*numpy.exp((self._ro-R)/self._hsr)
        else:
            sigmaR1= _sigmaR1
        if gl:
            if ngl % 2 == 1:
                raise ValueError("ngl must be even")
            #Use Gauss-Legendre integration for all
            if ngl == _DEFAULTNGL:
                glx, glw= self._glxdef, self._glwdef
                glx12, glw12= self._glxdef12, self._glwdef12
            elif ngl == _DEFAULTNGL2:
                glx, glw= self._glxdef2, self._glwdef2
                glx12, glw12= self._glxdef, self._glwdef
            else:
                glx, glw= numpy.polynomial.legendre.leggauss(ngl)
                glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
            #Evaluate everywhere
            if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
                                    actionAngle.actionAngleAdiabaticGrid)):
                # Adiabatic DF is even in vR: positive unit nodes suffice
                # (scaled by the dispersion further down)
                vRgl= (glx+1.)
                vRglw= glw
            else:
                # Otherwise mirror half-order nodes onto vR < 0 as well
                vRgl= (glx12+1.)
                vRgl= list(vRgl)
                vRgl.extend(-(glx12+1.))
                vRgl= numpy.array(vRgl)
                vRglw= glw12
                vRglw= list(vRglw)
                vRglw.extend(glw12)
                vRglw= numpy.array(vRglw)
            vTgl= 1.5/2.*(glx+1.)
            #Tile everything
            vTgl= numpy.tile(vTgl,(ngl,1)).T
            vRgl= numpy.tile(vRgl,(ngl,1))
            vTglw= numpy.tile(glw,(ngl,1)).T #also tile weights
            vRglw= numpy.tile(vRglw,(ngl,1))
            #If inputs are arrays, tile
            if isinstance(R,numpy.ndarray):
                nR= len(R)
                R= numpy.tile(R,(ngl,ngl,1)).T.flatten()
                z= numpy.tile(z,(ngl,ngl,1)).T.flatten()
                vz= numpy.tile(vz,(ngl,ngl,1)).T.flatten()
                vTgl= numpy.tile(vTgl,(nR,1,1)).flatten()
                vRgl= numpy.tile(vRgl,(nR,1,1)).flatten()
                vTglw= numpy.tile(vTglw,(nR,1,1))
                vRglw= numpy.tile(vRglw,(nR,1,1))
                scalarOut= False
            else:
                R= R+numpy.zeros(ngl*ngl)
                z= z+numpy.zeros(ngl*ngl)
                vz= vz+numpy.zeros(ngl*ngl)
                nR= 1
                scalarOut= True
            # Scale the unit vR nodes by the (possibly per-R) dispersion range
            vRgl= vRgl.flatten()
            vRgl*= numpy.tile(4.*sigmaR1/2.,(ngl,ngl,1)).T.flatten()
            #evaluate
            # Four call signatures depending on which actions/frequencies the
            # caller pre-computed (private _jr/_rg fast paths)
            if _jr is None and _rg is None:
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self(R,
                                                                 vRgl.flatten(),
                                                                 vTgl.flatten(),
                                                                 z,
                                                                 vz,
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True)
                logqeval= numpy.reshape(logqeval,(nR,ngl*ngl))
            elif not _jr is None and not _rg is None:
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
                                                                 rg=_rg,kappa=_kappa,nu=_nu,
                                                                 Omega=_Omega,
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True)
                logqeval= numpy.reshape(logqeval,(nR,ngl*ngl))
            elif not _jr is None and _rg is None:
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True)
                logqeval= numpy.reshape(logqeval,(nR,ngl*ngl))
            elif _jr is None and not _rg is None:
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self(R,
                                                                 vRgl.flatten(),
                                                                 vTgl.flatten(),
                                                                 z,
                                                                 vz,
                                                                 rg=_rg,kappa=_kappa,nu=_nu,
                                                                 Omega=_Omega,
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True)
                logqeval= numpy.reshape(logqeval,(nR,ngl*ngl))
            vRglw= numpy.reshape(vRglw,(nR,ngl*ngl))
            vTglw= numpy.reshape(vTglw,(nR,ngl*ngl))
            # sigmaR1*1.5 are the Jacobians of the vR and vT node mappings
            if scalarOut:
                result= numpy.sum(numpy.exp(logqeval)*vTglw*vRglw,axis=1)[0]*sigmaR1*1.5
            else:
                result= numpy.sum(numpy.exp(logqeval)*vTglw*vRglw,axis=1)*sigmaR1*1.5
            if _return_actions and _return_freqs:
                return (result,
                        jr,lz,jz,
                        rg, kappa, nu, Omega)
            elif _return_freqs:
                return (result,
                        rg, kappa, nu, Omega)
            elif _return_actions:
                return (result,
                        jr,lz,jz)
            else:
                return result
def pvRvT(self,vR,vT,R,z,gl=True,ngl=_DEFAULTNGL2):
"""
NAME:
pvRvT
PURPOSE:
calculate the marginalized (vR,vT) probability at this location (NOT normalized by the density)
INPUT:
vR - radial velocity (/vo)
vT - tangential velocity (/vo)
R - radius (/ro)
z - height (/ro)
gl - use Gauss-Legendre integration (True, currently the only option)
ngl - order of Gauss-Legendre integration
OUTPUT:
p(vR,vT,R,z)
HISTORY:
2013-01-02 - Written - Bovy (IAS)
"""
sigmaz1= self._sz*numpy.exp((self._ro-R)/self._hsz)
if gl:
if ngl % 2 == 1:
raise ValueError("ngl must be even")
#Use Gauss-Legendre integration for all
if ngl == _DEFAULTNGL:
glx, glw= self._glxdef, self._glwdef
glx12, glw12= self._glxdef12, self._glwdef12
elif ngl == _DEFAULTNGL2:
glx, glw= self._glxdef2, self._glwdef2
glx12, glw12= self._glxdef, self._glwdef
else:
glx, glw= numpy.polynomial.legendre.leggauss(ngl)
glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
#Evaluate everywhere
if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
actionAngle.actionAngleAdiabaticGrid)):
vzgl= 4.*sigmaz1/2.*(glx+1.)
vzglw= glw
else:
vzgl= 4.*sigmaz1/2.*(glx12+1.)
vzgl= list(vzgl)
vzgl.extend(-4.*sigmaz1/2.*(glx12+1.))
vzgl= numpy.array(vzgl)
vzglw= glw12
vzglw= list(vzglw)
vzglw.extend(glw12)
vzglw= numpy.array(vzglw)
#evaluate
logqeval= self(R+numpy.zeros(ngl),
vR+numpy.zeros(ngl),
vT+numpy.zeros(ngl),
z+numpy.zeros(ngl),
vzgl,
log=True)
return numpy.sum(numpy.exp(logqeval)*vzglw*sigmaz1)
def pvTvz(self,vT,vz,R,z,gl=True,ngl=_DEFAULTNGL2):
"""
NAME:
pvTvz
PURPOSE:
calculate the marginalized (vT,vz) probability at this location (NOT normalized by the density)
INPUT:
vT - tangential velocity (/vo)
vz - vertical velocity (/vo)
R - radius (/ro)
z - height (/ro)
gl - use Gauss-Legendre integration (True, currently the only option)
ngl - order of Gauss-Legendre integration
OUTPUT:
p(vT,vz,R,z)
HISTORY:
2012-12-22 - Written - Bovy (IAS)
"""
sigmaR1= self._sr*numpy.exp((self._ro-R)/self._hsr)
if gl:
if ngl % 2 == 1:
raise ValueError("ngl must be even")
#Use Gauss-Legendre integration for all
if ngl == _DEFAULTNGL:
glx, glw= self._glxdef, self._glwdef
glx12, glw12= self._glxdef12, self._glwdef12
elif ngl == _DEFAULTNGL2:
glx, glw= self._glxdef2, self._glwdef2
glx12, glw12= self._glxdef, self._glwdef
else:
glx, glw= numpy.polynomial.legendre.leggauss(ngl)
glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
#Evaluate everywhere
if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
actionAngle.actionAngleAdiabaticGrid)):
vRgl= 4.*sigmaR1/2.*(glx+1.)
vRglw= glw
else:
vRgl= 4.*sigmaR1/2.*(glx12+1.)
vRgl= list(vRgl)
vRgl.extend(-4.*sigmaR1/2.*(glx12+1.))
vRgl= numpy.array(vRgl)
vRglw= glw12
vRglw= list(vRglw)
vRglw.extend(glw12)
vRglw= numpy.array(vRglw)
#evaluate
logqeval= self(R+numpy.zeros(ngl),
vRgl,
vT+numpy.zeros(ngl),
z+numpy.zeros(ngl),
vz+numpy.zeros(ngl),
log=True)
return numpy.sum(numpy.exp(logqeval)*vRglw*sigmaR1)
    def pvRvz(self,vR,vz,R,z,gl=True,ngl=_DEFAULTNGL2):
        """
        NAME:
           pvRvz
        PURPOSE:
           calculate the marginalized (vR,vz) probability at this location (NOT normalized by the density)
        INPUT:
           vR - radial velocity (/vo)
           vz - vertical velocity (/vo)
           R - radius (/ro)
           z - height (/ro)
           gl - use Gauss-Legendre integration (True, currently the only option)
           ngl - order of Gauss-Legendre integration
        OUTPUT:
           p(vR,vz,R,z)
        HISTORY:
           2013-01-02 - Written - Bovy (IAS)
        """
        if gl:
            if ngl % 2 == 1:
                raise ValueError("ngl must be even")
            #Use Gauss-Legendre integration for all
            # (the half-order nodes are not needed here: only vT is integrated)
            if ngl == _DEFAULTNGL:
                glx, glw= self._glxdef, self._glwdef
                glx12, glw12= self._glxdef12, self._glwdef12
            elif ngl == _DEFAULTNGL2:
                glx, glw= self._glxdef2, self._glwdef2
                glx12, glw12= self._glxdef, self._glwdef
            else:
                glx, glw= numpy.polynomial.legendre.leggauss(ngl)
                glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
            #Evaluate everywhere
            # vT nodes on [0,1.5]
            vTgl= 1.5/2.*(glx+1.)
            vTglw= glw
            #If inputs are arrays, tile
            if isinstance(R,numpy.ndarray):
                nR= len(R)
                R= numpy.tile(R,(ngl,1)).T.flatten()
                z= numpy.tile(z,(ngl,1)).T.flatten()
                vR= numpy.tile(vR,(ngl,1)).T.flatten()
                vz= numpy.tile(vz,(ngl,1)).T.flatten()
                vTgl= numpy.tile(vTgl,(nR,1)).flatten()
                vTglw= numpy.tile(vTglw,(nR,1))
                scalarOut= False
            else:
                R= R+numpy.zeros(ngl)
                vR= vR+numpy.zeros(ngl)
                z= z+numpy.zeros(ngl)
                vz= vz+numpy.zeros(ngl)
                nR= 1
                scalarOut= True
            #evaluate
            logqeval= numpy.reshape(self(R,
                                         vR,
                                         vTgl,
                                         z,
                                         vz,
                                         log=True),
                                    (nR,ngl))
            out= numpy.sum(numpy.exp(logqeval)*vTglw,axis=1)
            if scalarOut: return out[0]
            else: return out
    def _calc_epifreq(self,r):
        """
        NAME:
           _calc_epifreq
        PURPOSE:
           calculate the epicycle frequency at r
        INPUT:
           r - radius
        OUTPUT:
           kappa
        HISTORY:
           2012-07-25 - Written - Bovy (IAS@MPIA)
        NOTE:
           takes about 0.1 ms for a Miyamoto-Nagai potential
        """
        # Thin wrapper around the potential module's epicycle frequency
        return potential.epifreq(self._pot,r)
    def _calc_verticalfreq(self,r):
        """
        NAME:
           _calc_verticalfreq
        PURPOSE:
           calculate the vertical frequency at r
        INPUT:
           r - radius
        OUTPUT:
           nu
        HISTORY:
           2012-07-25 - Written - Bovy (IAS@MPIA)
        NOTE:
           takes about 0.05 ms for a Miyamoto-Nagai potential
        """
        # Thin wrapper around the potential module's vertical frequency
        return potential.verticalfreq(self._pot,r)
def rg(self,lz):
"""
NAME:
rg
PURPOSE:
calculate the radius of a circular orbit of Lz
INPUT:
lz - Angular momentum
OUTPUT:
radius
HISTORY:
2012-07-25 - Written - Bovy (IAS@MPIA)
NOTE:
seems to take about ~0.5 ms for a Miyamoto-Nagai potential;
~0.75 ms for a MWPotential
about the same with or without interpolation of the rotation curve
Not sure what to do about negative lz...
"""
if isinstance(lz,numpy.ndarray):
indx= (lz > self._precomputergLzmax)*(lz < self._precomputergLzmin)
indxc= True-indx
out= numpy.empty(lz.shape)
out[indxc]= self._rgInterp(lz[indxc])
out[indx]= numpy.array([potential.rl(self._pot,lz[indx][ii]) for ii in range(numpy.sum(indx))])
return out
else:
if lz > self._precomputergLzmax or lz < self._precomputergLzmin:
return potential.rl(self._pot,lz)
return numpy.atleast_1d(self._rgInterp(lz))
def _vmomentsurfaceIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,n,m,o): #pragma: no cover because this is too slow; a warning is shown
    """Integrand vR^n vT^m vz^o * f for the velocity-moment integration.

    The velocities come in scaled by the local dispersions; the DF itself is
    evaluated at the corresponding unscaled velocities.
    """
    moment= vR**n*vT**m*vz**o
    dfval= df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1)
    return moment*dfval
def _vmomentsurfaceMCIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,mvT,n,m,o):
    """Monte-Carlo integrand for the velocity-moment integration.

    Same as _vmomentsurfaceIntegrand, times an exp factor that divides out the
    Gaussian proposal density of the samples (centered on mvT in vT).
    """
    dfval= df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1)
    reweight= numpy.exp(vR**2./2.+(vT-mvT)**2./2.+vz**2./2.)
    return vR**n*vT**m*vz**o*dfval*reweight
def _jmomentsurfaceIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,n,m,o): #pragma: no cover because this is too slow; a warning is shown
    """Integrand for the action-moment integration: the DF weights itself by
    jr^n lz^m jz^o through its func= keyword."""
    weight= lambda x,y,z: x**n*y**m*z**o
    return df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1,func=weight)
def _jmomentsurfaceMCIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,mvT,n,m,o):
    """Monte-Carlo integrand for the action-moment integration; the exp factor
    divides out the Gaussian proposal density of the samples."""
    dfval= df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1,
              func=(lambda x,y,z: x**n*y**m*z**o))
    reweight= numpy.exp(vR**2./2.+(vT-mvT)**2./2.+vz**2./2.)
    return dfval*reweight
| {
"repo_name": "followthesheep/galpy",
"path": "galpy/df_src/quasiisothermaldf.py",
"copies": "1",
"size": "76948",
"license": "bsd-3-clause",
"hash": -3468241802468573000,
"line_mean": 39.0770833333,
"line_max": 180,
"alpha_frac": 0.4611425898,
"autogenerated": false,
"ratio": 3.8427886536156612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9716397724868694,
"avg_score": 0.01750670370939349,
"num_lines": 1920
} |
#A 'Binney' quasi-isothermal DF
import warnings
import hashlib
import numpy
from scipy import optimize, interpolate, integrate
from .. import potential
from .. import actionAngle
from ..actionAngle import actionAngleIsochrone
from ..potential import IsochronePotential
from ..potential import flatten as flatten_potential
from ..orbit import Orbit
from .df import df
from ..util import galpyWarning
from ..util.conversion import physical_conversion, \
potential_physical_input, actionAngle_physical_input, _APY_UNITS, \
physical_compatible, parse_length, parse_velocity, parse_angmom, \
parse_length_kpc, parse_velocity_kms, _APY_LOADED
if _APY_LOADED:
from astropy import units
# Default number of dispersions out to which velocity integrations extend
_NSIGMA=4
# Default Gauss-Legendre orders (a coarse and a twice-finer grid)
_DEFAULTNGL=10
_DEFAULTNGL2=20
class quasiisothermaldf(df):
"""Class that represents a 'Binney' quasi-isothermal DF"""
    def __init__(self,hr,sr,sz,hsr,hsz,pot=None,aA=None,
                 cutcounter=False,
                 _precomputerg=True,_precomputergrmax=None,
                 _precomputergnLz=51,
                 refr=1.,lo=10./220./8.,
                 ro=None,vo=None):
        """
        NAME:
           __init__
        PURPOSE:
           Initialize a quasi-isothermal DF
        INPUT:
           hr - radial scale length (can be Quantity)
           sr - radial velocity dispersion at the solar radius (can be Quantity)
           sz - vertical velocity dispersion at the solar radius (can be Quantity)
           hsr - radial-velocity-dispersion scale length (can be Quantity)
           hsz - vertial-velocity-dispersion scale length (can be Quantity)
           pot= Potential instance or list thereof
           aA= actionAngle instance used to convert (x,v) to actions [must be an instance of an actionAngle class that computes (J,Omega,angle) for a given (x,v)]
           cutcounter= if True, set counter-rotating stars' DF to zero
           refr= reference radius for dispersions (can be different from ro) (can be Quantity)
           lo= reference angular momentum below where there are significant numbers of retrograde stars (can be Quantity)
           ro= distance from vantage point to GC (kpc; can be Quantity)
           vo= circular velocity at ro (km/s; can be Quantity)
        OTHER INPUTS:
           _precomputerg= if True (default), pre-compute the rL(L)
           _precomputergrmax= if set, this is the maximum R for which to pre-compute rg (default: 5*hr)
           _precomputergnLz if set, number of Lz to pre-compute rg for (default: 51)
        OUTPUT:
           object
        HISTORY:
           2012-07-25 - Started - Bovy (IAS@MPIA)
        """
        df.__init__(self,ro=ro,vo=vo)
        # Parse possible astropy Quantities into internal (natural) units
        self._hr= parse_length(hr,ro=self._ro)
        self._sr= parse_velocity(sr,vo=self._vo)
        self._sz= parse_velocity(sz,vo=self._vo)
        self._hsr= parse_length(hsr,ro=self._ro)
        self._hsz= parse_length(hsz,ro=self._ro)
        self._refr= parse_length(refr,ro=self._ro)
        self._lo= parse_angmom(lo,ro=self._ro,vo=self._vo)
        # Log-dispersions are used repeatedly in __call__
        self._lnsr= numpy.log(self._sr)
        self._lnsz= numpy.log(self._sz)
        # Cache for the maximum-vT interpolation (used elsewhere in sampling)
        self._maxVT_hash= None
        self._maxVT_ip= None
        if pot is None:
            raise IOError("pot= must be set")
        self._pot= flatten_potential(pot)
        if aA is None:
            raise IOError("aA= must be set")
        self._aA= aA
        # Sanity check: the actionAngle instance must be built on the same
        # potential as the one handed in here
        if not self._aA._pot == self._pot:
            if not isinstance(self._aA,actionAngleIsochrone):
                raise IOError("Potential in aA does not appear to be the same as given potential pot")
            elif isinstance(self._pot,IsochronePotential) and \
                    not self._aA.b == self._pot.b and \
                    not self._aA.amp == self._pot._amp:
                raise IOError("Potential in aA does not appear to be the same as given potential pot")
        self._check_consistent_units()
        self._cutcounter= cutcounter
        if _precomputerg:
            # Pre-compute the guiding-center radius rg(Lz) on an Lz grid and
            # spline it for fast lookups in rg()
            if _precomputergrmax is None:
                _precomputergrmax= 5*self._hr
            self._precomputergrmax= _precomputergrmax
            self._precomputergnLz= _precomputergnLz
            self._precomputergLzmin= 0.01
            self._precomputergLzmax= self._precomputergrmax\
                *potential.vcirc(self._pot,self._precomputergrmax)
            self._precomputergLzgrid= numpy.linspace(self._precomputergLzmin,self._precomputergLzmax,self._precomputergnLz)
            self._rls= numpy.array([potential.rl(self._pot,l) for l in self._precomputergLzgrid])
            #Spline interpolate
            self._rgInterp= interpolate.InterpolatedUnivariateSpline(self._precomputergLzgrid,self._rls,k=3)
        else:
            # No pre-computation: make the 'pre-computed' Lz range empty so
            # rg() always solves for rl directly
            self._precomputergrmax= 0.
            self._rgInterp= None
            self._rls= None
            self._precomputergnr= None
            self._precomputergLzgrid= None
            self._precomputergLzmin= \
                numpy.finfo(numpy.dtype(numpy.float64)).max
            self._precomputergLzmax= \
                numpy.finfo(numpy.dtype(numpy.float64)).min
        self._precomputerg= _precomputerg
        # Cache Gauss-Legendre nodes/weights for the default orders
        self._glxdef, self._glwdef= \
            numpy.polynomial.legendre.leggauss(_DEFAULTNGL)
        self._glxdef2, self._glwdef2= \
            numpy.polynomial.legendre.leggauss(_DEFAULTNGL2)
        self._glxdef12, self._glwdef12= \
            numpy.polynomial.legendre.leggauss(_DEFAULTNGL//2)
        return None
    @physical_conversion('phasespacedensity',pop=True)
    def __call__(self,*args,**kwargs):
        """
        NAME:
           __call__
        PURPOSE:
           return the DF
        INPUT:
           Either:
              a)(jr,lz,jz) tuple; each can be a Quantity
                 where:
                    jr - radial action
                    lz - z-component of angular momentum
                    jz - vertical action
              b) R,vR,vT,z,vz
              c) Orbit instance: initial condition used if that's it, orbit(t)
                 if there is a time given as well
           log= if True, return the natural log
           +scipy.integrate.quadrature kwargs
           func= function of (jr,lz,jz) to multiply f with (useful for moments)
        OUTPUT:
           value of DF
        HISTORY:
           2012-07-25 - Written - Bovy (IAS@MPIA)
        NOTE:
           For Miyamoto-Nagai/adiabatic approximation this seems to take
           about 30 ms / evaluation in the extended Solar neighborhood
           For a MWPotential/adiabatic approximation this takes about
           50 ms / evaluation in the extended Solar neighborhood
           For adiabatic-approximation grid this seems to take
           about 0.67 to 0.75 ms / evaluation in the extended Solar
           neighborhood (includes some out of the grid)
           up to 200x faster when called with vector R,vR,vT,z,vz
        """
        #First parse log
        log= kwargs.pop('log',False)
        _return_actions= kwargs.pop('_return_actions',False)
        _return_freqs= kwargs.pop('_return_freqs',False)
        _func= kwargs.pop('func',None)
        # Fast path: the caller may supply rg and the frequencies directly
        if 'rg' in kwargs:
            thisrg= kwargs.pop('rg')
            kappa= kwargs.pop('kappa')
            nu= kwargs.pop('nu')
            Omega= kwargs.pop('Omega')
        else:
            thisrg= None
            kappa= None
            nu= None
            Omega= None
        #First parse args
        if len(args) == 1 and not isinstance(args[0],Orbit): #(jr,lz,jz)
            jr,lz,jz= args[0]
            jr= parse_angmom(jr,ro=self._ro,vo=self._vo)
            lz= parse_angmom(lz,ro=self._ro,vo=self._vo)
            jz= parse_angmom(jz,ro=self._ro,vo=self._vo)
        else:
            #Use self._aA to calculate the actions
            if isinstance(args[0],Orbit) and len(args[0].shape) > 1:
                raise RuntimeError("Evaluating quasiisothermaldf with Orbit instances with multi-dimensional shapes is not supported") #pragma: no cover
            try:
                jr,lz,jz= self._aA(*args,use_physical=False,**kwargs)
            except actionAngle.UnboundError:
                # Unbound orbits carry zero phase-space density
                if log: return -numpy.finfo(numpy.dtype(numpy.float64)).max
                else: return 0.
            #if isinstance(jr,(list,numpy.ndarray)) and len(jr) > 1: jr= jr[0]
            #if isinstance(jz,(list,numpy.ndarray)) and len(jz) > 1: jz= jz[0]
        # Scalar counter-rotating star with cutcounter: zero density
        if not isinstance(lz,numpy.ndarray) and self._cutcounter and lz < 0.:
            if log: return -numpy.finfo(numpy.dtype(numpy.float64)).max
            else: return 0.
        #First calculate rg
        if thisrg is None:
            thisrg= self._rg(lz)
            #Then calculate the epicycle and vertical frequencies
            kappa, nu= self._calc_epifreq(thisrg), self._calc_verticalfreq(thisrg)
            Omega= numpy.fabs(lz)/thisrg/thisrg
        #calculate surface-densities and sigmas
        # All evaluated at the guiding-center radius, exponential profiles
        lnsurfmass= (self._refr-thisrg)/self._hr
        lnsr= self._lnsr+(self._refr-thisrg)/self._hsr
        lnsz= self._lnsz+(self._refr-thisrg)/self._hsz
        #Calculate func
        if not _func is None:
            if log:
                funcTerm= numpy.log(_func(jr,lz,jz))
            else:
                funcFactor= _func(jr,lz,jz)
        #Calculate fsr
        else:
            if log:
                funcTerm= 0.
            else:
                funcFactor= 1.
        if log:
            # Log of the radial and vertical factors of the quasi-isothermal
            # DF; the tanh term suppresses counter-rotating orbits smoothly
            lnfsr= numpy.log(Omega)+lnsurfmass-2.*lnsr-numpy.log(numpy.pi)\
                -numpy.log(kappa)\
                +numpy.log(1.+numpy.tanh(lz/self._lo))\
                -kappa*jr*numpy.exp(-2.*lnsr)
            lnfsz= numpy.log(nu)-numpy.log(2.*numpy.pi)\
                -2.*lnsz-nu*jz*numpy.exp(-2.*lnsz)
            out= lnfsr+lnfsz+funcTerm
            # Map NaNs (e.g. failed action computations) to zero density
            if isinstance(lz,numpy.ndarray):
                out[numpy.isnan(out)]= -numpy.finfo(numpy.dtype(numpy.float64)).max
                if self._cutcounter: out[(lz < 0.)]= -numpy.finfo(numpy.dtype(numpy.float64)).max
            elif numpy.isnan(out): out= -numpy.finfo(numpy.dtype(numpy.float64)).max
        else:
            srm2= numpy.exp(-2.*lnsr)
            fsr= Omega*numpy.exp(lnsurfmass)*srm2/numpy.pi/kappa\
                *(1.+numpy.tanh(lz/self._lo))\
                *numpy.exp(-kappa*jr*srm2)
            szm2= numpy.exp(-2.*lnsz)
            fsz= nu/2./numpy.pi*szm2*numpy.exp(-nu*jz*szm2)
            out= fsr*fsz*funcFactor
            if isinstance(lz,numpy.ndarray):
                out[numpy.isnan(out)]= 0.
                if self._cutcounter: out[(lz < 0.)]= 0.
            elif numpy.isnan(out): out= 0.
        if _return_actions and _return_freqs:
            return (out,jr,lz,jz,thisrg,kappa,nu,Omega)
        elif _return_actions:
            return (out,jr,lz,jz)
        elif _return_freqs:
            return (out,thisrg,kappa,nu,Omega)
        else:
            return out
    @potential_physical_input
    @physical_conversion('position',pop=True)
    def estimate_hr(self,R,z=0.,dR=10.**-8.,**kwargs):
        """
        NAME:
           estimate_hr
        PURPOSE:
           estimate the exponential scale length at R
        INPUT:
           R - Galactocentric radius (can be Quantity)
           z= height (default: 0 pc) (can be Quantity)
           dR- range in R to use (can be Quantity)
           density kwargs
        OUTPUT:
           estimated hR
        HISTORY:
           2012-09-11 - Written - Bovy (IAS)
           2013-01-28 - Re-written - Bovy
        """
        # Two-point finite difference of ln(density) around R
        Rs= [R-dR/2.,R+dR/2.]
        # NOTE(review): with the default z=0. this branch is only reached when
        # z=None is passed explicitly; it then uses the vertically-integrated
        # surface density instead of the local density -- confirm intended
        if z is None:
            sf= numpy.array([self.surfacemass_z(r,use_physical=False,
                                                **kwargs) for r in Rs])
        else:
            sf= numpy.array([self.density(r,z,use_physical=False,
                                          **kwargs) for r in Rs])
        lsf= numpy.log(sf)
        # hR = -dR / Delta ln(density)
        return -dR/(lsf[1]-lsf[0])
@potential_physical_input
@physical_conversion('position',pop=True)
def estimate_hz(self,R,z,dz=10.**-8.,**kwargs):
"""
NAME:
estimate_hz
PURPOSE:
estimate the exponential scale height at R
INPUT:
R - Galactocentric radius (can be Quantity)
dz - z range to use (can be Quantity)
density kwargs
OUTPUT:
estimated hz
HISTORY:
2012-08-30 - Written - Bovy (IAS)
2013-01-28 - Re-written - Bovy
"""
if z == 0.:
zs= [z,z+dz]
else:
zs= [z-dz/2.,z+dz/2.]
sf= numpy.array([self.density(R,zz,use_physical=False,
**kwargs) for zz in zs])
lsf= numpy.log(sf)
return -dz/(lsf[1]-lsf[0])
@potential_physical_input
@physical_conversion('position',pop=True)
def estimate_hsr(self,R,z=0.,dR=10.**-8.,**kwargs):
"""
NAME:
estimate_hsr
PURPOSE:
estimate the exponential scale length of the radial dispersion at R
INPUT:
R - Galactocentric radius (can be Quantity)
z= height (default: 0 pc) (can be Quantity)
dR- range in R to use (can be Quantity)
density kwargs
OUTPUT:
estimated hsR
HISTORY:
2013-03-08 - Written - Bovy (IAS)
"""
Rs= [R-dR/2.,R+dR/2.]
sf= numpy.array([self.sigmaR2(r,z,use_physical=False,
**kwargs) for r in Rs])
lsf= numpy.log(sf)/2.
return -dR/(lsf[1]-lsf[0])
@potential_physical_input
@physical_conversion('position',pop=True)
def estimate_hsz(self,R,z=0.,dR=10.**-8.,**kwargs):
"""
NAME:
estimate_hsz
PURPOSE:
estimate the exponential scale length of the vertical dispersion at R
INPUT:
R - Galactocentric radius (can be Quantity)
z= height (default: 0 pc) (can be Quantity)
dR- range in R to use (can be Quantity)
density kwargs
OUTPUT:
estimated hsz
HISTORY:
2013-03-08 - Written - Bovy (IAS)
"""
Rs= [R-dR/2.,R+dR/2.]
sf= numpy.array([self.sigmaz2(r,z,use_physical=False,
**kwargs) for r in Rs])
lsf= numpy.log(sf)/2.
return -dR/(lsf[1]-lsf[0])
    @potential_physical_input
    @physical_conversion('numbersurfacedensity',pop=True)
    def surfacemass_z(self,R,nz=7,zmax=1.,fixed_quad=True,fixed_order=8,
                      **kwargs):
        """
        NAME:
           surfacemass_z
        PURPOSE:
           calculate the vertically-integrated surface density
        INPUT:
           R - Galactocentric radius (can be Quantity)
           fixed_quad= if True (default), use Gauss-Legendre integration
           fixed_order= (8), order of GL integration to use
           nz= number of zs to use to estimate (only used when fixed_quad=False)
           zmax= maximum z to use (can be Quantity; only used when fixed_quad=False)
           density kwargs
        OUTPUT:
           \Sigma(R)
        HISTORY:
           2012-08-30 - Written - Bovy (IAS)
        """
        if fixed_quad:
            # NOTE(review): this branch integrates z over [0,0.5] regardless of
            # zmax and ignores the density kwargs -- confirm intended
            return 2.*integrate.fixed_quad(lambda x: self.density(R*numpy.ones(fixed_order),x,use_physical=False),
                                           0.,.5,n=fixed_order)[0]
        # Otherwise: sample ln(density) on a z grid, spline it, and integrate
        # exp(spline); the factor 2 accounts for z < 0 by symmetry
        zs= numpy.linspace(0.,zmax,nz)
        sf= numpy.array([self.density(R,z,use_physical=False,
                                      **kwargs) for z in zs])
        lsf= numpy.log(sf)
        #Interpolate
        lsfInterp= interpolate.UnivariateSpline(zs,
                                                lsf,
                                                k=3)
        #Integrate
        # NOTE(review): upper limit is hard-coded to 1., not zmax -- confirm
        return 2.*integrate.quad((lambda x: numpy.exp(lsfInterp(x))),
                                 0.,1.)[0]
def vmomentdensity(self,*args,**kwargs):
"""
NAME:
vmomentdensity
PURPOSE:
calculate the an arbitrary moment of the velocity distribution
at R times the density
INPUT:
R - radius at which to calculate the moment(/ro)
n - vR^n
m - vT^m
o - vz^o
OPTIONAL INPUT:
nsigma - number of sigma to integrate the vR and vz velocities over (when doing explicit numerical integral; default: 4)
vTmax - upper limit for integration over vT (default: 1.5)
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= use Gauss-Legendre
_returngl= if True, return the evaluated DF
_return_actions= if True, return the evaluated actions (does not work with _returngl currently)
_return_freqs= if True, return the evaluated frequencies and rg (does not work with _returngl currently)
OUTPUT:
<vR^n vT^m x density> at R,z (no support for units)
HISTORY:
2012-08-06 - Written - Bovy (IAS@MPIA)
"""
use_physical= kwargs.pop('use_physical',True)
ro= kwargs.pop('ro',None)
if ro is None and hasattr(self,'_roSet') and self._roSet:
ro= self._ro
ro= parse_length_kpc(ro)
vo= kwargs.pop('vo',None)
if vo is None and hasattr(self,'_voSet') and self._voSet:
vo= self._vo
vo= parse_velocity_kms(vo)
if use_physical and not vo is None and not ro is None:
fac= vo**(args[2]+args[3]+args[4])/ro**3
if _APY_UNITS:
u= 1/units.kpc**3*(units.km/units.s)**(args[2]+args[3]+args[4])
out= self._vmomentdensity(*args,**kwargs)
if _APY_UNITS:
return units.Quantity(out*fac,unit=u)
else:
return out*fac
else:
return self._vmomentdensity(*args,**kwargs)
    def _vmomentdensity(self,R,z,n,m,o,nsigma=None,mc=False,nmc=10000,
                        _returnmc=False,_vrs=None,_vts=None,_vzs=None,
                        _rawgausssamples=False,
                        gl=False,ngl=_DEFAULTNGL,_returngl=False,_glqeval=None,
                        _return_actions=False,_jr=None,_lz=None,_jz=None,
                        _return_freqs=False,
                        _rg=None,_kappa=None,_nu=None,_Omega=None,
                        _sigmaR1=None,_sigmaz1=None,
                        **kwargs):
        """Non-physical version of vmomentdensity, otherwise the same"""
        # Vector input: recurse over the individual (R,z) pairs
        if isinstance(R,numpy.ndarray):
            return numpy.array([self._vmomentdensity(r,zz,n,m,o,nsigma=nsigma,
                                                     mc=mc,nmc=nmc,
                                                     gl=gl,ngl=ngl,**kwargs) for r,zz in zip(R,z)])
        # Adiabatic DF is even in vR and vz, so odd moments vanish exactly
        if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
                                actionAngle.actionAngleAdiabaticGrid)):
            if n % 2 == 1. or o % 2 == 1.:
                return 0. #we know this must be the case
        if nsigma == None:
            nsigma= _NSIGMA
        # Local dispersions (the caller may supply them to avoid recomputing)
        if _sigmaR1 is None:
            sigmaR1= self._sr*numpy.exp((self._refr-R)/self._hsr)
        else:
            sigmaR1= _sigmaR1
        if _sigmaz1 is None:
            sigmaz1= self._sz*numpy.exp((self._refr-R)/self._hsz)
        else:
            sigmaz1= _sigmaz1
        thisvc= potential.vcirc(self._pot,R,use_physical=False)
        #Use the asymmetric drift equation to estimate va
        gamma= numpy.sqrt(0.5)
        va= sigmaR1**2./2./thisvc\
            *(gamma**2.-1. #Assume close to flat rotation curve, sigphi2/sigR2 =~ 0.5
              +R*(1./self._hr+2./self._hsr))
        if numpy.fabs(va) > sigmaR1: va = 0.#To avoid craziness near the center
        if gl:
            if ngl % 2 == 1:
                raise ValueError("ngl must be even")
            # A pre-evaluated DF grid is only usable if its order matches
            if not _glqeval is None and ngl != _glqeval.shape[0]:
                _glqeval= None
            #Use Gauss-Legendre integration for all
            if ngl == _DEFAULTNGL:
                glx, glw= self._glxdef, self._glwdef
                glx12, glw12= self._glxdef12, self._glwdef12
            elif ngl == _DEFAULTNGL2:
                glx, glw= self._glxdef2, self._glwdef2
                glx12, glw12= self._glxdef, self._glwdef
            else:
                glx, glw= numpy.polynomial.legendre.leggauss(ngl)
                glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
            #Evaluate everywhere
            if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
                                    actionAngle.actionAngleAdiabaticGrid)):
                # Even DF: positive vR,vz nodes suffice
                vRgl= nsigma*sigmaR1/2.*(glx+1.)
                vzgl= nsigma*sigmaz1/2.*(glx+1.)
                vRglw= glw
                vzglw= glw
            else:
                # Mirror half-order nodes onto negative velocities
                vRgl= nsigma*sigmaR1/2.*(glx12+1.)
                #vRgl= 1.5/2.*(glx12+1.)
                vRgl= list(vRgl)
                vRgl.extend(-nsigma*sigmaR1/2.*(glx12+1.))
                #vRgl.extend(-1.5/2.*(glx12+1.))
                vRgl= numpy.array(vRgl)
                vzgl= nsigma*sigmaz1/2.*(glx12+1.)
                #vzgl= 1.5/2.*(glx12+1.)
                vzgl= list(vzgl)
                vzgl.extend(-nsigma*sigmaz1/2.*(glx12+1.))
                #vzgl.extend(-1.5/2.*(glx12+1.))
                vzgl= numpy.array(vzgl)
                vRglw= glw12
                vRglw= list(vRglw)
                vRglw.extend(glw12)
                vRglw= numpy.array(vRglw)
                vzglw= glw12
                vzglw= list(vzglw)
                vzglw.extend(glw12)
                vzglw= numpy.array(vzglw)
            if 'vTmax' in kwargs: vTmax = kwargs['vTmax']
            else: vTmax = 1.5
            vTgl= vTmax/2.*(glx+1.)
            #Tile everything
            # Build the 3D (vT,vR,vz) quadrature grid
            vTgl= numpy.tile(vTgl,(ngl,ngl,1)).T
            vRgl= numpy.tile(numpy.reshape(vRgl,(1,ngl)).T,(ngl,1,ngl))
            vzgl= numpy.tile(vzgl,(ngl,ngl,1))
            vTglw= numpy.tile(glw,(ngl,ngl,1)).T #also tile weights
            vRglw= numpy.tile(numpy.reshape(vRglw,(1,ngl)).T,(ngl,1,ngl))
            vzglw= numpy.tile(vzglw,(ngl,ngl,1))
            #evaluate
            # Fast paths: the DF grid and/or the actions may be pre-computed
            if _glqeval is None and _jr is None:
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self(R+numpy.zeros(ngl*ngl*ngl),
                                                                 vRgl.flatten(),
                                                                 vTgl.flatten(),
                                                                 z+numpy.zeros(ngl*ngl*ngl),
                                                                 vzgl.flatten(),
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True,
                                                                 use_physical=False)
                logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
            elif not _jr is None and _rg is None:
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True,
                                                                 use_physical=False)
                logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
            elif not _jr is None and not _rg is None:
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
                                                                 rg=_rg,kappa=_kappa,nu=_nu,
                                                                 Omega=_Omega,
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True,
                                                                 use_physical=False)
                logqeval= numpy.reshape(logqeval,(ngl,ngl,ngl))
            else:
                logqeval= _glqeval
            # sigmaR1*sigmaz1*0.125*vTmax*nsigma**2 collects the Jacobians of
            # the three node-range mappings
            if _returngl:
                return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
                                  *vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2,
                        logqeval)
            elif _return_actions and _return_freqs:
                return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
                                  *vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2,
                        jr,lz,jz,
                        rg,kappa,nu,Omega)
            elif _return_actions:
                return (numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
                                  *vTglw*vRglw*vzglw)*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2,
                        jr,lz,jz)
            else:
                return numpy.sum(numpy.exp(logqeval)*vRgl**n*vTgl**m*vzgl**o
                                 *vTglw*vRglw*vzglw*sigmaR1*sigmaz1*0.125*vTmax*nsigma**2)
        elif mc:
            # Monte-Carlo integration with Gaussian proposals; vT samples are
            # centered on the asymmetric-drift-corrected mean mvT
            mvT= (thisvc-va)/gamma/sigmaR1
            if _vrs is None:
                vrs= numpy.random.normal(size=nmc)
            else:
                vrs= _vrs
            if _vts is None:
                vts= numpy.random.normal(size=nmc)+mvT
            else:
                if _rawgausssamples:
                    vts= _vts+mvT
                else:
                    vts= _vts
            if _vzs is None:
                vzs= numpy.random.normal(size=nmc)
            else:
                vzs= _vzs
            Is= _vmomentsurfaceMCIntegrand(vzs,vrs,vts,numpy.ones(nmc)*R,
                                           numpy.ones(nmc)*z,
                                           self,sigmaR1,gamma,sigmaz1,mvT,
                                           n,m,o)
            if _returnmc:
                if _rawgausssamples:
                    return (numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o),
                            vrs,vts-mvT,vzs)
                else:
                    return (numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o),
                            vrs,vts,vzs)
            else:
                return numpy.mean(Is)*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o)
        else: #pragma: no cover because this is too slow; a warning is shown
            warnings.warn("Calculations using direct numerical integration using tplquad is not recommended and extremely slow; it has also not been carefully tested",galpyWarning)
            return integrate.tplquad(_vmomentsurfaceIntegrand,
                                     1./gamma*(thisvc-va)/sigmaR1-nsigma,
                                     1./gamma*(thisvc-va)/sigmaR1+nsigma,
                                     lambda x: 0., lambda x: nsigma,
                                     lambda x,y: 0., lambda x,y: nsigma,
                                     (R,z,self,sigmaR1,gamma,sigmaz1,n,m,o),
                                     **kwargs)[0]*sigmaR1**(2.+n+m)*gamma**(1.+m)*sigmaz1**(1.+o)
    def jmomentdensity(self,*args,**kwargs):
        """
        NAME:
           jmomentdensity
        PURPOSE:
           calculate an arbitrary moment of the actions
           of the velocity distribution
           at (R,z) times the density; args are (R,z,n,m,o) for <jr^n lz^m jz^o>
        INPUT:
           R - radius at which to calculate the moment(/ro)
           n - jr^n
           m - lz^m
           o - jz^o
        OPTIONAL INPUT:
           nsigma - number of sigma to integrate the velocities over (when doing explicit numerical integral)
           mc= if True, calculate using Monte Carlo integration
           nmc= if mc, use nmc samples
        OUTPUT:
           <jr^n lz^m jz^o x density> at R (no support for units)
        HISTORY:
           2012-08-09 - Written - Bovy (IAS@MPIA)
        """
        # Resolve physical-output settings: explicit kwargs win, otherwise
        # fall back to the ro/vo stored on the instance (if set)
        use_physical= kwargs.pop('use_physical',True)
        ro= kwargs.pop('ro',None)
        if ro is None and hasattr(self,'_roSet') and self._roSet:
            ro= self._ro
        ro= parse_length_kpc(ro)
        vo= kwargs.pop('vo',None)
        if vo is None and hasattr(self,'_voSet') and self._voSet:
            vo= self._vo
        vo= parse_velocity_kms(vo)
        if use_physical and not vo is None and not ro is None:
            # actions scale as ro*vo; density contributes 1/ro^3
            # args[2:5] are the moment powers (n,m,o)
            fac= (ro*vo)**(args[2]+args[3]+args[4])/ro**3
            if _APY_UNITS:
                u= 1/units.kpc**3*(units.kpc*units.km/units.s)**(args[2]+args[3]+args[4])
            out= self._jmomentdensity(*args,**kwargs)
            if _APY_UNITS:
                return units.Quantity(out*fac,unit=u)
            else:
                return out*fac
        else:
            # Natural units requested or no ro/vo available
            return self._jmomentdensity(*args,**kwargs)
def _jmomentdensity(self,R,z,n,m,o,nsigma=None,mc=True,nmc=10000,
_returnmc=False,_vrs=None,_vts=None,_vzs=None,
**kwargs):
"""Non-physical version of jmomentdensity, otherwise the same"""
if nsigma == None:
nsigma= _NSIGMA
sigmaR1= self._sr*numpy.exp((self._refr-R)/self._hsr)
sigmaz1= self._sz*numpy.exp((self._refr-R)/self._hsz)
thisvc= potential.vcirc(self._pot,R,use_physical=False)
#Use the asymmetric drift equation to estimate va
gamma= numpy.sqrt(0.5)
va= sigmaR1**2./2./thisvc\
*(gamma**2.-1. #Assume close to flat rotation curve, sigphi2/sigR2 =~ 0.5
+R*(1./self._hr+2./self._hsr))
if numpy.fabs(va) > sigmaR1: va = 0.#To avoid craziness near the center
if mc:
mvT= (thisvc-va)/gamma/sigmaR1
if _vrs is None:
vrs= numpy.random.normal(size=nmc)
else:
vrs= _vrs
if _vts is None:
vts= numpy.random.normal(size=nmc)+mvT
else:
vts= _vts
if _vzs is None:
vzs= numpy.random.normal(size=nmc)
else:
vzs= _vzs
Is= _jmomentsurfaceMCIntegrand(vzs,vrs,vts,numpy.ones(nmc)*R,numpy.ones(nmc)*z,self,sigmaR1,gamma,sigmaz1,mvT,n,m,o)
if _returnmc:
return (numpy.mean(Is)*sigmaR1**2.*gamma*sigmaz1,
vrs,vts,vzs)
else:
return numpy.mean(Is)*sigmaR1**2.*gamma*sigmaz1
else: #pragma: no cover because this is too slow; a warning is shown
warnings.warn("Calculations using direct numerical integration using tplquad is not recommended and extremely slow; it has also not been carefully tested",galpyWarning)
return integrate.tplquad(_jmomentsurfaceIntegrand,
1./gamma*(thisvc-va)/sigmaR1-nsigma,
1./gamma*(thisvc-va)/sigmaR1+nsigma,
lambda x: 0., lambda x: nsigma,
lambda x,y: 0., lambda x,y: nsigma,
(R,z,self,sigmaR1,gamma,sigmaz1,n,m,o),
**kwargs)[0]*sigmaR1**2.*gamma*sigmaz1
@potential_physical_input
@physical_conversion('numberdensity',pop=True)
def density(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
density
PURPOSE:
calculate the density at R,z by marginalizing over velocity
INPUT:
R - radius at which to calculate the density (can be Quantity)
z - height at which to calculate the density (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
density at (R,z)
HISTORY:
2012-07-26 - Written - Bovy (IAS@MPIA)
"""
return self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
gl=gl,ngl=ngl,
**kwargs)
@potential_physical_input
@physical_conversion('velocity2',pop=True)
def sigmaR2(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
sigmaR2
PURPOSE:
calculate sigma_R^2 by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
sigma_R^2
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self._vmomentdensity(R,z,2.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self._vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self._vmomentdensity(R,z,2.,0.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self._vmomentdensity(R,z,2.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
@potential_physical_input
@physical_conversion('velocity2',pop=True)
def sigmaRz(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
sigmaRz
PURPOSE:
calculate sigma_RZ^2 by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
sigma_Rz^2
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self._vmomentdensity(R,z,1.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self._vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self._vmomentdensity(R,z,1.,0.,1.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self._vmomentdensity(R,z,1.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
    @potential_physical_input
    @physical_conversion('angle',pop=True)
    def tilt(self,R,z,nsigma=None,mc=False,nmc=10000,
             gl=True,ngl=_DEFAULTNGL,**kwargs):
        """
        NAME:
           tilt
        PURPOSE:
           calculate the tilt of the velocity ellipsoid by marginalizing over velocity;
           tilt = 0.5 arctan(2 sigma_Rz / (sigma_R^2 - sigma_z^2))
        INPUT:
           R - radius at which to calculate this (can be Quantity)
           z - height at which to calculate this (can be Quantity)
        OPTIONAL INPUT:
           nsigma - number of sigma to integrate the velocities over
           scipy.integrate.tplquad kwargs epsabs and epsrel
           mc= if True, calculate using Monte Carlo integration
           nmc= if mc, use nmc samples
           gl= if True, calculate using Gauss-Legendre integration
           ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
        OUTPUT:
           tilt in rad
        HISTORY:
           2012-12-23 - Written - Bovy (IAS)
           2017-10-28 - Changed return unit to rad - Bovy (UofT)
        """
        warnings.warn("In versions >1.3, the output unit of quasiisothermaldf.tilt has been changed to radian (from degree before)",galpyWarning)
        if mc:
            # One set of MC velocity samples, shared by all three moments
            # so the tilt is internally consistent
            surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
                                                          nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
                                                          **kwargs)
            tsigmar2= self._vmomentdensity(R,z,2.,0.,0.,
                                           nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
                                           _vrs=vrs,_vts=vts,_vzs=vzs,
                                           **kwargs)/surfmass
            tsigmaz2= self._vmomentdensity(R,z,0.,0.,2.,
                                           nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
                                           _vrs=vrs,_vts=vts,_vzs=vzs,
                                           **kwargs)/surfmass
            tsigmarz= self._vmomentdensity(R,z,1.,0.,1.,
                                           nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
                                           _vrs=vrs,_vts=vts,_vzs=vzs,
                                           **kwargs)/surfmass
            return 0.5*numpy.arctan(2.*tsigmarz/(tsigmar2-tsigmaz2))
        elif gl:
            # Same idea: one cached GL evaluation of the DF for all moments
            surfmass, glqeval= self._vmomentdensity(R,z,0.,0.,0.,
                                                    gl=gl,ngl=ngl,
                                                    _returngl=True,
                                                    **kwargs)
            tsigmar2= self._vmomentdensity(R,z,2.,0.,0.,
                                           ngl=ngl,gl=gl,
                                           _glqeval=glqeval,
                                           **kwargs)/surfmass
            tsigmaz2= self._vmomentdensity(R,z,0.,0.,2.,
                                           ngl=ngl,gl=gl,
                                           _glqeval=glqeval,
                                           **kwargs)/surfmass
            tsigmarz= self._vmomentdensity(R,z,1.,0.,1.,
                                           ngl=ngl,gl=gl,
                                           _glqeval=glqeval,
                                           **kwargs)/surfmass
            return 0.5*numpy.arctan(2.*tsigmarz/(tsigmar2-tsigmaz2))
        else:
            # The tplquad fallback is not offered here
            raise NotImplementedError("Use either mc=True or gl=True")
@potential_physical_input
@physical_conversion('velocity2',pop=True)
def sigmaz2(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
sigmaz2
PURPOSE:
calculate sigma_z^2 by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
sigma_z^2
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self._vmomentdensity(R,z,0.,0.,2.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self._vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self._vmomentdensity(R,z,0.,0.,2.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self._vmomentdensity(R,z,0.,0.,2.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
@potential_physical_input
@physical_conversion('velocity',pop=True)
def meanvT(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
meanvT
PURPOSE:
calculate the mean rotational velocity by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
meanvT
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self._vmomentdensity(R,z,0.,1.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self._vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self._vmomentdensity(R,z,0.,1.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self._vmomentdensity(R,z,0.,1.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
@potential_physical_input
@physical_conversion('velocity',pop=True)
def meanvR(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
meanvR
PURPOSE:
calculate the mean radial velocity by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
meanvR
HISTORY:
2012-12-23 - Written - Bovy (IAS)
"""
if mc:
surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self._vmomentdensity(R,z,1.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self._vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self._vmomentdensity(R,z,1.,0.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self._vmomentdensity(R,z,1.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
@potential_physical_input
@physical_conversion('velocity',pop=True)
def meanvz(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
meanvz
PURPOSE:
calculate the mean vertical velocity by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
meanvz
HISTORY:
2012-12-23 - Written - Bovy (IAS)
"""
if mc:
surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self._vmomentdensity(R,z,0.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
elif gl:
surfmass, glqeval= self._vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
return self._vmomentdensity(R,z,0.,0.,1.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self._vmomentdensity(R,z,0.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
@potential_physical_input
@physical_conversion('velocity2',pop=True)
def sigmaT2(self,R,z,nsigma=None,mc=False,nmc=10000,
gl=True,ngl=_DEFAULTNGL,**kwargs):
"""
NAME:
sigmaT2
PURPOSE:
calculate sigma_T^2 by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
gl= if True, calculate using Gauss-Legendre integration
ngl= if gl, use ngl-th order Gauss-Legendre integration for each dimension
OUTPUT:
sigma_T^2
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
mvt= self._vmomentdensity(R,z,0.,1.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
return self._vmomentdensity(R,z,0.,2.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass\
-mvt**2.
elif gl:
surfmass, glqeval= self._vmomentdensity(R,z,0.,0.,0.,
gl=gl,ngl=ngl,
_returngl=True,
**kwargs)
mvt= self._vmomentdensity(R,z,0.,1.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass
return self._vmomentdensity(R,z,0.,2.,0.,
ngl=ngl,gl=gl,
_glqeval=glqeval,
**kwargs)/surfmass-mvt**2.
else: #pragma: no cover because this is too slow; a warning is shown
surfmass= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)
return (self._vmomentdensity(R,z,0.,2.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/surfmass\
-(self._vmomentdensity(R,z,0.,2.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/surfmass)**2.)
@potential_physical_input
@physical_conversion('action',pop=True)
def meanjr(self,R,z,nsigma=None,mc=True,nmc=10000,**kwargs):
"""
NAME:
meanjr
PURPOSE:
calculate the mean radial action by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
OUTPUT:
meanjr
HISTORY:
2012-08-09 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self._jmomentdensity(R,z,1.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self._jmomentdensity(R,z,1.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
@potential_physical_input
@physical_conversion('action',pop=True)
def meanlz(self,R,z,nsigma=None,mc=True,nmc=10000,**kwargs):
"""
NAME:
meanlz
PURPOSE:
calculate the mean angular momemtum by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
OUTPUT:
meanlz
HISTORY:
2012-08-09 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self._jmomentdensity(R,z,0.,1.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self._jmomentdensity(R,z,0.,1.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
@potential_physical_input
@physical_conversion('action',pop=True)
def meanjz(self,R,z,nsigma=None,mc=True,nmc=10000,**kwargs):
"""
NAME:
meanjz
PURPOSE:
calculate the mean vertical action by marginalizing over velocity
INPUT:
R - radius at which to calculate this (can be Quantity)
z - height at which to calculate this (can be Quantity)
OPTIONAL INPUT:
nsigma - number of sigma to integrate the velocities over
scipy.integrate.tplquad kwargs epsabs and epsrel
mc= if True, calculate using Monte Carlo integration
nmc= if mc, use nmc samples
OUTPUT:
meanjz
HISTORY:
2012-08-09 - Written - Bovy (IAS@MPIA)
"""
if mc:
surfmass, vrs, vts, vzs= self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=True,
**kwargs)
return self._jmomentdensity(R,z,0.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,_returnmc=False,
_vrs=vrs,_vts=vts,_vzs=vzs,
**kwargs)/surfmass
else: #pragma: no cover because this is too slow; a warning is shown
return (self._jmomentdensity(R,z,0.,0.,1.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs)/
self._vmomentdensity(R,z,0.,0.,0.,
nsigma=nsigma,mc=mc,nmc=nmc,
**kwargs))
    @potential_physical_input
    def sampleV(self,R,z,n=1,**kwargs):
        """
        NAME:
           sampleV
        PURPOSE:
           sample a radial, azimuthal, and vertical velocity at R,z
           using rejection sampling against a broad Gaussian proposal
        INPUT:
           R - Galactocentric distance (can be Quantity)
           z - height (can be Quantity)
           n= number of distances to sample
        OUTPUT:
           list of samples; shape (n,3) array of (vR,vT,vz)
        HISTORY:
           2012-12-17 - Written - Bovy (IAS)
        """
        use_physical= kwargs.pop('use_physical',True)
        vo= kwargs.pop('vo',None)
        if vo is None and hasattr(self,'_voSet') and self._voSet:
            vo= self._vo
        vo= parse_velocity_kms(vo)
        #Determine the maximum of the velocity distribution
        maxVR= 0.
        maxVz= 0.
        # scipy 1.5.0: issue scipy#12298: fmin_powell now returns multiD array,
        # so squeeze out single dimensions by hand
        maxVT= numpy.squeeze(\
            optimize.fmin_powell((lambda x: -self(R,0.,x,z,0.,log=True,
                                                  use_physical=False)),
                                 1.))
        # Peak log-DF value; used to normalize the acceptance ratio below
        logmaxVD= self(R,maxVR,maxVT,z,maxVz,log=True,use_physical=False)
        #Now rejection-sample
        vRs= []
        vTs= []
        vzs= []
        while len(vRs) < n:
            nmore= n-len(vRs)+1 # over-draw by one to reduce loop iterations
            #sample from a Gaussian proposal with width 2*sigma in each
            #velocity, vT centered on the DF peak
            propvR= numpy.random.normal(size=nmore)*2.*self._sr
            propvT= numpy.random.normal(size=nmore)*2.*self._sr+maxVT
            propvz= numpy.random.normal(size=nmore)*2.*self._sz
            # log acceptance ratio: log f - log f_max - log proposal
            VDatprop= self(R+numpy.zeros(nmore),
                           propvR,propvT,z+numpy.zeros(nmore),
                           propvz,log=True,use_physical=False)-logmaxVD
            VDatprop-= -0.5*(propvR**2./4./self._sr**2.+propvz**2./4./self._sz**2.\
                                 +(propvT-maxVT)**2./4./self._sr**2.)
            VDatprop= numpy.reshape(VDatprop,(nmore))
            indx= (VDatprop > numpy.log(numpy.random.random(size=nmore))) #accept
            vRs.extend(list(propvR[indx]))
            vTs.extend(list(propvT[indx]))
            vzs.extend(list(propvz[indx]))
        # May have over-drawn; keep exactly n samples
        out= numpy.empty((n,3))
        out[:,0]= vRs[0:n]
        out[:,1]= vTs[0:n]
        out[:,2]= vzs[0:n]
        if use_physical and not vo is None:
            if _APY_UNITS:
                return units.Quantity(out*vo,unit=units.km/units.s)
            else:
                return out*vo
        else:
            return out
    @potential_physical_input
    def sampleV_interpolate(self,R,z,R_pixel,z_pixel,num_std=3,R_min=None,
                            R_max=None,z_max=None,**kwargs):
        """
        NAME:
           sampleV_interpolate
        PURPOSE:
           Given an array of R and z coordinates of stars, return the
           positions and their radial, azimuthal, and vertical velocity.
           The expensive per-position optimization of the peak vT is
           replaced by a 2D spline interpolation over an (R,z) grid.
        INPUT:
           R - array of Galactocentric distance (can be Quantity)
           z - array of height (can be Quantity)
           R_pixel, z_pixel= the pixel size for creating the grid for
                             interpolation (in natural unit)
           num_std= number of standard deviation to be considered outliers
                    sampled separately from interpolation
           R_min, R_max, z_max= optional edges of the grid
        OUTPUT:
           coord_v= a numpy array containing the sampled velocity, (vR, vT, vz),
                    where each row correspond to the row of (R,z)
        HISTORY:
           2018-08-10 - Written - Samuel Wong (University of Toronto)
        """
        use_physical= kwargs.pop('use_physical',True)
        vo= kwargs.pop('vo',None)
        if vo is None and hasattr(self,'_voSet') and self._voSet:
            vo= self._vo
        vo= parse_velocity_kms(vo)
        #Initialize output array
        coord_v= numpy.empty((numpy.size(R), 3))
        #Since the sign of z doesn't matter, work with absolute value of z
        z= numpy.abs(z)
        # Grid edges: clip the +/- num_std band to the actual data range
        if R_min is None:
            R_min= numpy.amax([numpy.mean(R)-num_std*numpy.std(R),
                               numpy.amin(R)])
        if R_max is None:
            R_max= numpy.amin([numpy.mean(R)+num_std*numpy.std(R),
                               numpy.amax(R)])
        if z_max is None:
            z_max= numpy.amin([numpy.mean(z)+num_std*numpy.std(z),
                               numpy.amax(z)])
        z_min= 0. #Always start grid at z=0 for stars close to plane
        #Separate the coodinates into outliers and normal points
        #Define outliers as points outside of grid
        mask= numpy.any([R < R_min, R > R_max, z > z_max],axis = 0)
        outliers_R= R[mask]
        outliers_z= z[mask]
        normal_R= R[~mask]
        normal_z= z[~mask]
        #Sample the velocity of outliers directly (without interpolation)
        outlier_coord_v= numpy.empty((outliers_R.size, 3))
        for i in range(outliers_R.size):
            outlier_coord_v[i]= self.sampleV(outliers_R[i], outliers_z[i],
                                             use_physical=False)[0]
        #Prepare for optimizing maxVT on a grid
        #Get the new hash of the parameters of grid
        new_hash= hashlib.md5(numpy.array([R_min,R_max,z_max,R_pixel,z_pixel])).hexdigest()
        #Reuse old interpolated object if new hash matches the old one
        if new_hash == self._maxVT_hash:
            ip_max_vT= self._maxVT_ip
        #Generate a new interpolation object if different from before
        else:
            R_number= int((R_max - R_min)/R_pixel)
            z_number= int((z_max - z_min)/z_pixel)
            R_linspace= numpy.linspace(R_min, R_max, R_number)
            z_linspace= numpy.linspace(z_min, z_max, z_number)
            Rv, zv= numpy.meshgrid(R_linspace, z_linspace)
            grid= numpy.dstack((Rv, zv)) #This grid stores (R,z) coordinate
            #Grid is a 3 dimensional array since it stores pairs of values, but
            #grid max vT is a 2 dimensinal array
            grid_max_vT= numpy.empty((grid.shape[0], grid.shape[1]))
            #Optimize max_vT on the grid
            # NOTE(review): this loop rebinds the input arrays R and z to
            # scalar grid values; the inputs are not used again afterwards
            # (normal_*/outliers_* were extracted above), but this shadowing
            # is fragile — confirm before extending this method
            for i in range(z_number):
                for j in range(R_number):
                    R, z= grid[i][j]
                    grid_max_vT[i][j]= numpy.squeeze(\
                        optimize.fmin_powell((lambda x: -self(
                                    R,0.,x,z,0.,log=True,
                                    use_physical=False)),1.))
            #Determine degree of interpolation (spline order capped by grid size)
            ky= numpy.min([R_number-1,3])
            kx= numpy.min([z_number-1,3])
            #Generate interpolation object
            ip_max_vT= interpolate.RectBivariateSpline(z_linspace,R_linspace,
                                                       grid_max_vT,kx=kx,ky=ky)
            #Store interpolation object
            self._maxVT_ip= ip_max_vT
            #Update hash of parameters
            self._maxVT_hash= new_hash
        #Evaluate interpolation object to get maxVT at the normal coordinates
        normal_max_vT= ip_max_vT.ev(normal_z, normal_R)
        #Sample all 3 velocities at a normal point and use interpolated vT
        normal_coord_v= \
            self._sampleV_preoptimized(normal_R,normal_z,normal_max_vT)
        #Combine normal and outlier result, preserving original order
        coord_v[mask]= outlier_coord_v
        coord_v[~mask]= normal_coord_v
        if use_physical and not vo is None:
            if _APY_UNITS:
                return units.Quantity(coord_v*vo,unit=units.km/units.s)
            else:
                return coord_v*vo
        else:
            return coord_v
    def _sampleV_preoptimized(self,R,z,maxVT):
        """
        NAME:
           _sampleV_preoptimized
        PURPOSE:
           sample a radial, azimuthal, and vertical velocity at R,z;
           R,z can be an array of positions; maxVT is already optimized,
           so no per-position peak search is needed (vectorized rejection
           sampling across all positions at once)
        INPUT:
           R - Galactocentric distance (can be Quantity)
           z - height (can be Quantity)
           maxVT - an array of pre-optimized maximum vT at corresponding R,z
        OUTPUT:
           a numpy array containing the sampled velocity, (vR, vT, vz),
           where each row correspond to the row of (R,z)
        HISTORY:
           2018-08-09 - Written - Samuel Wong (University of Toronto)
        """
        length = numpy.size(R)
        out= numpy.empty((length,3)) #Initialize output
        #Determine the maximum of the velocity distribution
        maxVR= numpy.zeros(length)
        maxVz= numpy.zeros(length)
        # Peak log-DF per position, for normalizing the acceptance ratio
        logmaxVD= self(R,maxVR,maxVT,z,maxVz,log=True,use_physical=False)
        #Now rejection-sample
        #Intiialize boolean index of position remaining to be sampled
        remain_indx = numpy.full(length,True)
        while numpy.any(remain_indx):
            nmore= numpy.sum(remain_indx)
            # Gaussian proposal: width 2*sigma per velocity, vT centered
            # on the pre-optimized DF peak at each remaining position
            propvR= numpy.random.normal(size=nmore)*2.*self._sr
            propvT= numpy.random.normal(size=nmore)*2.*self._sr+maxVT[remain_indx]
            propvz= numpy.random.normal(size=nmore)*2.*self._sz
            # log acceptance ratio: log f - log f_max - log proposal
            VDatprop= self(R[remain_indx],propvR,propvT,z[remain_indx],propvz,
                           log=True, use_physical=False)-logmaxVD[remain_indx]
            VDatprop-= -0.5*(propvR**2./4./self._sr**2.+
                             propvz**2./4./self._sz**2.+
                             (propvT-maxVT[remain_indx])**2./4./self._sr**2.)
            accept_indx= (VDatprop > numpy.log(numpy.random.random(size=nmore)))
            vR_accept= propvR[accept_indx]
            vT_accept= propvT[accept_indx]
            vz_accept= propvz[accept_indx]
            #Get the indexing of rows of output array that need to be updated
            #with newly accepted velocity
            to_change= numpy.copy(remain_indx)
            to_change[remain_indx]= accept_indx
            out[to_change]= numpy.stack((vR_accept,vT_accept,vz_accept), axis = 1)
            #Removing accepted sampled from remain index
            remain_indx[remain_indx]= ~accept_indx
        return out
    @actionAngle_physical_input
    @physical_conversion('phasespacedensityvelocity2',pop=True)
    def pvR(self,vR,R,z,gl=True,ngl=_DEFAULTNGL2,nsigma=4.,vTmax=1.5):
        """
        NAME:
           pvR
        PURPOSE:
           calculate the marginalized vR probability at this location (NOT normalized by the density);
           integrates the DF over vT and vz by Gauss-Legendre quadrature
        INPUT:
           vR - radial velocity (can be Quantity)
           R - radius (can be Quantity)
           z - height (can be Quantity)
           gl - use Gauss-Legendre integration (True, currently the only option)
           ngl - order of Gauss-Legendre integration
           nsigma - sets integration limits to [-1,+1]*nsigma*sigma_z(R) for integration over vz (default: 4)
           vTmax - sets integration limits to [0,vTmax] for integration over vT (default: 1.5)
        OUTPUT:
           p(vR,R,z); returns None if gl is False (no other scheme implemented)
        HISTORY:
           2012-12-22 - Written - Bovy (IAS)
        """
        # vertical dispersion at this radius (exponential radial profile)
        sigmaz1= self._sz*numpy.exp((self._refr-R)/self._hsz)
        if gl:
            if ngl % 2 == 1:
                raise ValueError("ngl must be even")
            #Use Gauss-Legendre integration for all
            # Re-use pre-computed nodes/weights for the two default orders
            if ngl == _DEFAULTNGL:
                glx, glw= self._glxdef, self._glwdef
                glx12, glw12= self._glxdef12, self._glwdef12
            elif ngl == _DEFAULTNGL2:
                glx, glw= self._glxdef2, self._glwdef2
                glx12, glw12= self._glxdef, self._glwdef
            else:
                glx, glw= numpy.polynomial.legendre.leggauss(ngl)
                glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
            #Evaluate everywhere
            if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
                                    actionAngle.actionAngleAdiabaticGrid)):
                # Adiabatic case: integrate vz over [0,nsigma*sigmaz1] only
                # and double the result (factor 2 is folded into vzfac)
                vzgl= nsigma*sigmaz1/2.*(glx+1.)
                vzglw= glw
                vzfac= nsigma*sigmaz1 #2 x integration over [0,nsigma*sigmaz1]
            else:
                # General case: split [-nsigma,0] and [0,nsigma] into two
                # half-order GL rules (ngl//2 nodes each)
                vzgl= nsigma*sigmaz1/2.*(glx12+1.)
                vzgl= list(vzgl)
                vzgl.extend(-nsigma*sigmaz1/2.*(glx12+1.))
                vzgl= numpy.array(vzgl)
                vzglw= glw12
                vzglw= list(vzglw)
                vzglw.extend(glw12)
                vzglw= numpy.array(vzglw)
                vzfac = 0.5*nsigma*sigmaz1 #integration over [-nsigma*sigmaz1,0] and [0,nsigma*sigmaz1]
            vTgl= vTmax/2.*(glx+1.)
            vTfac= 0.5 * vTmax #integration over [0.,vTmax]
            #Tile everything: build the full (ngl x ngl) outer-product grid
            vTgl= numpy.tile(vTgl,(ngl,1)).T
            vzgl= numpy.tile(vzgl,(ngl,1))
            vTglw= numpy.tile(glw,(ngl,1)).T #also tile weights
            vzglw= numpy.tile(vzglw,(ngl,1))
            #evaluate the log-DF on the flattened grid at fixed vR
            logqeval= numpy.reshape(self(R+numpy.zeros(ngl*ngl),
                                         vR+numpy.zeros(ngl*ngl),
                                         vTgl.flatten(),
                                         z+numpy.zeros(ngl*ngl),
                                         vzgl.flatten(),
                                         log=True,
                                         use_physical=False),
                                    (ngl,ngl))
            # weighted quadrature sum with the interval scalings
            return numpy.sum(numpy.exp(logqeval)*vTglw*vzglw*vzfac)*vTfac
    @actionAngle_physical_input
    @physical_conversion('phasespacedensityvelocity2',pop=True)
    def pvT(self,vT,R,z,gl=True,ngl=_DEFAULTNGL2,nsigma=4.):
        """
        NAME:
           pvT
        PURPOSE:
           calculate the marginalized vT probability at this location (NOT normalized by the density);
           integrates the DF over vR and vz by Gauss-Legendre quadrature
        INPUT:
           vT - tangential velocity (can be Quantity)
           R - radius (can be Quantity)
           z - height (can be Quantity)
           gl - use Gauss-Legendre integration (True, currently the only option)
           ngl - order of Gauss-Legendre integration
           nsigma - sets integration limits to [-1,+1]*nsigma*sigma(R) for integration over vz and vR (default: 4)
        OUTPUT:
           p(vT,R,z); returns None if gl is False (no other scheme implemented)
        HISTORY:
           2012-12-22 - Written - Bovy (IAS)
           2018-01-12 - Added Gauss-Legendre integration prefactor nsigma^2/4 - Trick (MPA)
        """
        # radial and vertical dispersions at this radius
        sigmaR1= self._sr*numpy.exp((self._refr-R)/self._hsr)
        sigmaz1= self._sz*numpy.exp((self._refr-R)/self._hsz)
        if gl:
            if ngl % 2 == 1:
                raise ValueError("ngl must be even")
            #Use Gauss-Legendre integration for all
            # Re-use pre-computed nodes/weights for the two default orders
            if ngl == _DEFAULTNGL:
                glx, glw= self._glxdef, self._glwdef
                glx12, glw12= self._glxdef12, self._glwdef12
            elif ngl == _DEFAULTNGL2:
                glx, glw= self._glxdef2, self._glwdef2
                glx12, glw12= self._glxdef, self._glwdef
            else:
                glx, glw= numpy.polynomial.legendre.leggauss(ngl)
                glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
            #Evaluate everywhere
            if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
                                    actionAngle.actionAngleAdiabaticGrid)):
                # Adiabatic case: integrate vR,vz over the positive half
                # only and double (factor 2 folded into vRfac/vzfac)
                vRgl= nsigma*sigmaR1/2.*(glx+1.)
                vzgl= nsigma*sigmaz1/2.*(glx+1.)
                vRglw= glw
                vzglw= glw
                vRfac= nsigma*sigmaR1 #2 x integration over [0,nsigma*sigmaR1]
                vzfac= nsigma*sigmaz1 #2 x integration over [0,nsigma*sigmaz1]
            else:
                # General case: split each of vR, vz into negative and
                # positive halves with half-order (ngl//2) GL rules
                vRgl= nsigma*sigmaR1/2.*(glx12+1.)
                vRgl= list(vRgl)
                vRgl.extend(-nsigma*sigmaR1/2.*(glx12+1.))
                vRgl= numpy.array(vRgl)
                vzgl= nsigma*sigmaz1/2.*(glx12+1.)
                vzgl= list(vzgl)
                vzgl.extend(-nsigma*sigmaz1/2.*(glx12+1.))
                vzgl= numpy.array(vzgl)
                vRglw= glw12
                vRglw= list(vRglw)
                vRglw.extend(glw12)
                vRglw= numpy.array(vRglw)
                vzglw= glw12
                vzglw= list(vzglw)
                vzglw.extend(glw12)
                vzglw= numpy.array(vzglw)
                vRfac = 0.5*nsigma*sigmaR1 #integration over [-nsigma*sigmaR1,0] and [0,nsigma*sigmaR1]
                vzfac = 0.5*nsigma*sigmaz1 #integration over [-nsigma*sigmaz1,0] and [0,nsigma*sigmaz1]
            #Tile everything: build the full (ngl x ngl) outer-product grid
            vRgl= numpy.tile(vRgl,(ngl,1)).T
            vzgl= numpy.tile(vzgl,(ngl,1))
            vRglw= numpy.tile(vRglw,(ngl,1)).T #also tile weights
            vzglw= numpy.tile(vzglw,(ngl,1))
            #evaluate the log-DF on the flattened grid at fixed vT
            logqeval= numpy.reshape(self(R+numpy.zeros(ngl*ngl),
                                         vRgl.flatten(),
                                         vT+numpy.zeros(ngl*ngl),
                                         z+numpy.zeros(ngl*ngl),
                                         vzgl.flatten(),
                                         log=True,
                                         use_physical=False),
                                    (ngl,ngl))
            # weighted quadrature sum with the interval scalings
            return numpy.sum(numpy.exp(logqeval)*vRglw*vzglw*vRfac*vzfac)
    @actionAngle_physical_input
    @physical_conversion('phasespacedensityvelocity2',pop=True)
    def pvz(self,vz,R,z,gl=True,ngl=_DEFAULTNGL2,
            nsigma=4.,vTmax=1.5,
            _return_actions=False,_jr=None,_lz=None,_jz=None,
            _return_freqs=False,
            _rg=None,_kappa=None,_nu=None,_Omega=None,
            _sigmaR1=None):
        """
        NAME:
           pvz
        PURPOSE:
           calculate the marginalized vz probability at this location (NOT normalized by the density)
        INPUT:
           vz - vertical velocity (can be Quantity)
           R - radius (can be Quantity)
           z - height (can be Quantity)
           gl - use Gauss-Legendre integration (True, currently the only option)
           ngl - order of Gauss-Legendre integration
           nsigma - sets integration limits to [-1,+1]*nsigma*sigma_R(R) for integration over vR (default: 4)
           vTmax - sets integration limits to [0,vTmax] for integration over vT (default: 1.5)
           _return_actions, _return_freqs - if True, also return the actions /
              frequencies computed along the way (internal use; presumably for
              callers that re-use them — confirm against call sites)
           _jr,_lz,_jz / _rg,_kappa,_nu,_Omega - pre-computed actions /
              guiding radius and frequencies to avoid recomputation (internal)
           _sigmaR1 - pre-computed radial-velocity dispersion at R (internal)
        OUTPUT:
           p(vz,R,z)
        HISTORY:
           2012-12-22 - Written - Bovy (IAS)
        """
        # Radial-velocity dispersion at R (exponential radial profile),
        # unless it was passed in pre-computed
        if _sigmaR1 is None:
            sigmaR1= self._sr*numpy.exp((self._refr-R)/self._hsr)
        else:
            sigmaR1= _sigmaR1
        # NOTE: if gl is False, the function falls through and returns None
        if gl:
            if ngl % 2 == 1:
                raise ValueError("ngl must be even")
            #Use Gauss-Legendre integration for all
            # Re-use cached nodes/weights for the two default orders
            if ngl == _DEFAULTNGL:
                glx, glw= self._glxdef, self._glwdef
                glx12, glw12= self._glxdef12, self._glwdef12
            elif ngl == _DEFAULTNGL2:
                glx, glw= self._glxdef2, self._glwdef2
                glx12, glw12= self._glxdef, self._glwdef
            else:
                glx, glw= numpy.polynomial.legendre.leggauss(ngl)
                glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
            #Evaluate everywhere
            # The vR grid here is dimensionless; it is scaled by
            # nsigma*sigmaR1/2 further below (after tiling)
            if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
                                    actionAngle.actionAngleAdiabaticGrid)):
                # Adiabatic approximation: DF is symmetric in vR, so a
                # one-sided grid over [0,nsigma*sigmaR1] suffices
                vRgl= (glx+1.)
                vRglw= glw
                vRfac= nsigma*sigmaR1 #2 x integration over [0,nsigma*sigmaR1]
            else:
                # General case: two half-order grids covering positive and
                # negative vR
                vRgl= (glx12+1.)
                vRgl= list(vRgl)
                vRgl.extend(-(glx12+1.))
                vRgl= numpy.array(vRgl)
                vRglw= glw12
                vRglw= list(vRglw)
                vRglw.extend(glw12)
                vRglw= numpy.array(vRglw)
                vRfac = 0.5*nsigma*sigmaR1 #integration over [-nsigma*sigmaR1,0] and [0,nsigma*sigmaR1]
            vTgl= vTmax/2.*(glx+1.)
            vTfac= 0.5 * vTmax #integration over [0.,vTmax]
            #Tile everything
            vTgl= numpy.tile(vTgl,(ngl,1)).T
            vRgl= numpy.tile(vRgl,(ngl,1))
            vTglw= numpy.tile(glw,(ngl,1)).T #also tile weights
            vRglw= numpy.tile(vRglw,(ngl,1))
            #If inputs are arrays, tile
            if isinstance(R,numpy.ndarray):
                nR= len(R)
                R= numpy.tile(R,(ngl,ngl,1)).T.flatten()
                z= numpy.tile(z,(ngl,ngl,1)).T.flatten()
                vz= numpy.tile(vz,(ngl,ngl,1)).T.flatten()
                vTgl= numpy.tile(vTgl,(nR,1,1)).flatten()
                vRgl= numpy.tile(vRgl,(nR,1,1)).flatten()
                vTglw= numpy.tile(vTglw,(nR,1,1))
                vRglw= numpy.tile(vRglw,(nR,1,1))
                scalarOut= False
            else:
                R= R+numpy.zeros(ngl*ngl)
                z= z+numpy.zeros(ngl*ngl)
                vz= vz+numpy.zeros(ngl*ngl)
                nR= 1
                scalarOut= True
            # Scale the dimensionless vR nodes by nsigma*sigmaR1/2
            # (sigmaR1 may itself be an array over R)
            vRgl= vRgl.flatten()
            vRgl*= numpy.tile(nsigma*sigmaR1/2.,(ngl,ngl,1)).T.flatten()
            #evaluate
            # Four cases depending on which quantities were pre-computed:
            # (actions and/or frequencies may be supplied by the caller)
            if _jr is None and _rg is None:
                # Nothing pre-computed: evaluate DF from phase-space coords
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self(R,
                                                                 vRgl.flatten(),
                                                                 vTgl.flatten(),
                                                                 z,
                                                                 vz,
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True,
                                                                 use_physical=False)
                logqeval= numpy.reshape(logqeval,(nR,ngl*ngl))
            elif not _jr is None and not _rg is None:
                # Both actions and frequencies supplied
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
                                                                 rg=_rg,kappa=_kappa,nu=_nu,
                                                                 Omega=_Omega,
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True,
                                                                 use_physical=False)
                logqeval= numpy.reshape(logqeval,(nR,ngl*ngl))
            elif not _jr is None and _rg is None:
                # Actions supplied, frequencies recomputed
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self((_jr,_lz,_jz),
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True,
                                                                 use_physical=False)
                logqeval= numpy.reshape(logqeval,(nR,ngl*ngl))
            elif _jr is None and not _rg is None:
                # Frequencies supplied, actions recomputed from coordinates
                logqeval, jr, lz, jz, rg, kappa, nu, Omega= self(R,
                                                                 vRgl.flatten(),
                                                                 vTgl.flatten(),
                                                                 z,
                                                                 vz,
                                                                 rg=_rg,kappa=_kappa,nu=_nu,
                                                                 Omega=_Omega,
                                                                 log=True,
                                                                 _return_actions=True,
                                                                 _return_freqs=True,
                                                                 use_physical=False)
                logqeval= numpy.reshape(logqeval,(nR,ngl*ngl))
            # Apply the 2D quadrature weights and prefactors
            vRglw= numpy.reshape(vRglw,(nR,ngl*ngl))
            vTglw= numpy.reshape(vTglw,(nR,ngl*ngl))
            if scalarOut:
                result= numpy.sum(numpy.exp(logqeval)*vTglw*vRglw,axis=1)[0]*vRfac*vTfac
            else:
                result= numpy.sum(numpy.exp(logqeval)*vTglw*vRglw,axis=1)*vRfac*vTfac
            if _return_actions and _return_freqs:
                return (result,
                        jr,lz,jz,
                        rg, kappa, nu, Omega)
            elif _return_freqs:
                return (result,
                        rg, kappa, nu, Omega)
            elif _return_actions:
                return (result,
                        jr,lz,jz)
            else:
                return result
@actionAngle_physical_input
@physical_conversion('phasespacedensityvelocity',pop=True)
def pvRvT(self,vR,vT,R,z,gl=True,ngl=_DEFAULTNGL2,nsigma=4.):
"""
NAME:
pvRvT
PURPOSE:
calculate the marginalized (vR,vT) probability at this location (NOT normalized by the density)
INPUT:
vR - radial velocity (can be Quantity)
vT - tangential velocity (can be Quantity)
R - radius (can be Quantity)
z - height (can be Quantity)
gl - use Gauss-Legendre integration (True, currently the only option)
ngl - order of Gauss-Legendre integration
nsigma - sets integration limits to [-1,+1]*nsigma*sigma_z(R) for integration over vz (default: 4)
OUTPUT:
p(vR,vT,R,z)
HISTORY:
2013-01-02 - Written - Bovy (IAS)
2018-01-12 - Added Gauss-Legendre integration prefactor nsigma/2 - Trick (MPA)
"""
sigmaz1= self._sz*numpy.exp((self._refr-R)/self._hsz)
if gl:
if ngl % 2 == 1:
raise ValueError("ngl must be even")
#Use Gauss-Legendre integration for all
if ngl == _DEFAULTNGL:
glx, glw= self._glxdef, self._glwdef
glx12, glw12= self._glxdef12, self._glwdef12
elif ngl == _DEFAULTNGL2:
glx, glw= self._glxdef2, self._glwdef2
glx12, glw12= self._glxdef, self._glwdef
else:
glx, glw= numpy.polynomial.legendre.leggauss(ngl)
glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
#Evaluate everywhere
if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
actionAngle.actionAngleAdiabaticGrid)):
vzgl= nsigma*sigmaz1/2.*(glx+1.)
vzglw= glw
vzfac= nsigma*sigmaz1 #2 x integration over [0,nsigma*sigmaz1]
else:
vzgl= nsigma*sigmaz1/2.*(glx12+1.)
vzgl= list(vzgl)
vzgl.extend(-nsigma*sigmaz1/2.*(glx12+1.))
vzgl= numpy.array(vzgl)
vzglw= glw12
vzglw= list(vzglw)
vzglw.extend(glw12)
vzglw= numpy.array(vzglw)
vzfac = 0.5*nsigma*sigmaz1 #integration over [-nsigma*sigmaz1,0] and [0,nsigma*sigmaz1]
#evaluate
logqeval= self(R+numpy.zeros(ngl),
vR+numpy.zeros(ngl),
vT+numpy.zeros(ngl),
z+numpy.zeros(ngl),
vzgl,
log=True,use_physical=False)
return numpy.sum(numpy.exp(logqeval)*vzglw*vzfac)
@actionAngle_physical_input
@physical_conversion('phasespacedensityvelocity',pop=True)
def pvTvz(self,vT,vz,R,z,gl=True,ngl=_DEFAULTNGL2,nsigma=4.):
"""
NAME:
pvTvz
PURPOSE:
calculate the marginalized (vT,vz) probability at this location (NOT normalized by the density)
INPUT:
vT - tangential velocity (can be Quantity)
vz - vertical velocity (can be Quantity)
R - radius (can be Quantity)
z - height (can be Quantity)
gl - use Gauss-Legendre integration (True, currently the only option)
ngl - order of Gauss-Legendre integration
nsigma - sets integration limits to [-1,+1]*nsigma*sigma_R(R) for integration over vR (default: 4)
OUTPUT:
p(vT,vz,R,z)
HISTORY:
2012-12-22 - Written - Bovy (IAS)
2018-01-12 - Added Gauss-Legendre integration prefactor nsigma/2 - Trick (MPA)
"""
sigmaR1= self._sr*numpy.exp((self._refr-R)/self._hsr)
if gl:
if ngl % 2 == 1:
raise ValueError("ngl must be even")
#Use Gauss-Legendre integration for all
if ngl == _DEFAULTNGL:
glx, glw= self._glxdef, self._glwdef
glx12, glw12= self._glxdef12, self._glwdef12
elif ngl == _DEFAULTNGL2:
glx, glw= self._glxdef2, self._glwdef2
glx12, glw12= self._glxdef, self._glwdef
else:
glx, glw= numpy.polynomial.legendre.leggauss(ngl)
glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
#Evaluate everywhere
if isinstance(self._aA,(actionAngle.actionAngleAdiabatic,
actionAngle.actionAngleAdiabaticGrid)):
vRgl= nsigma*sigmaR1/2.*(glx+1.)
vRglw= glw
vRfac= nsigma*sigmaR1 #2 x integration over [0,nsigma*sigmaR1]
else:
vRgl= nsigma*sigmaR1/2.*(glx12+1.)
vRgl= list(vRgl)
vRgl.extend(-nsigma*sigmaR1/2.*(glx12+1.))
vRgl= numpy.array(vRgl)
vRglw= glw12
vRglw= list(vRglw)
vRglw.extend(glw12)
vRglw= numpy.array(vRglw)
vRfac = 0.5*nsigma*sigmaR1 #integration over [-nsigma*sigmaR1,0] and [0,nsigma*sigmaR1]
#evaluate
logqeval= self(R+numpy.zeros(ngl),
vRgl,
vT+numpy.zeros(ngl),
z+numpy.zeros(ngl),
vz+numpy.zeros(ngl),
log=True,use_physical=False)
return numpy.sum(numpy.exp(logqeval)*vRglw*vRfac)
    @actionAngle_physical_input
    @physical_conversion('phasespacedensityvelocity',pop=True)
    def pvRvz(self,vR,vz,R,z,gl=True,ngl=_DEFAULTNGL2,vTmax=1.5):
        """
        NAME:
           pvRvz
        PURPOSE:
           calculate the marginalized (vR,vz) probability at this location (NOT normalized by the density)
        INPUT:
           vR - radial velocity (can be Quantity)
           vz - vertical velocity (can be Quantity)
           R - radius (can be Quantity)
           z - height (can be Quantity)
           gl - use Gauss-Legendre integration (True, currently the only option)
           ngl - order of Gauss-Legendre integration
           vTmax - sets integration limits to [0,vTmax] for integration over vT (default: 1.5)
        OUTPUT:
           p(vR,vz,R,z)
        HISTORY:
           2013-01-02 - Written - Bovy (IAS)
           2018-01-12 - Added Gauss-Legendre integration prefactor vTmax/2 - Trick (MPA)
        """
        # NOTE: if gl is False, the function falls through and returns None
        if gl:
            if ngl % 2 == 1:
                raise ValueError("ngl must be even")
            #Use Gauss-Legendre integration for all
            # Re-use cached nodes/weights for the two default orders
            if ngl == _DEFAULTNGL:
                glx, glw= self._glxdef, self._glwdef
                glx12, glw12= self._glxdef12, self._glwdef12
            elif ngl == _DEFAULTNGL2:
                glx, glw= self._glxdef2, self._glwdef2
                glx12, glw12= self._glxdef, self._glwdef
            else:
                glx, glw= numpy.polynomial.legendre.leggauss(ngl)
                glx12, glw12= numpy.polynomial.legendre.leggauss(ngl//2)
            # NOTE: glx12/glw12 are unused here; only the one-sided vT
            # integration over [0,vTmax] is needed
            #Evaluate everywhere
            vTgl= vTmax/2.*(glx+1.)
            vTglw= glw
            vTfac= 0.5 * vTmax #integration over [0.,vTmax]
            #If inputs are arrays, tile
            if isinstance(R,numpy.ndarray):
                nR= len(R)
                R= numpy.tile(R,(ngl,1)).T.flatten()
                z= numpy.tile(z,(ngl,1)).T.flatten()
                vR= numpy.tile(vR,(ngl,1)).T.flatten()
                vz= numpy.tile(vz,(ngl,1)).T.flatten()
                vTgl= numpy.tile(vTgl,(nR,1)).flatten()
                vTglw= numpy.tile(vTglw,(nR,1))
                scalarOut= False
            else:
                R= R+numpy.zeros(ngl)
                vR= vR+numpy.zeros(ngl)
                z= z+numpy.zeros(ngl)
                vz= vz+numpy.zeros(ngl)
                nR= 1
                scalarOut= True
            #evaluate
            logqeval= numpy.reshape(self(R,
                                         vR,
                                         vTgl,
                                         z,
                                         vz,
                                         log=True,
                                         use_physical=False),
                                    (nR,ngl))
            # Quadrature sum over the vT nodes for each input point
            out= numpy.sum(numpy.exp(logqeval)*vTglw*vTfac,axis=1)
            if scalarOut: return out[0]
            else: return out
def _calc_epifreq(self,r):
"""
NAME:
_calc_epifreq
PURPOSE:
calculate the epicycle frequency at r
INPUT:
r - radius
OUTPUT:
kappa
HISTORY:
2012-07-25 - Written - Bovy (IAS@MPIA)
NOTE:
takes about 0.1 ms for a Miyamoto-Nagai potential
"""
return potential.epifreq(self._pot,r)
def _calc_verticalfreq(self,r):
"""
NAME:
_calc_verticalfreq
PURPOSE:
calculate the vertical frequency at r
INPUT:
r - radius
OUTPUT:
nu
HISTORY:
2012-07-25 - Written - Bovy (IAS@MPIA)
NOTE:
takes about 0.05 ms for a Miyamoto-Nagai potential
"""
return potential.verticalfreq(self._pot,r)
def _rg(self,lz):
"""
NAME:
_rg
PURPOSE:
calculate the radius of a circular orbit of Lz
INPUT:
lz - Angular momentum
OUTPUT:
radius
HISTORY:
2012-07-25 - Written - Bovy (IAS@MPIA)
NOTE:
seems to take about ~0.5 ms for a Miyamoto-Nagai potential;
~0.75 ms for a MWPotential
about the same with or without interpolation of the rotation curve
Not sure what to do about negative lz...
"""
if isinstance(lz,numpy.ndarray):
indx= (lz > self._precomputergLzmax)*(lz < self._precomputergLzmin)
indxc= True^indx
out= numpy.empty(lz.shape)
out[indxc]= self._rgInterp(lz[indxc])
out[indx]= numpy.array([potential.rl(self._pot,lz[indx][ii]) for ii in range(numpy.sum(indx))])
return out
else:
if lz > self._precomputergLzmax or lz < self._precomputergLzmin:
return potential.rl(self._pot,lz)
return numpy.atleast_1d(self._rgInterp(lz))
def _vmomentsurfaceIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,n,m,o): #pragma: no cover because this is too slow; a warning is shown
    """Internal function that is the integrand for the vmomentsurface mass integration"""
    # Velocities arrive in dispersion units and are rescaled before the DF call
    velocityMoment= vR**n*vT**m*vz**o
    return velocityMoment*df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1,
                             use_physical=False)
def _vmomentsurfaceMCIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,mvT,n,m,o):
    """Internal function that is the integrand for the vmomentsurface mass integration"""
    # Monte-Carlo variant: divide out the Gaussian sampling density by
    # re-multiplying with its inverse (the exp factor)
    mcWeight= numpy.exp(vR**2./2.+(vT-mvT)**2./2.+vz**2./2.)
    dfval= df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1,use_physical=False)
    return vR**n*vT**m*vz**o*dfval*mcWeight
def _jmomentsurfaceIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,n,m,o): #pragma: no cover because this is too slow; a warning is shown
    """Internal function that is the integrand for the jmomentsurface mass integration"""
    # The action moment is applied inside the DF via the func= hook
    actionMoment= lambda x,y,z: x**n*y**m*z**o
    return df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1,use_physical=False,
              func=actionMoment)
def _jmomentsurfaceMCIntegrand(vz,vR,vT,R,z,df,sigmaR1,gamma,sigmaz1,mvT,n,m,o):
    """Internal function that is the integrand for the jmomentsurface mass integration"""
    # Monte-Carlo variant: action moment applied via func=, Gaussian sampling
    # density divided out via the exp factor
    actionMoment= lambda x,y,z: x**n*y**m*z**o
    mcWeight= numpy.exp(vR**2./2.+(vT-mvT)**2./2.+vz**2./2.)
    dfval= df(R,vR*sigmaR1,vT*sigmaR1*gamma,z,vz*sigmaz1,use_physical=False,
              func=actionMoment)
    return dfval*mcWeight
| {
"repo_name": "jobovy/galpy",
"path": "galpy/df/quasiisothermaldf.py",
"copies": "1",
"size": "96610",
"license": "bsd-3-clause",
"hash": 2030695931108671200,
"line_mean": 37.2008699091,
"line_max": 180,
"alpha_frac": 0.4852085705,
"autogenerated": false,
"ratio": 3.8296269869584174,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4814835557458418,
"avg_score": null,
"num_lines": null
} |
"""A biologically-inspired model of visual perception."""
from math import exp, hypot
import logging
import numpy as np
import cv2
import cv2.cv as cv
from collections import OrderedDict, deque
from itertools import izip
#import pyNN.neuron as sim
from lumos.context import Context
from lumos.util import Enum, getNormMap
from lumos.input import Projector, run
from lumos import rpc
from ..util.buffer import InputBuffer, OutputBuffer, BidirectionalBuffer, BufferAccessError
from ..neuron import Neuron, Population, Projection, neuron_inhibition_period, Uniform, MultivariateUniform, MultivariateNormal, NeuronMonitor, plotPopulations
from .photoreceptor import Rod, Cone
from .simplified.visual_cortex import SalienceNeuron, SelectionNeuron, FeatureNeuron
from ..motion.ocular import EmulatedOcularMotionSystem
# Global variables
# Module-wide defaults consumed below: default_feature_weight is the default
# update probability p of a VisualFeaturePathway; default_window_flags is used
# for all cv2.namedWindow calls
default_feature_weight = 0.9  # default weight for a feature pathway, treated as update probability for its neurons
default_feature_weight_rest = 0.25  # default weight for features other than the ones desired
# Global GUI options
default_window_flags = cv2.WINDOW_AUTOSIZE | 0x00000010  # CV_GUI_NORMAL = 0x00000010 (flag constant not exposed by this cv2 version)
# Global initialization
np.set_printoptions(precision=4, linewidth=120)  # for printing feature vectors: a few decimal places are fine; try not to break lines, especially in log files
class VisualFeaturePathway(object):
  """A collection of connected neuron populations that together compute a particular visual feature."""

  def __init__(self, label, populations, projections, output=None, p=default_feature_weight, timeNow=0.0):
    self.label = label
    self.logger = logging.getLogger("{}-pathway".format(self.label))
    # NOTE: populations are updated in the order they appear in this list
    self.populations = populations
    self.projections = projections
    #assert output in self.populations  # usually, output is a population, but it can be something else
    self.output = output
    self.timeNow = timeNow
    # Top-level interface state (TODO add neuron response/spike frequency as measure of strength)
    self.active = True  # used to selectively update specific pathways
    self.p = p  # update probability for this pathway's neurons
    self.selectedNeuron = None  # last selected SelectionNeuron, mainly for display and top-level output
    self.selectedTime = 0.0  # timestamp of that selection
    self.logger.debug("Initialized {}".format(self))

  def update(self, timeNow):
    # Pathway-specific updates are expected to be driven externally
    self.timeNow = timeNow

  def __str__(self):
    # Report the first output neuron's potential, if there is one
    if self.output is not None and len(self.output.neurons) > 0:
      outputValue = self.output.neurons[0].potential
    else:
      outputValue = None
    return "{obj.label}-pathway: active: {obj.active}, p: {obj.p}, output: {output}".format(obj=self, output=outputValue)
class Finst(object):
  """Finger of INSTantiation: A percept defined by a location in allocentric space, used for modulating attention."""

  max_activation = 1.0  # activation a brand-new FINST starts with
  half_life = 5.0  # time for the activation to halve
  min_good_activation = 0.1  # FINSTs with activation less than this could be discarded
  default_radius = 100  # fallback size indicator

  def __init__(self, location, focusPoint, radius=None, timeCreated=0.0, activationCreated=max_activation):
    self.location = location  # egocentric fixation location at time of creation
    self.focusPoint = focusPoint  # allocentric focus point at time of creation
    if radius is None:
      radius = self.default_radius
    self.radius = radius  # an indicator of size
    self.timeCreated = timeCreated  # creation time
    self.activationCreated = activationCreated  # strength of the FINST upon creation
    # Soft inhibition map based on a Normal PDF centered on the FINST
    self.inhibitionMap = getNormMap(self.radius * 2, sigma=self.radius / 3.0)
    self.update(timeCreated)

  def update(self, timeNow):
    # Exponential decay of activation with the configured half-life
    deltaTime = timeNow - self.timeCreated
    decay = 2 ** (deltaTime / self.half_life)
    self.activation = self.activationCreated / decay

  def getAdjustedLocation(self, focusPoint):
    # Shift the stored egocentric location by how much the focus has moved since creation
    dx = self.focusPoint[0] - focusPoint[0]
    dy = self.focusPoint[1] - focusPoint[1]
    return (self.location[0] + dx, self.location[1] + dy)

  def __str__(self):
    return "<loc: {self.location}, focus: {self.focusPoint}, act: {self.activation:.3f}>".format(self=self)
class VisualSystem(object):
  """Complete system for processing dynamic visual input."""
  # High-level gaze states and the intents accepted through the intent buffer
  State = Enum(('NONE', 'FREE', 'SACCADE', 'FIXATE'))
  intents = ['find', 'hold', 'release', 'reset']  # all supported intents
  default_image_size = (256, 256)  # (width, height) TODO read from context options
  # Cell counts per retinal/cortical layer (scaled-down vs. biology; see inline notes)
  num_rods = 10000  # human: 90-120 million
  num_cones = 1000  # human: 4.5-6 million
  num_bipolar_cells = 2000
  num_ganglion_cells = 1000
  num_salience_neurons = 400
  num_selection_neurons = 100
  num_feature_neurons = 2  # no. of feature neurons per pathway, more implies finer feature resolution
  num_finsts = 5  # no. of visual FINSTs
  finst_decay_enabled = False  # if enabled, FINST activations will be updated and those with low activation will be purged
  finst_inhibition_enabled = True  # if active FINST locations are inhibited
  # Timing parameters (in the same time units as timeNow; presumably seconds — confirm)
  max_free_duration = 2.0  # artificial bound to prevent no results in case of very low salience inputs
  min_saccade_duration = 0.05  # human: 0.02s (20ms)
  #max_saccade_duration = 0.5  # human: 0.2s (200ms); not used as we end saccade period when ocular motion stops
  min_fixation_duration = 0.5  # human: 0.1s (100ms), varies based by activity
  max_fixation_duration = 3.0  # human: 0.5s (500ms), varies considerably by activity, affected by cognitive control
  max_hold_duration = 5.0
  # Salience thresholds and retina geometry ratios
  min_good_salience = 0.66  # recommended values: 0.66 (filters out most unwanted regions)
  min_saccade_salience = 0.175  # minimum salience required to make a saccade to (otherwise reset to center)
  foveal_radius_ratio = 0.2  # fraction of distance from center to corners of the retina that is considered to be in foveal region
  #default_fovea_size = (int(foveal_radius_ratio * default_image_size[0]), int(foveal_radius_ratio * default_image_size[1]))
  default_fovea_size = (100, 100)  # fixed size; specify None to compute using foveal radius and image size in __init__()
  central_radius_ratio = 0.5  # radius to mark central region where visual acuity is modest and then falls off with eccentricity
  def __init__(self, imageSize=default_image_size, foveaSize=default_fovea_size, timeNow=0.0, showMonitor=None, ocularMotionSystem=None):
    """Set up image buffers, retinal/cortical populations, FINST store, GUI windows/monitor and I/O buffers, then enter the FREE state."""
    # * Get context and logger
    self.context = Context.getInstance()
    self.logger = logging.getLogger(self.__class__.__name__)
    # * Accept arguments, read parameters (TODO)
    self.imageSize = imageSize  # (width, height)
    self.foveaSize = foveaSize
    self.timeNow = timeNow
    self.ocularMotionSystem = ocularMotionSystem  # for eye movements, if available
    # * System state
    self.state = self.State.NONE
    self.lastTransitionTime = self.timeNow
    self.hold = False  # hold gaze at a fixed location?
    # * Structural/spatial members
    self.bounds = np.float32([[0.0, 0.0, 2.0], [self.imageSize[0] - 1, self.imageSize[1] - 1, 4.0]])
    self.center = (self.bounds[0] + self.bounds[1]) / 2
    # * Images and related members (TODO do we need to initialize these at all? - new images are generated every update)
    # NOTE(review): imageCenter appears to be (height/2, width/2) while
    # fovealSlice indexes rows with imageCenter[1]; consistent only for
    # square images - verify intent for non-square input
    self.imageCenter = (self.imageSize[1] / 2, self.imageSize[0] / 2)
    self.fovealRadius = hypot(self.imageCenter[0], self.imageCenter[1]) * self.foveal_radius_ratio
    if self.foveaSize is None:
      self.foveaSize = (int(self.fovealRadius * 2), int(self.fovealRadius * 2))
    self.fovealSlice = np.index_exp[int(self.imageCenter[1] - self.foveaSize[1] / 2):int(self.imageCenter[1] + self.foveaSize[1] / 2), int(self.imageCenter[0] - self.foveaSize[0] / 2):int(self.imageCenter[0] + self.foveaSize[0] / 2)]
    self.fixationSlice = self.fovealSlice
    self.imageShapeC3 = (self.imageSize[1], self.imageSize[0], 3)  # numpy shape for 3 channel images
    self.imageShapeC1 = (self.imageSize[1], self.imageSize[0])  # numpy shape for single channel images
    # NOTE Image shapes (h, w, 1) and (h, w) are not compatible unless we use keepdims=True for numpy operations
    self.imageTypeInt = np.uint8  # numpy dtype for integer-valued images
    self.imageTypeFloat = np.float32  # numpy dtype for real-valued images
    self.images = OrderedDict()
    # ** RGB and HSV images
    self.images['BGR'] = np.zeros(self.imageShapeC3, dtype=self.imageTypeInt)
    self.images['HSV'] = np.zeros(self.imageShapeC3, dtype=self.imageTypeInt)
    self.images['H'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)
    self.images['S'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)
    self.images['V'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)
    # ** Rod and Cone response images (frequency/hue-dependent)
    self.images['Rod'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
    self.images['Cone'] = OrderedDict()  # NOTE dict keys must match names of Cone.cone_types (should this be flattened?)
    self.images['Cone']['S'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
    self.images['Cone']['M'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
    self.images['Cone']['L'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
    # ** Bipolar cell response images
    # NOTE Rod bipolars are ON-center only; they connect to OFF-center Ganglion cells to initiate the dark pathway
    #   Here, an OFF map is computed from the ON map in order to simplify computation only
    self.images['Bipolar'] = OrderedDict()
    self.images['Bipolar']['ON'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
    self.images['Bipolar']['OFF'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
    self.images['Bipolar']['S'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
    self.images['Bipolar']['M'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
    self.images['Bipolar']['L'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
    # ** Ganglion cell response images, the source of cortical feature channels
    # TODO Add more Ganglion cell types with different receptive field properties
    #   'RG' +Red    -Green
    #   'GR' +Green  -Red
    #   'RB' +Red    -Blue
    #   'BR' +Blue   -Red
    #   'BY' +Blue   -Yellow
    #   'YB' +Yellow -Blue
    #   'WK' +White  -Black (currently 'ON')
    #   'KW' +Black  -White (currently 'OFF')
    # NOTE R = L cones, G = M cones, B = S cones
    self.ganglionTypes = ['ON', 'OFF', 'RG', 'GR', 'RB', 'BR', 'BY', 'YB']
    self.featurePlotColors = {'ON': 'gray', 'OFF': 'black', 'RG': 'red', 'GR': 'green', 'RB': 'tomato', 'BR': 'blue', 'BY': 'magenta', 'YB': 'gold'}
    self.numGanglionTypes = np.int_(len(self.ganglionTypes))  # TODO use a single num-features parameter across the board?
    self.numGanglionTypes_inv = 1.0 / self.imageTypeFloat(self.numGanglionTypes)  # [optimization: frequently used quantity]
    self.images['Ganglion'] = OrderedDict()
    for ganglionType in self.ganglionTypes:
      self.images['Ganglion'][ganglionType] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
    # ** Combined response (salience) image (and related variables)
    self.images['Salience'] = np.zeros(self.imageShapeC1, dtype=self.imageTypeFloat)
    self.maxSalience = 0.0
    self.maxSalienceLoc = (-1, -1)
    # ** Spatial weight map with a central soft spotlight (use np.ogrid?)
    self.images['Weight'] = getNormMap(self.imageSize[0], sigma=self.imageSize[0] / 2.0)  # X-Y symmetric
    # * Image processing elements
    self.bipolarBlurSize = (11, 11)  # size of blurring kernel used when computing Bipolar cell response
    # Difference-of-regions kernel: positive center, negative surround
    self.ganglionCenterSurroundKernel = self.imageTypeFloat(
      [ [ -1, -1, -1, -1, -1, -1, -1 ],
        [ -1, -1, -1, -1, -1, -1, -1 ],
        [ -1, -1, 7, 7, 7, -1, -1 ],
        [ -1, -1, 7, 9, 7, -1, -1 ],
        [ -1, -1, 7, 7, 7, -1, -1 ],
        [ -1, -1, -1, -1, -1, -1, -1 ],
        [ -1, -1, -1, -1, -1, -1, -1 ] ])
    self.ganglionCenterSurroundKernel /= np.sum(self.ganglionCenterSurroundKernel)  # normalize
    #self.logger.info("Ganglion center-surround kernel:\n{}".format(self.ganglionCenterSurroundKernel))  # [debug]
    # Build a pyramid of progressively larger (2x) center-surround kernels
    self.ganglionKernelLevels = 4
    self.ganglionKernels = [None] * self.ganglionKernelLevels
    self.ganglionKernels[0] = self.ganglionCenterSurroundKernel
    for i in xrange(1, self.ganglionKernelLevels):
      self.ganglionKernels[i] = cv2.resize(self.ganglionKernels[i - 1], dsize=None, fx=2, fy=2)
      self.ganglionKernels[i] /= np.sum(self.ganglionKernels[i])  # normalize
    #self.logger.info("Ganglion center-surround kernel sizes ({} levels): {}".format(self.ganglionKernelLevels, ", ".join("{}".format(k.shape) for k in self.ganglionKernels)))  # [debug]
    # * Neuron Populations and Projections connecting them
    self.populations = OrderedDict()  # dict with key = population label
    self.projections = OrderedDict()  # mapping from (pre_label, post_label) => projection object
    # ** Retinal layers (TODO move this to a separate Retina class?)
    self.createRetina()
    # ** Layers in the Visual Cortex (TODO move this to a separate VisualCortex class?)
    self.createVisualCortex()  # creates and populates self.featurePathways
    # * Eye movement
    self.saccadeSalience = 0.0  # salience of last location we moved to
    self.saccadeTarget = (0, 0)  # center-relative
    #self.lastSaccadeTime = self.timeNow  # [unused]
    self.fixationLoc = None  # not None when fixated
    # * FINSTs for maintaining attended locations
    self.finsts = deque(maxlen=self.num_finsts)
    # * Output image and plots
    self.imageOut = None
    if self.context.options.gui:
      #self.imageOut = np.zeros(self.imageShapeC3, dtype=self.imageTypeInt)
      cv2.namedWindow("Input", flags=default_window_flags)
      cv2.namedWindow("Retina", flags=default_window_flags)
      cv2.namedWindow("Output", flags=default_window_flags)
      if self.context.options.debug:
        # One salience window per feature pathway in debug mode
        for pathwayLabel in self.featurePathways.iterkeys():
          cv2.namedWindow("{} Salience".format(pathwayLabel), flags=default_window_flags)
        # TODO Salience and selection output will be for each feature pathway (but the same can be rendered to, displayed and reused)
        self.imageSalienceOut = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)  # salience neuron outputs
        self.imageSalienceOutCombined = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)  # salience neuron outputs, all pathways combined
        #self.imageSelectionOut = np.zeros(self.imageShapeC1, dtype=self.imageTypeInt)  # selection neuron outputs
    if showMonitor is None:
      showMonitor = self.context.options.gui and self.context.options.debug
    if showMonitor:
      # Live plot of feature-neuron membrane potentials, one channel per neuron
      self.neuronPotentialMonitor = NeuronMonitor(show_legend=False)
      for pathwayLabel, featurePathway in self.featurePathways.iteritems():
        # Monitor single feature neuron
        #self.neuronPotentialMonitor.addChannel(label=pathwayLabel, obj=featurePathway.output.neurons[0], color=self.featurePlotColors[pathwayLabel])  # very hard-coded way to access single output neuron!
        # Monitor all feature neurons
        for idx, outputNeuron in enumerate(featurePathway.output.neurons):
          self.neuronPotentialMonitor.addChannel(label="{}_{}".format(pathwayLabel, idx), obj=outputNeuron, color=self.featurePlotColors[pathwayLabel])
      self.neuronPotentialMonitor.start()
    # * Buffers - mainly for communication with high-level (cognitive) architectures, other modules
    # TODO Initialize all buffers with proper values
    self.buffers = OrderedDict()
    self.buffers['state'] = OutputBuffer(self.state)
    self.buffers['intent'] = InputBuffer(self.handleIntent)  # receive intent in a callable method
    self.buffers['location'] = BidirectionalBuffer((0, 0))  # center-relative
    self.buffers['size'] = BidirectionalBuffer((0, 0))
    self.buffers['features'] = BidirectionalBuffer()
    self.buffers['weights'] = InputBuffer()
    self.buffers['salience'] = OutputBuffer(0.0)
    self.buffers['match'] = OutputBuffer(0.0)
    # * Once initialized, start in FREE state
    self.transition(self.State.FREE)
  def initialize(self, imageIn, timeNow):
    """No-op; present only so VisualSystem exposes the FrameProcessor-style initialize/process interface."""
    pass  # to emulate FrameProcessor-like interface
def process(self, imageIn, timeNow):
self.timeNow = timeNow
self.images['BGR'][:] = imageIn # NOTE: must be pre-allocated and of the same (compatible) shape as imageIn
if self.context.options.gui:
cv2.imshow("Retina", self.images['BGR'])
# * State-based pre-processing
if self.state == self.State.SACCADE:
# Check for saccade end
if self.timeNow > (self.lastTransitionTime + self.min_saccade_duration) and not self.ocularMotionSystem.isMoving:
self.transition(self.State.FIXATE) # TODO: transition to an intermediate state to check for successful saccade completion
else:
return True, self.imageOut # saccadic suppression - skip further processing if performing a saccade
# * TODO Read input buffers
weights = self.buffers['weights'].get_in(clear=True)
if weights is not None:
self.updateFeatureWeights(weights)
# * Get HSV
self.images['HSV'] = cv2.cvtColor(self.images['BGR'], cv2.COLOR_BGR2HSV)
self.images['H'], self.images['S'], self.images['V'] = cv2.split(self.images['HSV'])
# * Compute Rod and Cone responses
# TODO: Need non-linear response to hue, sat, val (less dependent on sat, val for cones)
# NOTE: Somehow, PhotoreceptorType.hue must be a numpy array, even if it is length 1, otherwise we hit a TypeError: <unknown> is not a numpy array!
self.images['Rod'] = self.imageTypeFloat(180 - cv2.absdiff(self.images['H'], Rod.rod_type.hue) % 180) * 255 * self.images['V'] * Rod.rod_type.responseFactor # hack: use constant sat = 200 to make response independent of saturation
self.images['Cone']['S'] = self.imageTypeFloat(180 - cv2.absdiff(self.images['H'], Cone.cone_types[0].hue) % 180) * self.images['S'] * self.images['V'] * Cone.cone_types[0].responseFactor
self.images['Cone']['M'] = self.imageTypeFloat(180 - cv2.absdiff(self.images['H'], Cone.cone_types[1].hue) % 180) * self.images['S'] * self.images['V'] * Cone.cone_types[1].responseFactor
self.images['Cone']['L'] = self.imageTypeFloat(180 - cv2.absdiff(self.images['H'], Cone.cone_types[2].hue) % 180) * self.images['S'] * self.images['V'] * Cone.cone_types[2].responseFactor
# * Compute Bipolar and Ganglion cell responses
# ** Bipolar responses: Rods
# NOTE Blurring is a step that is effectively achieved in biology by horizontal cells
imageRodBlurred = cv2.blur(self.images['Rod'], self.bipolarBlurSize)
self.images['Bipolar']['ON'] = np.clip(self.images['Rod'] - 0.75 * imageRodBlurred, 0.0, 1.0)
self.images['Bipolar']['OFF'] = np.clip((1.0 - self.images['Rod']) - 0.9 * (1.0 - imageRodBlurred), 0.0, 1.0) # same as (1 - ON response)? (nope)
# ** Bipolar responses: Cones
# TODO Add multiscale Cone Bipolars to prevent unwanted response to diffuse illumination
imagesConeSBlurred = cv2.blur(self.images['Cone']['S'], self.bipolarBlurSize)
imagesConeMBlurred = cv2.blur(self.images['Cone']['M'], self.bipolarBlurSize)
imagesConeLBlurred = cv2.blur(self.images['Cone']['L'], self.bipolarBlurSize)
self.images['Bipolar']['S'] = np.clip(self.images['Cone']['S'] - 0.75 * imagesConeSBlurred, 0.0, 1.0)
self.images['Bipolar']['M'] = np.clip(self.images['Cone']['M'] - 0.75 * imagesConeMBlurred, 0.0, 1.0)
self.images['Bipolar']['L'] = np.clip(self.images['Cone']['L'] - 0.75 * imagesConeLBlurred, 0.0, 1.0)
# ** Ganglion cells simply add up responses from a (bunch of) central bipolar cell(s) (ON/OFF) and surrounding antagonistic bipolar cells (OFF/ON)
# *** Method 1: Center - Surround
#imageGanglionCenterON = cv2.filter2D(self.images['Bipolar']['ON'], -1, self.ganglionCenterKernel)
#imageGanglionSurroundOFF = cv2.filter2D(self.images['Bipolar']['OFF'], -1, self.ganglionSurroundKernel)
#self.images['Ganglion']['ON'] = 0.75 * imageGanglionCenterON + 0.25 * imageGanglionSurroundOFF
# *** Method 2: Center-Surround kernel
#self.images['Ganglion']['ON'] = np.clip(cv2.filter2D(self.images['Bipolar']['ON'], -1, self.ganglionCenterSurroundKernel), 0.0, 1.0)
#self.images['Ganglion']['OFF'] = np.clip(cv2.filter2D(self.images['Bipolar']['OFF'], -1, self.ganglionCenterSurroundKernel), 0.0, 1.0)
# *** Method 3: Multi-level Center-Surround kernels, taking maximum
for ganglionImage in self.images['Ganglion'].itervalues():
ganglionImage.fill(0.0) # reset all to zero
for k in self.ganglionKernels:
# Rod pathway
self.images['Ganglion']['ON'] = np.maximum(self.images['Ganglion']['ON'], np.clip(cv2.filter2D(self.images['Bipolar']['ON'], -1, k), 0.0, 1.0))
self.images['Ganglion']['OFF'] = np.maximum(self.images['Ganglion']['OFF'], np.clip(cv2.filter2D(self.images['Bipolar']['OFF'], -1, k), 0.0, 1.0))
# Cone pathway
imageRG = self.images['Bipolar']['L'] - self.images['Bipolar']['M']
imageRB = self.images['Bipolar']['L'] - self.images['Bipolar']['S']
imageBY = self.images['Bipolar']['S'] - (self.images['Bipolar']['L'] + self.images['Bipolar']['M']) / 2
self.images['Ganglion']['RG'] = np.maximum(self.images['Ganglion']['RG'], np.clip(cv2.filter2D(imageRG, -1, k), 0.0, 1.0))
self.images['Ganglion']['GR'] = np.maximum(self.images['Ganglion']['GR'], np.clip(cv2.filter2D(-imageRG, -1, k) * 1.6, 0.0, 1.0)) # TODO: formalize this fixed relative weighting scheme to counter unequal color representation
self.images['Ganglion']['RB'] = np.maximum(self.images['Ganglion']['RB'], np.clip(cv2.filter2D(imageRB, -1, k), 0.0, 1.0))
self.images['Ganglion']['BR'] = np.maximum(self.images['Ganglion']['BR'], np.clip(cv2.filter2D(-imageRB, -1, k), 0.0, 1.0))
self.images['Ganglion']['BY'] = np.maximum(self.images['Ganglion']['BY'], np.clip(cv2.filter2D(imageBY, -1, k), 0.0, 1.0))
self.images['Ganglion']['YB'] = np.maximum(self.images['Ganglion']['YB'], np.clip(cv2.filter2D(-imageBY, -1, k) * 1.6, 0.0, 1.0)) # TODO: also here
# * Compute combined (salience) image; TODO incorporate attention weighting (spatial, as well as by visual feature)
# ** Method 1: Max of all Ganglion cell images
self.images['Salience'].fill(0.0)
for ganglionType, ganglionImage in self.images['Ganglion'].iteritems():
#self.images['Salience'] = np.maximum(self.images['Salience'], ganglionImage)
#self.logger.debug("[Salience] Combining {}".format(self.featurePathways[ganglionType])) # [verbose]
self.images['Salience'] = np.maximum(self.images['Salience'], np.sqrt(self.featurePathways[ganglionType].p) * ganglionImage) # take maximum, scaled by feature pathway probabilities (for display only)
#self.images['Salience'] = self.images['Salience'] + (self.numGanglionTypes_inv * np.sqrt(self.featurePathways[ganglionType].p) * ganglionImage) # take normalized sum (mixes up features), scaled by feature pathway probabilities (for display only)
# * Update FINSTs if decay is enabled (otherwise activation doesn't change, FINSTs are purged when there's no more room)
if self.finst_decay_enabled:
for finst in self.finsts:
finst.update(self.timeNow)
# Remove stale FINSTs (TODO: use priority queue, don't depend on FINSTs being sorted by activation)
while self.finsts and self.finsts[0].activation < Finst.min_good_activation:
self.finsts.popleft()
# * Apply inhibition based on FINSTs
if self.finst_inhibition_enabled and self.finsts:
self.logger.debug("Current FINSTs: {}".format(", ".join(str(finst) for finst in self.finsts)))
for finst in self.finsts:
self.inhibitMapAtFinst(self.images['Salience'], finst)
self.images['Salience'] = cv2.blur(self.images['Salience'], (3, 3)) # blur slightly to smooth out specs
self.images['Salience'] *= self.images['Weight'] # effectively reduces salience around the edges (which can sometime give artificially high values due to partial receptive fields)
_, self.maxSalience, _, self.maxSalienceLoc = cv2.minMaxLoc(self.images['Salience']) # find out most salient location (from combined salience map)
self.logger.debug("Max. salience value: {:5.3f} @ {}".format(self.maxSalience, self.maxSalienceLoc)) # [verbose]
# * Compute features along each pathway
if self.context.options.gui and self.context.options.debug:
self.imageSalienceOutCombined.fill(0.0)
for pathwayLabel, featurePathway in self.featurePathways.iteritems():
if featurePathway.active:
# ** Update feature pathway populations (TODO find a more reliable way of grabbing salience and selection neuron populations)
#featurePathway.update(self.timeNow) # currently doesn't do anything, update populations explicitly
salienceNeurons = featurePathway.populations[0]
selectionNeurons = featurePathway.populations[1]
featureNeurons = featurePathway.populations[2]
# *** Salience neurons
for salienceNeuron in salienceNeurons.neurons:
#salienceNeuron.update(timeNow) # update every iteration
#salienceNeuron.updateWithP(timeNow) # update using intrinsic probability (adaptive)
if np.random.uniform() < featurePathway.p: # update using pathway probability (TODO try to make this adaptive?)
salienceNeuron.update(timeNow)
#self.logger.debug("{} Salience neuron potential: {:.3f}, response: {:.3f}, I_e: {}, pixelValue: {}".format(pathwayLabel, salienceNeuron.potential, salienceNeuron.response, salienceNeuron.I_e, salienceNeuron.pixelValue))
# *** Selection neurons (TODO mostly duplicated code, perhaps generalizable?)
for selectionNeuron in selectionNeurons.neurons:
#selectionNeuron.update(timeNow) # update every iteration
#selectionNeuron.updateWithP(timeNow) # update using intrinsic probability (adaptive)
if np.random.uniform() < featurePathway.p: # update using pathway probability (TODO try to make this adaptive?)
selectionNeuron.update(timeNow)
else:
selectionNeuron.potentialAccumulated = 0.0 # clear any accumulated potential, effectively inhibiting the selection neuron
#self.logger.debug("{} Selection neuron potential: {:.3f}, pixelValue: {}".format(pathwayLabel, selectionNeuron.potential, selectionNeuron.pixelValue))
# **** Pick one selection neuron, inhibit others
# TODO Use a top-level feature neuron with graded potential to return activation level
#numUninhibited = 0 # [debug]
for selectionNeuron in selectionNeurons.neurons:
# Render selection neuron's position with response-based pixel value (TODO build receptive field when synapses are made, or later, using a stimulus test phase?)
#if selectionNeuron.pixelValue > 200: print "[{:.2f}] {}".format(timeNow, selectionNeuron) # [debug]
if not selectionNeuron.isInhibited and selectionNeuron.timeLastFired == timeNow: # only deal with uninhibited neurons that just fired in this iteration
#numUninhibitedFired += 1 # [debug]
#cv2.circle(self.imageSelectionOut, (selectionNeuron.pixel[0], selectionNeuron.pixel[1]), self.imageSize[0] / 20, selectionNeuron.pixelValue, cv.CV_FILLED) # only render the one selected neuron, later
featurePathway.selectedNeuron = selectionNeuron
featurePathway.selectedTime = timeNow
featurePathway.selectedNeuron.inhibit(timeNow, neuron_inhibition_period + 0.75) # inhibit selected neuron for a bit longer
break # first uninhibited SelectionNeuron will be our selected neuron
#print "# Uninhibited selection neurons that fired: {}".format(numUninhibitedFired) # [debug]
# *** Feature neuron
for featureNeuron in featureNeurons.neurons:
featureNeuron.update(timeNow) # update every iteration
#featureNeuron.updateWithP(timeNow) # update probabilistically
#self.logger.debug("{} Feature neuron potential: {:.3f}, pixelValue: {}".format(pathwayLabel, featureNeuron.potential, featureNeuron.pixelValue))
# ** Render output images and show them (per feature pathway, better show in debug mode only)
if self.context.options.gui and self.context.options.debug:
# *** Salience neurons
self.imageSalienceOut.fill(0.0)
for salienceNeuron in salienceNeurons.neurons:
# Render salience neuron's receptive field with response-based pixel value (TODO cache int radii and pixel as tuple?)
#cv2.circle(self.imageSalienceOut, (salienceNeuron.pixel[0], salienceNeuron.pixel[1]), np.int_(salienceNeuron.rfRadius), 128) # outer radius of surround as a boundary
cv2.circle(self.imageSalienceOut, (salienceNeuron.pixel[0], salienceNeuron.pixel[1]), np.int_(salienceNeuron.rfCenterRadius), salienceNeuron.pixelValue, cv.CV_FILLED) # inner center field, filled with current value
self.imageSalienceOutCombined = np.maximum(self.imageSalienceOutCombined, self.imageSalienceOut)
# *** Selection neurons
if featurePathway.selectedNeuron is not None and (timeNow - featurePathway.selectedTime) < 3.0:
#self.imageSelectionOut.fill(0.0)
cv2.circle(self.imageSalienceOut, (featurePathway.selectedNeuron.pixel[0], featurePathway.selectedNeuron.pixel[1]), featurePathway.selectedNeuron.rfRadius, int(255 * exp(featurePathway.selectedTime - timeNow)), 2) # draw selected neuron with a shade that fades with time (on salience output image)
#cv2.circle(self.imageSelectionOut, (featurePathway.selectedNeuron.pixel[0], featurePathway.selectedNeuron.pixel[1]), featurePathway.selectedNeuron.rfRadius, int(255 * exp(featurePathway.selectedTime - timeNow)), cv.CV_FILLED) # draw selected neuron with a shade that fades with time
cv2.imshow("{} Salience".format(pathwayLabel), self.imageSalienceOut)
#cv2.imshow("{} Selection".format(pathwayLabel), self.imageSelectionOut)
# * TODO Compute feature vector of attended region
# * Post-processing: Write to output buffers, state-based actions, check for transitions
self.buffers['salience'].set_out(self.maxSalience)
self.buffers['location'].set_out(self.toCenterRelative(self.maxSalienceLoc))
self.updateFeatureVector() # external buffer reads may need this
if self.state == self.State.FREE:
if self.maxSalience >= self.min_good_salience or \
(self.maxSalience >= self.min_saccade_salience and self.timeNow > (self.lastTransitionTime + self.max_free_duration)): # we have good (or good enough) salience, lets saccade to it
self.saccadeSalience = self.maxSalience
self.saccadeTarget = np.int_(self.buffers['location'].get_out()) # ocular motion system requires a 2-element numpy array
self.performSaccade(self.saccadeTarget)
elif self.timeNow > (self.lastTransitionTime + self.max_free_duration): # we've been waiting too long, nothing significant, let's reset
self.performSaccade(None) # TODO: Probabilistically choose a not-so-good location?
elif self.state == self.State.FIXATE:
# Update fixation location (first time this fixation only)
# TODO: Maybe a good idea to use a new FIXATED state after FIXATE?
if self.fixationLoc is None:
self.fixationLoc = self.maxSalienceLoc
self.fixationSlice = np.index_exp[int(self.fixationLoc[1] - self.foveaSize[1] / 2):int(self.fixationLoc[1] + self.foveaSize[1] / 2), int(self.fixationLoc[0] - self.foveaSize[0] / 2):int(self.fixationLoc[0] + self.foveaSize[0] / 2)]
# NOTE: This slice could be smaller than self.foveaSize
self.logger.info("Fixated at: {}, fixation slice: {}".format(self.fixationLoc, self.fixationSlice))
# Update feature vector representing current state of neurons
self.logger.debug("[{:.2f}] Features: {}".format(self.timeNow, self.featureVector)) # [verbose]
#self.logger.debug("[{:.2f}] Feature matrix:\n {}".format(self.timeNow, "\n ".join("{}: {}".format(label, self.featureMatrix[i]) for i, label in enumerate(self.featureLabels)))) # [very verbose!]
self.buffers['features'].set_out(dict(izip(self.featureLabels, self.featureVector))) # TODO: find a better way than zipping every iteration (named tuple or something?)
if self.timeNow > (self.lastTransitionTime + self.min_fixation_duration):
# TODO: Update match buffer based on feature values and weights
# TODO: Compute utility based on duration of fixation (falling activation), match and/or salience
# TODO: If very high utility, turn on hold (assuming agent will ask us to release)
# If low utility or past max_fixation_duration, switch to FREE state and look somewhere else
maxSalienceLocDist = hypot(self.maxSalienceLoc[0] - self.fixationLoc[0], self.maxSalienceLoc[1] - self.fixationLoc[1])
# Put a limit on hold
if self.hold and self.timeNow > (self.lastTransitionTime + self.max_hold_duration):
self.hold = False # NOTE: This forcefully breaks a hold; might be better to depend on salient stimuli
# Check for possible transitions out of FIXATE
if not self.hold and \
(maxSalienceLocDist > self.fovealRadius or \
self.maxSalience < self.saccadeSalience or \
self.timeNow > (self.lastTransitionTime + self.max_fixation_duration)):
# Create FINST to inhibit current location in future, before switching to FREE
if self.maxSalience >= self.min_saccade_salience: # if current location is still salient enough to elicit a saccade
self.finsts.append(Finst(self.fixationLoc, self.ocularMotionSystem.getFocusPoint(), timeCreated=self.timeNow)) # TODO: pass in activationCreated once FINSTs are stored in priority queue
self.fixationLoc = None # set to None to indicate we're no longer fixated; next fixation will store a new location
self.transition(self.State.FREE)
# * Show output images if in GUI mode
if self.context.options.gui:
#cv2.imshow("Hue", self.images['H'])
#cv2.imshow("Saturation", self.images['S'])
#cv2.imshow("Value", self.images['V'])
if self.context.options.debug: # only show detail when in debug mode; limit to important images/maps
#cv2.imshow("Rod response", self.images['Rod'])
#for coneType, coneImage in self.images['Cone'].iteritems():
# cv2.imshow("{} Cones".format(coneType), coneImage)
for bipolarType, bipolarImage in self.images['Bipolar'].iteritems():
cv2.imshow("{} Bipolar cells".format(bipolarType), bipolarImage)
for ganglionType, ganglionImage in self.images['Ganglion'].iteritems():
cv2.imshow("{} Ganglion cells".format(ganglionType), ganglionImage)
#cv2.imshow("{} Ganglion cells".format(ganglionType), np.sqrt(self.featurePathways[ganglionType].p) * ganglionImage) # show image weighted by selected feature probability, artificially scaled to make responses visible
#cv2.imshow("Salience", self.images['Salience']) # combined salience image
# Designate a representative output image
#self.imageOut = cv2.bitwise_and(self.retina.images['BGR'], self.retina.images['BGR'], mask=self.imageSelectionOut) # mask out everything outside selected neuron's receptive field
self.imageOut = self.images['Salience'] # make a copy?
#_, self.imageOut = cv2.threshold(self.imageOut, 0.1, 1.0, cv2.THRESH_TOZERO) # apply threshold to remove low-response regions
self.imageOut = np.uint8(self.imageOut * 255) # convert to uint8 image for display (is this necessary?)
if self.maxSalience >= self.min_saccade_salience:
cv2.circle(self.imageOut, self.maxSalienceLoc, 3, 175, -1) # mark most salient location with a small faint dot
if self.maxSalience >= self.min_good_salience:
cv2.circle(self.imageOut, self.maxSalienceLoc, int(self.maxSalience * 25), int(128 + self.maxSalience * 127), 1 + int(self.maxSalience * 4)) # highlight highly salient locations: larger, fatter, brighter for higher salience value
if self.state == self.State.FIXATE and self.fixationLoc is not None:
cv2.circle(self.imageOut, self.fixationLoc, 1, 225, -1) # mark fixation location with a tiny bright dot
cv2.putText(self.imageOut, self.State.toString(self.state) + (" (holding)" if self.hold else ""), (20, 40), cv2.FONT_HERSHEY_PLAIN, 1.5, 200, 2) # show current state
return True, self.imageOut
def stop(self):
# TODO Ensure this gets called for proper clean-up, esp. now that we are using an animated plot
if self.context.options.gui:
self.neuronPotentialMonitor.stop()
def transition(self, next_state):
self.logger.info("[{:.2f}] Transitioning from {} to {} state after {:.2f}s".format(self.timeNow, self.State.toString(self.state), self.State.toString(next_state), (self.timeNow - self.lastTransitionTime)))
self.state = next_state
self.lastTransitionTime = self.timeNow
self.buffers['state'].set_out(self.state) # update corresponding buffer
def handleIntent(self, intent):
if intent is None or intent not in self.intents:
self.logger.warning("Unknown/null intent: '%s'", intent)
return
self.logger.info("Intent: %s", intent)
if intent == 'find':
# NOTE All relevant buffers must be set *before* find intent is sent in
self.transition(self.State.FREE) # reset state to use new weights
self.hold = False # implies we can move around again
elif intent == 'hold':
self.hold = True # system won't perform saccades, even if utility drops
if self.state == self.State.FREE:
self.transition(self.State.FIXATE) # transition to FIXATE state (unless performing a saccade)
elif intent == 'release':
self.hold = False # system can resume FIXATE-SACCADE cycle
elif intent == 'reset':
self.finsts.clear()
self.transition(self.State.SACCADE)
self.ocularMotionSystem.reset() # reset to the center of visual stream
self.hold = False
else:
self.logger.warning("Unhandled intent: '%s'", intent)
def performSaccade(self, saccadeTarget=None):
if self.ocularMotionSystem is not None:
self.transition(self.State.SACCADE)
if saccadeTarget is not None:
self.ocularMotionSystem.move(saccadeTarget)
else:
self.ocularMotionSystem.reset()
else:
self.logger.warning("Ocular motion system not found, skipping to FIXATE")
self.transition(self.State.FIXATE)
    def inhibitMapAtFinst(self, imageMap, finst):
        """Suppress salience in imageMap (in place) around a FINST's gaze-adjusted location.

        The affected square window is multiplied by (1 - activation * inhibitionMap),
        i.e. soft, graded inhibition rather than zeroing out the region.
        """
        # Translate the FINST's stored location into current image coordinates,
        # compensating for eye movement since the FINST was created
        loc = finst.getAdjustedLocation(self.ocularMotionSystem.getFocusPoint())
        #cv2.circle(imageMap, loc, finst.radius, 0.0, cv.CV_FILLED) # hard inhibition with solid 0 circle
        # Soft inhibition using finst.inhibitionMap (TODO: affected by finst.activation?)
        # Target: a (2*radius)-sided window around loc, clipped to image bounds (note numpy [row, col] order)
        inhibitionTarget = imageMap[max(loc[1] - finst.radius, 0):min(loc[1] + finst.radius, imageMap.shape[0]), max(loc[0] - finst.radius, 0):min(loc[0] + finst.radius, imageMap.shape[1])]
        # Offset into the FINST's inhibition map to compensate for clipping at the top/left image edges
        sourceTopLeft = (max(finst.radius - loc[1], 0), max(finst.radius - loc[0], 0)) # (y, x)
        # Crop the inhibition map to exactly match the (possibly clipped) target window
        inhibitionSource = finst.inhibitionMap[sourceTopLeft[0]:(sourceTopLeft[0] + inhibitionTarget.shape[0]), sourceTopLeft[1]:(sourceTopLeft[1] + inhibitionTarget.shape[1])]
        #self.logger.debug("loc: {}, source.shape: {}, target.shape: {}, sourceTopLeft: {}".format(loc, inhibitionSource.shape, inhibitionTarget.shape, sourceTopLeft))
        # In-place multiply on the view: writes through to imageMap itself
        inhibitionTarget *= (1.0 - finst.activation * inhibitionSource)
        #cv2.putText(imageMap, "{:.2f}".format(finst.timeCreated), (loc[0] + finst.radius, loc[1] - finst.radius), cv2.FONT_HERSHEY_PLAIN, 1, 0.0) # [debug]
def updateFeatureWeights(self, featureWeights, rest=None):
"""Update weights for features mentioned in given dict, using rest for others if not None."""
# TODO Handle special labels for spatial selection
if rest is None:
rest = featureWeights.get('rest', None) # rest may also be passed in as a dict item
for label, pathway in self.featurePathways.iteritems():
if label in featureWeights:
pathway.p = featureWeights[label]
elif rest is not None:
pathway.p = rest
def updateFeatureVector(self):
# TODO: Also compute mean and variance over a moving window here? (or should that be an agent/manager-level function?)
# Feature vector picks a single value from each channel
self.featureVector = np.float32([pathway.output.neurons[0].potential for pathway in self.featurePathways.itervalues()])
# Feature matrix picks all neuron values from each channel
self.featureMatrix = np.float32([[neuron.potential for neuron in pathway.output.neurons] for pathway in self.featurePathways.itervalues()])
def toCenterRelative(self, coords):
return (coords[0] - self.imageCenter[0], coords[1] - self.imageCenter[1]) # convert to center-relative coordinates
def createPopulation(self, *args, **kwargs):
"""Create a basic Population with given arguments."""
return self.addPopulation(Population(*args, **kwargs))
def addPopulation(self, population):
"""Add a given Population to this VisualSystem."""
#assert isinstance(population, Population) # allow other Population-like objects?
assert population.label not in self.populations # refuse to overwrite existing population with same label
self.populations.append(population)
return population
def createProjection(self, presynaptic_population, postsynaptic_population, **kwargs):
"""Create a basic Projection from presynaptic to postsynaptic population, with given keyword arguments."""
assert presynaptic_population in self.populations and postsynaptic_population in self.populations
return self.addProjection(Projection(presynaptic_population, postsynaptic_population, **kwargs))
def addProjection(self, projection):
self.projections.append(projection)
return projection
    def createRetina(self):
        """Placeholder: build retinal neuron layers (processing currently done image-wise instead)."""
        # TODO * Create Photoreceptor layer
        # TODO * Create BipolarCell layer
        # TODO * Create GanglionCell layer
        pass
    def createVisualCortex(self):
        """Build one feature pathway (salience -> selection -> feature neuron layers) per Ganglion channel."""
        # * Create several feature pathways, each with a salience, selection and feature layer
        self.featureLabels = self.images['Ganglion'].keys() # cached for frequent use (NOTE currently will need to be updated if self.images['Ganglion'] changes)
        self.featurePathways = OrderedDict()
        for pathwayLabel in self.featureLabels: # Ganglion cells are the source of each low-level visual pathway
            self.logger.info("Creating '{}' feature pathway".format(pathwayLabel))
            # ** Create layers
            # *** Salience neurons (TODO introduce magno and parvo types; expose layer parameters such as Z-axis position)
            # Layer occupies the image plane at Z=0
            salienceLayerBounds = np.float32([[0.0, 0.0, 0.0], [self.imageSize[0] - 1, self.imageSize[1] - 1, 0.0]])
            # Neuron positions drawn from a Gaussian centered on the image (denser toward the center, fovea-like)
            salienceNeuronDistribution = MultivariateNormal(mu=self.center, cov=(np.float32([self.center[0] ** 1.5, self.center[1] ** 1.5, 1.0]) * np.identity(3, dtype=np.float32)))
            #salienceNeuronDistribution = MultivariateUniform(lows=[0.0, 0.0, 0.0], highs=[self.imageSize[0], self.imageSize[1], 0.0])
            salienceNeurons = Population(numNeurons=self.num_salience_neurons, timeNow=self.timeNow, neuronTypes=[SalienceNeuron], bounds=salienceLayerBounds, distribution=salienceNeuronDistribution, system=self, pathway=pathwayLabel, imageSet=self.images['Ganglion'])
            # TODO self.addPopulation(salienceNeurons)?
            # *** Selection neurons (layer at Z=50, same center-weighted spatial distribution)
            selectionLayerBounds = np.float32([[0.0, 0.0, 50.0], [self.imageSize[0] - 1, self.imageSize[1] - 1, 50.0]])
            selectionNeuronDistribution = MultivariateNormal(mu=self.center + np.float32([0.0, 0.0, 50.0]), cov=(np.float32([self.center[0] ** 1.5, self.center[1] ** 1.5, 1.0]) * np.identity(3, dtype=np.float32)))
            #selectionNeuronDistribution = MultivariateUniform(lows=[0.0, 0.0, 50.0], highs=[self.imageSize[0], self.imageSize[1], 50.0])
            selectionNeurons = Population(numNeurons=self.num_selection_neurons, timeNow=self.timeNow, neuronTypes=[SelectionNeuron], bounds=selectionLayerBounds, distribution=selectionNeuronDistribution, system=self, pathway=pathwayLabel)
            # TODO self.addPopulation(selectionNeurons)?
            # *** Feature neurons (usually a single neuron for most non spatially-sensitive features; layer at Z=100)
            featureLayerBounds = np.float32([[0.0, 0.0, 100.0], [self.imageSize[0] - 1, self.imageSize[1] - 1, 100.0]])
            featureNeuronDistribution = MultivariateNormal(mu=self.center + np.float32([0.0, 0.0, 100.0]), cov=(np.float32([self.center[0] / 10, self.center[1] / 10, 1.0]) * np.identity(3, dtype=np.float32))) # positioning doesn't matter much
            featureNeurons = Population(numNeurons=self.num_feature_neurons, timeNow=self.timeNow, neuronTypes=[FeatureNeuron], bounds=featureLayerBounds, distribution=featureNeuronDistribution, system=self, pathway=pathwayLabel)
            # TODO Set feature neuron plotColor to something more representative of the pathway
            # ** Connect neuron layers
            # *** Salience neurons to selection neurons (TODO use createProjection() once Projection is implemented, and register using self.addProjection)
            salienceNeurons.connectWith(selectionNeurons, maxConnectionsPerNeuron=5)
            # For selection neurons, finalize their receptive field radii based on connected neurons (average distance to extrema)
            minRFRadius = None
            maxRFRadius = None
            for selectionNeuron in selectionNeurons.neurons:
                # Bounding box of all input receptive fields, seeded at the neuron's own position
                xlim = [selectionNeuron.location[0], selectionNeuron.location[0]] # min, max
                ylim = [selectionNeuron.location[1], selectionNeuron.location[1]] # min, max
                for inputNeuron in selectionNeuron.inputNeurons:
                    xlim[0] = min(xlim[0], inputNeuron.location[0] - inputNeuron.rfRadius)
                    xlim[1] = max(xlim[1], inputNeuron.location[0] + inputNeuron.rfRadius)
                    ylim[0] = min(ylim[0], inputNeuron.location[1] - inputNeuron.rfRadius)
                    ylim[1] = max(ylim[1], inputNeuron.location[1] + inputNeuron.rfRadius)
                # RF radius = mean distance to the two extreme corners of the bounding box
                selectionNeuron.rfRadius = int((hypot(xlim[0] - selectionNeuron.location[0], ylim[0] - selectionNeuron.location[1]) + \
                                                hypot(xlim[1] - selectionNeuron.location[0], ylim[1] - selectionNeuron.location[1])) / 2)
                # NOTE: We don't need much precision for this estimated RF radius - it is mainly used to categorize these neurons into broad groups, and for display
                if minRFRadius is None or selectionNeuron.rfRadius < minRFRadius:
                    minRFRadius = selectionNeuron.rfRadius
                if maxRFRadius is None or selectionNeuron.rfRadius > maxRFRadius:
                    maxRFRadius = selectionNeuron.rfRadius
            # *** Selection neurons to feature neurons (all-to-all, filtered by receptive field size)
            # NOTE(review): if all selection neurons end up with the same rfRadius, this step is 0
            # and the division below would raise ZeroDivisionError - confirm whether that can occur
            featureRFRadiusStep = float(maxRFRadius - minRFRadius) / self.num_feature_neurons # size of each uniform RF radius division to categorize input neurons in the featureNeurons layer
            for source in selectionNeurons.neurons:
                # All-to-all
                #for target in featureNeurons.neurons:
                #  source.synapseWith(target)
                # Filtered by receptive field size: bucket each source by its RF radius
                idx = int((source.rfRadius - minRFRadius) / featureRFRadiusStep)
                if idx >= self.num_feature_neurons:
                    idx = self.num_feature_neurons - 1 # ensure idx is in range (the max-radius neuron would otherwise overflow)
                source.synapseWith(featureNeurons.neurons[idx]) # connect with appropriate feature neuron
            selectionNeurons.isConnected = True # NOTE need to explicitly do this since we're not using Population.connectWith()
            # *** Selection neurons to themselves (lateral inhibition; TODO make this a re-entrant inhibitory Projection with allow_self_connections=False?)
            for source in selectionNeurons.neurons:
                for target in selectionNeurons.neurons:
                    if source == target: continue
                    source.gateNeuron(target)
            # ** Add to dictionary of feature pathways
            self.featurePathways[pathwayLabel] = VisualFeaturePathway(label=pathwayLabel, populations=[salienceNeurons, selectionNeurons, featureNeurons], projections=None, output=featureNeurons, timeNow=self.timeNow)
            # ** Show neuron layers and connections [debug]
            #plotPopulations([salienceNeurons, selectionNeurons, featureNeurons], showConnections=True, equalScaleZ=True) # [debug]
        # * Initialize feature vector
        self.featureVector = None
        self.updateFeatureVector()
@rpc.enable
def getBuffer(self, name):
try:
value = self.buffers[name].get()
if callable(value): # allows output buffer values to be callables (e.g. getter functions) that get called when retrieved
value = value()
#self.logger.debug("%s: %s", name, value) # [verbose]
return value
except KeyError as e:
self.logger.error("Buffer KeyError: %s", e)
except BufferAccessError as e:
self.logger.error("BufferAccessError (get '%s'): %s", name, e)
return None # failed
@rpc.enable
def setBuffer(self, name, value):
try:
#self.logger.debug("%s: %s", name, value) # [verbose]
obj = self.buffers[name].value # NOTE direct access (not encouraged - can this be done using simple Python properties?)
if callable(obj): # allows input buffer values to be callables (e.g. setter functions) that get called when the buffer is written to
obj(value)
else:
self.buffers[name].set(value)
return True # NOTE may not give the right indication if obj was a callable and returned a meaningful value
except KeyError as e:
self.logger.error("Buffer KeyError: %s", e)
except BufferAccessError as e:
self.logger.error("BufferAccessError (set '%s'): %s", name, e)
return False # failed
@rpc.enable
def listBuffers(self, types=False):
"""Return a list of exposed buffers (flat list), optionally with each buffer's type as well (list of 2-tuples)."""
return [(name, buf.__class__.__name__) if types else name for name, buf in self.buffers.iteritems()]
@rpc.enable_image
def getImage(self, key='BGR'):
try:
return self.images[key]
except KeyError as e:
self.logger.error("Image KeyError: %s", e)
return None
@rpc.enable_image
def getFovealImage(self, key='BGR'):
try:
return self.images[key][self.fovealSlice]
except KeyError as e:
self.logger.error("Image KeyError: %s", e)
return None
@rpc.enable_image
def getFixatedImage(self, key='BGR'):
try:
return self.images[key][self.fixationSlice]
except KeyError as e:
self.logger.error("Image KeyError: %s", e)
return None
@rpc.enable_image
def getOutputImage(self):
if self.context.options.gui:
return self.imageOut
else:
return None
class VisionManager(Projector):
    """A version of Projector that defaults to using a VisualSystem as target."""
    def __init__(self, target=None, *args, **kwargs):
        """Initialize with target (a fresh VisualSystem if None); extra args go to Projector.__init__."""
        Projector.__init__(self, target if target is not None else VisualSystem(), *args, **kwargs)
        self.visualSystem = self.target # synonym - Projector uses the generic term target
        # Attach an emulated eye-movement system and let the visual system drive it for saccades
        self.ocularMotionSystem = EmulatedOcularMotionSystem(self, timeNow=self.context.timeNow)
        self.visualSystem.ocularMotionSystem = self.ocularMotionSystem
    def process(self, imageIn, timeNow):
        """Advance ocular motion first, then run the regular Projector processing step."""
        self.ocularMotionSystem.update(timeNow)
        return Projector.process(self, imageIn, timeNow)
class FeatureManager(VisionManager):
    """A visual system manager for computing stable features."""
    # Stability state machine: NONE (pre-init) -> INCOMPLETE (sample buffer filling)
    # -> UNSTABLE (features still fluctuating) <-> STABLE (features settled)
    State = Enum(('NONE', 'INCOMPLETE', 'UNSTABLE', 'STABLE'))
    min_duration_incomplete = 2.0 # min. seconds to spend in incomplete state before transitioning (rolling buffer not full yet/neurons not activated enough)
    min_duration_unstable = 2.0 # min. seconds to spend in unstable state before transitioning (avoid short stability periods)
    max_duration_unstable = 5.0 # max. seconds to spend in unstable state before transitioning (avoid being stuck waiting forever for things to stabilize)
    min_duration_stable = 0.5 # avoid quick switches (attention deficiency)
    max_duration_stable = 2.0 # don't stare for too long (excess fixation)
    feature_buffer_size = 10 # number of iterations/samples to compute feature vector statistics over (rolling window)
    max_feature_sd = 0.005 # max. s.d. (units: Volts) to tolerate in judging a signal as stable
def __init__(self, *args, **kwargs):
kwargs['screen_background'] = kwargs.get('screen_background', np.uint8([0, 0, 0]))
VisionManager.__init__(self, *args, **kwargs)
self.state = self.State.NONE
self.lastTransitionTime = -1.0
def initialize(self, imageIn, timeNow):
VisionManager.initialize(self, imageIn, timeNow)
self.numFeatures = len(self.visualSystem.featureVector)
self.featureVectorBuffer = np.zeros((self.feature_buffer_size, self.numFeatures), dtype=np.float32) # rolling buffer of feature vector samples
self.featureVectorIndex = 0 # index into feature vector buffer (count module size)
self.featureVectorCount = 0 # no. of feature vector samples collected (same as index, sans modulo)
self.featureVectorMean = np.zeros(self.numFeatures, dtype=np.float32) # column mean of values in buffer
self.featureVectorSD = np.zeros(self.numFeatures, dtype=np.float32) # standard deviation of values in buffer
self.featureMatrixBuffer = np.zeros((self.feature_buffer_size, self.numFeatures, self.visualSystem.num_feature_neurons), dtype=np.float32) # follows featureVectorBuffer
self.featureMatrixMean = np.zeros((self.numFeatures, self.visualSystem.num_feature_neurons), dtype=np.float32) # follows featureVectorMean
self.logger.info("[{:.2f}] Features: {}".format(timeNow, self.visualSystem.featureLabels))
self.transition(self.State.INCOMPLETE, timeNow)
self.logger.debug("Initialized")
    def process(self, imageIn, timeNow):
        """Per-frame update: record feature statistics over a rolling window and
        drive the INCOMPLETE -> UNSTABLE <-> STABLE state machine.

        Returns the (keepRunning, imageOut) pair from the base VisionManager.
        """
        keepRunning, imageOut = VisionManager.process(self, imageIn, timeNow)
        # Compute featureVector mean and variance over a moving window (also featureMatrix mean)
        self.featureVectorBuffer[self.featureVectorIndex, :] = self.visualSystem.featureVector
        self.featureMatrixBuffer[self.featureVectorIndex, :] = self.visualSystem.featureMatrix
        self.featureVectorCount += 1
        self.featureVectorIndex = self.featureVectorCount % self.feature_buffer_size
        np.mean(self.featureVectorBuffer, axis=0, dtype=np.float32, out=self.featureVectorMean)  # always update mean, in case someone needs it
        # TODO: debug here
        np.mean(self.featureMatrixBuffer, axis=0, dtype=np.float32, out=self.featureMatrixMean)
        # Change state according to feature vector values (and visual system's state)
        deltaTime = timeNow - self.lastTransitionTime
        if self.state == self.State.INCOMPLETE and \
                deltaTime > self.min_duration_incomplete and \
                self.featureVectorCount >= self.feature_buffer_size and \
                self.visualSystem.state == VisualSystem.State.FIXATE:
            # Window is full and the system is fixating: start judging stability.
            self.visualSystem.setBuffer('intent', 'hold')  # ask system to hold gaze (i.e. no saccades)
            self.transition(self.State.UNSTABLE, timeNow)
        elif self.state == self.State.UNSTABLE or self.state == self.State.STABLE:
            if self.visualSystem.state == VisualSystem.State.FIXATE:
                np.std(self.featureVectorBuffer, axis=0, dtype=np.float32, out=self.featureVectorSD)
                self.logger.debug("[{:.2f}] Mean: {}".format(timeNow, self.featureVectorMean))  # [verbose]
                self.logger.debug("[{:.2f}] S.D.: {}".format(timeNow, self.featureVectorSD))  # [verbose]
                self.logger.debug("[{:.2f}] Feature matrix:\n {}".format(timeNow, "\n ".join("{}: {}".format(label, self.featureMatrixMean[i]) for i, label in enumerate(self.visualSystem.featureLabels))))
                if self.state == self.State.UNSTABLE and deltaTime > self.min_duration_unstable and \
                        (np.max(self.featureVectorSD) <= self.max_feature_sd or deltaTime > self.max_duration_unstable):  # TODO use a time-scaled low-pass filtered criteria
                    self.transition(self.State.STABLE, timeNow)
                elif self.state == self.State.STABLE and deltaTime > self.min_duration_stable and \
                        (np.max(self.featureVectorSD) > self.max_feature_sd or deltaTime > self.max_duration_stable):
                    self.transition(self.State.UNSTABLE, timeNow)
                    self.visualSystem.setBuffer('intent', 'find')  # let system return to FIXATE-SACCADE mode (without inhibition)
            else:  # something made visual system lose focus, including us releasing the system
                self.transition(self.State.INCOMPLETE, timeNow)
        return keepRunning, imageOut
def transition(self, next_state, timeNow):
self.logger.debug("[{:.2f}] Transitioning from {} to {} state after {:.2f}s".format(timeNow, self.State.toString(self.state), self.State.toString(next_state), (timeNow - self.lastTransitionTime)))
self.state = next_state
self.lastTransitionTime = timeNow
@rpc.enable
def getState(self):
return self.State.toString(self.state)
@rpc.enable
def getFeatureVector(self):
return self.featureVectorMean.tolist()
@rpc.enable
def getFeatureMatrix(self):
return self.featureMatrixMean.tolist() # will be a nested list, not flat
def main(managerType=VisionManager):
    """Run end-to-end visual system.

    Builds a VisualSystem wrapped in the given manager type, optionally
    exports both over RPC (when --rpc was passed in), and blocks in the
    vision loop until it finishes.
    NOTE: Python 2 print statements; this module targets Python 2.x.
    """
    context = Context.createInstance(description="Run a VisualSystem instance using a {}".format(managerType.__name__))
    print "main(): Creating visual system and manager"
    visSystem = VisualSystem()
    visManager = managerType(visSystem)
    if context.isRPCEnabled:
        print "main(): Exporting RPC calls"
        rpc.export(visSystem)
        rpc.export(visManager)
        rpc.refresh()  # Context is expected to have started RPC server
    print "main(): Starting vision loop"
    run(visManager)
    if context.isRPCEnabled:
        rpc.stop_server()  # do we need to do this if server is running as a daemon?
    print "main(): Done."
def test_FeatureManager_RPC():
    """Smoke-test RPC: serve a FeatureManager from this process while a child
    process polls its state and feature vector over an RPC client."""
    from time import sleep
    from multiprocessing import Process, Value
    Context.createInstance()
    print "test_FeatureManager_RPC(): Creating visual system and manager"
    visSystem = VisualSystem()
    visManager = FeatureManager(visSystem)
    print "test_FeatureManager_RPC(): Exporting RPC calls"
    rpc.export(visSystem)  # order of export vs. enable doesn't matter - everything will be resolved in refresh(), called by start_server()
    rpc.export(visManager)
    print "test_FeatureManager_RPC(): Starting RPC server thread"
    rpcServerThread = rpc.start_server_thread(daemon=True)
    # NOTE shared_loop_flag must be a multiprocessing.Value or .RawValue
    # NOTE gui should be set to true only if this is being run in its own dedicated process, without any shared GUI infrastructure
    def rpcClientLoop(shared_loop_flag, gui=False):
        # Child-process body: poll the exported calls until the flag is cleared.
        with rpc.Client() as rpcClient:
            while shared_loop_flag.value == 1:
                try:
                    for call in ['FeatureManager.getState', 'FeatureManager.getFeatureVector']:  # 'VisualSystem.getOutputImage'
                        print "[RPC-Client] REQ:", call
                        retval = rpcClient.call(call)
                        if isinstance(retval, np.ndarray):
                            print "[RPC-Client] REP[image]: shape: {}, dtype: {}".format(retval.shape, retval.dtype)
                            # NOTE Qt (and possibly other backends) can only display from the main thread of a process
                            if gui:
                                cv2.imshow("VisualSystem output", retval)
                                cv2.waitKey(10)
                        else:
                            print "[RPC-Client] REP:", retval
                        if retval is None:  # server went away / call unresolved
                            break
                        sleep(0.5)  # small sleep to prevent flooding
                    sleep(0.5)  # extra sleep after each state, vector pair
                except KeyboardInterrupt:
                    break
    print "test_FeatureManager_RPC(): Starting RPC client process"
    rpc_client_loop_flag = Value('i', 1)
    # NOTE No GUI output possible from child process; this will simply print metadata for any images received
    rpcClientProcess = Process(target=rpcClientLoop, name="RPC-Client", args=(rpc_client_loop_flag,))
    rpcClientProcess.daemon=True
    rpcClientProcess.start()
    sleep(0.01)  # let new process start
    print "test_FeatureManager_RPC(): Starting vision loop"
    run(visManager)
    print "test_FeatureManager_RPC(): Vision loop done; waiting for RPC threads/processes to join..."
    rpc_client_loop_flag.value = 0  # signal the client loop to exit
    if rpc.Client.recv_timeout is not None:  # just a guess, actual timeout used could be different
        rpcClientProcess.join(rpc.Client.recv_timeout / 1000.0 + 1.0)
    print "test_FeatureManager_RPC(): RPC client process joined (or timeout)"
    rpc.stop_server()
    if rpc.Server.recv_timeout is not None:  # just a guess, actual timeout used could be different
        rpcServerThread.join(rpc.Server.recv_timeout / 1000.0 + 1.0)
    print "test_FeatureManager_RPC(): RPC server thread joined (or timeout)"
    print "test_FeatureManager_RPC(): Done."
# Testing entry point: --test_rpc runs the client/server smoke test,
# otherwise the normal vision loop is started.
if __name__ == "__main__":
    # NOTE Defaults to using FeatureManager instead of VisualManager
    choices = [('--test_rpc', "Test RPC functionality by running a client, server pair")]
    context = Context.createInstance(parent_argparsers=[Context.createChoiceParser(choices)])
    if context.options.test_rpc:
        test_FeatureManager_RPC()
    else:
        main(managerType=FeatureManager)  # will enable RPC calls if --rpc was passed in
| {
"repo_name": "napratin/nap",
"path": "nap/vision/visual_system.py",
"copies": "1",
"size": "62141",
"license": "mit",
"hash": 4449551680707219000,
"line_mean": 61.5790533736,
"line_max": 310,
"alpha_frac": 0.7012439452,
"autogenerated": false,
"ratio": 3.4746700961753523,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46759140413753525,
"avg_score": null,
"num_lines": null
} |
""" a bit faster math operations when knowing what you're doing"""
import numpy as np
from scipy import linalg
def dot(A, B):
    """Matrix product of two 2-D float arrays via a direct BLAS GEMM call.

    Skips NumPy's generic dispatch by fetching the appropriate ``gemm``
    routine from BLAS.  A C-contiguous input is handed to BLAS as its
    (zero-copy) Fortran-ordered transpose with the matching ``trans_*``
    flag set, so the numerical result equals ``A @ B``.

    Parameters
    ----------
    A : array_like
        First argument.
    B : array_like
        Second argument.

    Returns
    -------
    output : ndarray
        The product of `A` and `B`.
    """
    def _as_fortran(m):
        # Transposing a C-contiguous array yields a Fortran-contiguous view;
        # the second element reports whether we handed over the transpose.
        return (m.T, True) if m.flags.c_contiguous else (m, False)

    a_f, a_transposed = _as_fortran(A)
    b_f, b_transposed = _as_fortran(B)
    gemm = linalg.get_blas_funcs("gemm", arrays=(a_f, b_f))
    # BLAS gemm computes C = alpha * op(a) * op(b) + beta * C
    return gemm(alpha=1.0, a=a_f, b=b_f, trans_a=a_transposed, trans_b=b_transposed)
def percentile(data, percentiles, weights=None):
    """Compute weighted percentiles.

    If the weights are equal, this is the same as normal percentiles.
    Elements of the data and weights arrays correspond to each other and
    must have equal length.
    If weights is None, this function calls numpy's percentile instead
    (faster).
    TODO: re-implementing the normal percentile could be faster
    because it would avoid more variable checks and overheads

    Parameters
    ----------
    data: ndarray[float, ndim=1]
        data points
    percentiles: float or ndarray[float, ndim=1]
        percentile(s) to use (between 0 and 100)
    weights: ndarray[float, ndim=1] or None
        Weights of each point in data.
        All the weights must be non-negative and the sum must be
        greater than zero.

    Returns
    -------
    p: ndarray[float, ndim=1]
        the weighted percentiles of the data.

    Implementation
    --------------
    Extends the common percentile estimation method (linear interpolation
    between closest ranks) in a natural way.  With positive weights W=[w_i]
    for the N sorted sample values D=[d_i] and S_n = Sum_{i=0..n} w_i, the
    percentile rank of the n-th sample is

        p_n = 100/S_N * (S_n - w_n/2)

    and a requested percentile p is interpolated between its bracketing
    samples v_k, v_{k+1}:

        v = v_k + (p - p_k) / (p_{k+1} - p_k) * (v_{k+1} - v_k)
    """
    # Fast path: unweighted (or trivially weighted) percentiles via numpy.
    # np.atleast_1d accepts both a scalar and a sequence of percentiles;
    # the previous list(percentiles) raised TypeError on a scalar argument.
    if weights is None:
        return np.percentile(data, np.atleast_1d(percentiles))
    if np.equal(weights, 1.).all():
        return np.percentile(data, np.atleast_1d(percentiles))
    # make sure percentiles are valid percentages in [0, 100]
    if not np.greater_equal(percentiles, 0.0).all():
        raise ValueError("Percentiles less than 0")
    if not np.less_equal(percentiles, 100.0).all():
        raise ValueError("Percentiles greater than 100")
    # Make sure data is in correct shape
    shape = np.shape(data)
    n = len(data)
    if (len(shape) != 1):
        raise ValueError("wrong data shape, expecting 1d")
    if len(weights) != n:
        raise ValueError("weights must be the same shape as data")
    if not np.greater_equal(weights, 0.0).all():
        raise ValueError("Not all weights are non-negative.")
    _data = np.asarray(data, dtype=float)
    if hasattr(percentiles, '__iter__'):
        _p = np.asarray(percentiles, dtype=float) * 0.01
    else:
        _p = np.asarray([percentiles * 0.01], dtype=float)
    _wt = np.asarray(weights, dtype=float)
    len_p = len(_p)
    sd = np.empty(n, dtype=float)
    sw = np.empty(n, dtype=float)
    aw = np.empty(n, dtype=float)
    o = np.empty(len_p, dtype=float)
    i = np.argsort(_data)
    np.take(_data, i, axis=0, out=sd)  # data sorted ascending
    np.take(_wt, i, axis=0, out=sw)    # weights reordered to match
    np.add.accumulate(sw, out=aw)      # cumulative weights S_n
    if not aw[-1] > 0:
        raise ValueError("Nonpositive weight sum")
    # Percentile rank (as a fraction) of each sorted sample.
    w = (aw - 0.5 * sw) / aw[-1]
    spots = np.searchsorted(w, _p)
    for (pk, s, p) in zip(range(len_p), spots, _p):
        if s == 0:
            o[pk] = sd[0]      # below the lowest rank: clamp to minimum
        elif s == n:
            o[pk] = sd[n - 1]  # above the highest rank: clamp to maximum
        else:
            # Linear interpolation between the two bracketing samples.
            f1 = (w[s] - p) / (w[s] - w[s - 1])
            f2 = (p - w[s - 1]) / (w[s] - w[s - 1])
            assert (f1 >= 0) and (f2 >= 0) and (f1 <= 1) and (f2 <= 1)
            assert abs(f1 + f2 - 1.0) < 1e-6
            o[pk] = sd[s - 1] * f1 + sd[s] * f2
    return o
| {
"repo_name": "mfouesneau/faststats",
"path": "faststats/math.py",
"copies": "1",
"size": "5806",
"license": "mit",
"hash": 6556954633881829000,
"line_mean": 30.5543478261,
"line_max": 79,
"alpha_frac": 0.599724423,
"autogenerated": false,
"ratio": 3.4416123295791348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9537878420707944,
"avg_score": 0.0006916663742382239,
"num_lines": 184
} |
''' a bit more in the comment...
'''
import dynamics.simulation
from dynamics.frame import Frame
from dynamics.spring import NailSpring
from dynamics.object import Rectangle, Circle, Beam
from dynamics.constraint import Nail, Rod, Pin, Shelf
from dynamics.animation import Animation
from dynamics.constants import foot2meter, inch2meter, meter2foot
from dynamics.misc import length_, rot2radians, radians2rot
from dynamics.constants import lb2kgram, kgram2lb, newton2lb
from dynamics.constants import pine_density, steel_density
from flight import Flight
import scipy
import scipy.interpolate
import numpy as np
from math import pi, sin, cos, sqrt, acos, atan2
#from scipy.optimize.optimize import fmin
from scipy.optimize.minpack import fsolve
#from scipy.interpolate.fitpack2 import UnivariateSpline
from pylab import plot
# Wider, higher-precision console output for the debugging dumps below.
scipy.set_printoptions(precision=5, linewidth=200)
def treb( sling_length = 8.54665,         # sling length, feet
          ramp_length = 11,               # ramp length, feet
          link_sum = 5.587,               # sum of upper and lower link lengths, feet
          hanger_x = 11.38508,            # feet
          hanger_y = -2,                  # feet
          hinge_x = (6.+2.)/12.,          # feet
          hinge_y = -4.0,                 # feet
          alpha=90,                       # arm start angle, ccw from horizontal (degrees)
          omega=10,                       # cocked angle between upper link and lower link (degrees)
          cw_drop = 5.0,                  # feet
          cw_weight = 4581.,              # pounds
          cw_moment_arm = 10.41,          # distance from hinge to cw center of gravity, feet
          cw_moment = 3.516e6,            # counterweight moment about its CG, lb*ft^2
          upper_link_weight = 2*58.,      # pounds
          lower_link_weight = 2*52.,      # pounds
          link_axle_weight = 106,         # pounds
          connector_rod_weight = 84.8,    # pounds
          connector_brace_weight = 105,   # pounds
          pumpkin_weight = 10.0,          # pounds
          sling_weight = 1.7,             # pounds
          sim_duration = 2.0,             # seconds
          dry_fire = False,               # True to disable sling from time 0
          time_step = 0.001,              # seconds
          slide_y = -9,                   # feet
          arm_depth = (10.+1./4.)/12.,    # 10 1/4 in, expressed in feet
          arm_thick = (5.+1./4.)/12.,     # 5 1/4 in, expressed in feet
          arm_end_depth = (6.+5./8)/12.,  # 6 5/8 in, expressed in feet
          arm_end_thick = (3.+1./8)/12.,  # 3 1/8 in, expressed in feet
          release_pin_weight = 9,         # pounds
          release_time = 0.0,             # seconds
          debug = True):
    """Build and run one floating-arm trebuchet simulation; return the Simulation.

    Distances are given in feet, weights in pounds and angles in degrees;
    all are converted to SI units internally.  The machine geometry
    (link lengths, connector, arm angles) is solved from the given
    dimensions first, then the rigid-body simulation is assembled from
    frames/objects/constraints and run for sim_duration seconds.  After
    the run, the projectile's maximum range and the peak hanger force
    are attached to the returned Simulation object.
    """
    sim = dynamics.simulation.Simulation(max_time=sim_duration,
                                         time_step=time_step)
    sim.debug=debug
    # convert arguments to metric and radians
    sling_length = foot2meter(sling_length)
    hanger_pos = foot2meter(np.array((hanger_x, hanger_y)))
    del hanger_x, hanger_y
    hinge_pos = foot2meter(np.array((hinge_x, hinge_y)))
    del hinge_x, hinge_y
    slide_y = foot2meter(slide_y)
    arm_depth = foot2meter(arm_depth)
    arm_thick = foot2meter(arm_thick)
    arm_end_depth = foot2meter(arm_end_depth)
    arm_end_thick = foot2meter(arm_end_thick)
    ramp_length = foot2meter(ramp_length)
    link_sum = foot2meter(link_sum)
    sim.release_time = release_time
    alpha = scipy.deg2rad(alpha)
    omega = scipy.deg2rad(omega)
    cw_drop = foot2meter(cw_drop)
    cw_mass = lb2kgram(cw_weight)
    cw_moment_arm = foot2meter(cw_moment_arm)
    # NOTE(review): the signature comment says cw_moment is lb*ft^2, but
    # 0.00029263965 is the lb*in^2 -> kg*m^2 factor -- confirm the units.
    cw_moment = cw_moment / 32.174049 * 0.00029263965 # convert lb to slug, then
                                                      # slug*in^2 to kgram*meter^2
    connector_rod_mass = lb2kgram(connector_rod_weight)
    connector_brace_mass = lb2kgram(connector_brace_weight)
    upper_link_mass = lb2kgram(upper_link_weight)
    lower_link_mass = lb2kgram(lower_link_weight)
    link_axle_mass = lb2kgram(link_axle_weight)
    pumpkin_mass = lb2kgram(pumpkin_weight)
    sling_mass = lb2kgram(sling_weight)
    release_pin_mass = lb2kgram(release_pin_weight)
    # long arm length to reach slide
    long_arm_length = -slide_y / np.sin(alpha) - inch2meter(0)
    # compute rest cw position thru triangulation
    rest_cw_ctr = circle_intersection(hanger_pos, link_sum,
                                      hinge_pos, ramp_length)
    # compute cocked cw position on circle about hinge, up 'drop' meters from rest position
    cocked_cw_ctr = np.array((None, rest_cw_ctr[1] + cw_drop))
    # ramp_length**2 = (x-hinge_x)**2 + (y-hinge_y)**2
    cocked_cw_ctr[0] = hinge_pos[0] + sqrt(ramp_length**2 - (cocked_cw_ctr[1]-hinge_pos[1])**2)
    # cocked connection point is on ellipse w/ foci at hanger and cocked_cw, 'string' length
    # equal to link_sum, 'string' interior angle omega. In maxima:
    #   r2: s-r1
    #   eq1: d^2 = r1^2+r2^2-2*r1*r2*cos(omega)
    #   solve(eq1, r1)
    d = length_(hanger_pos - cocked_cw_ctr)
    s = link_sum
    sol1 = -(sqrt(s**2*cos(omega)**2 + 2*d**2*cos(omega)-s**2+2*d**2) - s*cos(omega) - s)/(2*cos(omega)+2)
    sol2 = (sqrt(s**2*cos(omega)**2 + 2*d**2*cos(omega)-s**2+2*d**2) + s*cos(omega) + s)/(2*cos(omega)+2)
    upper_link_length = min(sol1,sol2)
    lower_link_length = max(sol1,sol2)
    # sanity check: the two solved link lengths must add up to link_sum
    if abs((upper_link_length+lower_link_length-link_sum)/link_sum) > 0.001:
        print("link sum error")
        print("  upper_link_length=", meter2foot(upper_link_length))
        print("  lower_link_length=", meter2foot(lower_link_length))
        print("  link_sum=", meter2foot(link_sum))
        raise ValueError
    cocked_connection_pos = circle_intersection(cocked_cw_ctr, lower_link_length,
                                                hanger_pos, upper_link_length)
    # all link angles measured at top of link
    cocked_upper_link_angle = rot2radians(cocked_connection_pos - hanger_pos)
    cocked_lower_link_angle = rot2radians(cocked_cw_ctr - cocked_connection_pos)
    rest_upper_link_angle = rot2radians(rest_cw_ctr - hanger_pos)
    rest_lower_link_angle = rest_upper_link_angle
    rest_connection_pos = hanger_pos + upper_link_length * radians2rot(rest_upper_link_angle)
    # end of short arm is on ellipse with foci at axle and cocked connection, with 'string' length
    # distance from axle to rest connection point.
    axle_rest_connection_distance = length_(rest_connection_pos)
    ellipse_axis_angle = rot2radians(-cocked_connection_pos)
    ellipse_a = axle_rest_connection_distance / 2.0
    ellipse_f = length_(cocked_connection_pos) / 2.0
    ellipse_e = ellipse_f / ellipse_a
    theta = ellipse_axis_angle - cocked_upper_link_angle
    connector_length = ellipse_a * (1-ellipse_e**2) / (1 - ellipse_e*cos(theta))
    # cocked_connection angle measured at connection point
    cocked_connection_angle = cocked_upper_link_angle
    cocked_short_arm_end = cocked_connection_pos + connector_length * radians2rot(cocked_connection_angle)
    short_arm_length = length_(cocked_short_arm_end)
    # sanity check: short arm + connector must span axle-to-rest-connection
    if abs((short_arm_length + connector_length - axle_rest_connection_distance)/axle_rest_connection_distance) > 0.001:
        print ("short arm length error:")
        print ("  ellipse_a=", meter2foot(ellipse_a))
        print ("  ellipse_f=", meter2foot(ellipse_f))
        print ("  ellipse_e=", ellipse_e)
        print ("  theta=", scipy.rad2deg(theta))
        print ("  connector_length=", meter2foot(connector_length))
        print ("  short_arm_length=", meter2foot(short_arm_length))
        print ("  axle_rest_connection_distance=",
               meter2foot(axle_rest_connection_distance))
        raise ValueError
    # short arm angle measured at axle
    cocked_short_arm_angle = rot2radians(cocked_short_arm_end)
    # compute beta, angle from long arm to short arm
    beta = pi + alpha - cocked_short_arm_angle
    # long arm end, cocked
    cocked_long_arm_end = long_arm_length * radians2rot(pi+alpha)
    # other dimensions
    pumpkin_diameter = inch2meter(8.0)
    pumpkin_ctr = cocked_long_arm_end + np.array((sling_length, 0.0))
    if debug:
        # rest short arm angle and position (for printing only)
        rest_short_arm_angle = rot2radians(rest_connection_pos)
        rest_short_arm_end = short_arm_length * radians2rot(rest_short_arm_angle)
        # rest long arm angle and position (for printing only)
        rest_long_arm_angle = (pi+alpha) + (rest_short_arm_angle - cocked_short_arm_angle)
        rest_long_arm_end = long_arm_length * radians2rot(rest_long_arm_angle)
        print("slide_y=", meter2foot(slide_y))
        print("long_arm_length=", meter2foot(long_arm_length))
        print("pumpkin=", meter2foot(pumpkin_ctr))
        print("hanger=", meter2foot(hanger_pos))
        print("cocked_connection=", meter2foot(cocked_connection_pos))
        print("cocked_cw=", meter2foot(cocked_cw_ctr))
        print("cocked_short_arm=", meter2foot(cocked_short_arm_end))
        print("cocked_long_arm=", meter2foot(cocked_long_arm_end))
        print("cocked_lower_link_angle=", scipy.rad2deg(cocked_lower_link_angle))
        print("rest_lower_link_angle=", scipy.rad2deg(rest_lower_link_angle))
        print("connector_length=", meter2foot(connector_length))
        print("lower_link_length=", meter2foot(lower_link_length))
        print("rest_cw_ctr=", meter2foot(rest_cw_ctr))
        print("rest_connection=", meter2foot(rest_connection_pos))
        print("rest_short_arm=", meter2foot(rest_short_arm_end))
        print("rest_long_arm=", meter2foot(rest_long_arm_end))
    ### Machine frame origin is at axle
    sim.machineFrame=Frame(sim, "machine", theta=0, origin=(0,0))
    sim.machineFrame.machine=Rectangle(sim.machineFrame,
                                       l=hanger_pos[0]+2.0,
                                       w=-slide_y+1.0,
                                       theta=0,
                                       origin=(hanger_pos[0]/2,
                                               (slide_y)/2),
                                       mass=lb2kgram(5000),
                                       color=(0,0,0))
    front_foot_pos = (hanger_pos[0], slide_y-0.5)
    rear_foot_pos = (0, slide_y - 0.5)
    sim.machineFrame.rear_foot=Rectangle(sim.machineFrame,
                                         l=0.3,
                                         w=0.1,
                                         origin=rear_foot_pos,
                                         mass=0,
                                         color=(0,0,0))
    sim.machineFrame.front_foot=Rectangle(sim.machineFrame,
                                          l=0.3,
                                          w=0.1,
                                          origin=front_foot_pos,
                                          mass=0,
                                          color=(0,0,0))
    ### Arm frame origin is at axle. Framespace has long arm horizontal to the left
    sim.armFrame=Frame(sim, "arm", theta=alpha, origin=(0,0))
    sim.armFrame.long_arm=Beam(sim.armFrame,
                               x0=-long_arm_length, d0=arm_end_depth, t0=arm_end_thick,
                               x1=0, d1=arm_depth, t1=arm_thick,
                               density=pine_density,
                               color=(0.8,0.3,0))
    sim.armFrame.short_arm=dynamics.object.Rectangle(sim.armFrame,
                                                     l=inch2meter(18.99),
                                                     w=inch2meter(8.0),
                                                     theta=-beta,
                                                     origin=(-inch2meter(15.0)*cos(beta),
                                                             inch2meter(15.0)*sin(beta)),
                                                     mass=lb2kgram(53),
                                                     color=(0.8,0.3,0))
    sim.armFrame.connector_pin=dynamics.object.Circle(sim.armFrame,
                                                      radius=inch2meter(2.0),
                                                      origin=(-short_arm_length*cos(beta),
                                                              short_arm_length*sin(beta)),
                                                      mass=lb2kgram(1),
                                                      color=(0.8,0.3,0))
    sim.armFrame.long_arm_plate=dynamics.object.Rectangle(sim.armFrame,
                                                          l=inch2meter(27.5),
                                                          w=inch2meter(8.0),
                                                          theta=0.0,
                                                          origin=(inch2meter(-6.25), 0),
                                                          mass=lb2kgram(63),
                                                          color=(0.8,0.3,0))
    sim.armFrame.release_pin=dynamics.object.Circle(sim.armFrame,
                                                    radius=inch2meter(6),
                                                    origin=(-long_arm_length, 0),
                                                    mass=release_pin_mass, color=(1.0, 1.0, 1.0))
    # Weight frame origin is at pivot point, ramp horizontal to the right
    cocked_ramp_angle = rot2radians(cocked_cw_ctr-hinge_pos)
    sim.weightFrame=dynamics.frame.Frame(sim, "weight", theta=cocked_ramp_angle, origin=hinge_pos)
    sim.weightFrame.ramp = dynamics.object.Rectangle(sim.weightFrame, l=ramp_length, w=inch2meter(4),
                                                     mass=0, color=(0.3,0.5,0.2),
                                                     origin = (ramp_length/2,0))
    sim.weightFrame.cw = dynamics.object.Rectangle(sim.weightFrame, l=foot2meter(2.6), w=foot2meter(2.6),
                                                   color=(0.3,0.5,0.2),
                                                   mass=cw_mass,
                                                   origin = (cw_moment_arm,0),
                                                   moment = cw_moment)
    # Lower link frame origin is at end of ramp
    sim.lowerLinkFrame = dynamics.frame.Frame(sim, "lower link", origin=cocked_cw_ctr,
                                              theta = cocked_lower_link_angle-pi)
    sim.lowerLinkFrame.link = dynamics.object.Rectangle(sim.lowerLinkFrame, l=lower_link_length, w=inch2meter(6),
                                                        mass=lower_link_mass, color=(1.0,0.0,0.0),
                                                        origin=(lower_link_length/2, 0.0))
    sim.lowerLinkFrame.axle=dynamics.object.Circle(sim.lowerLinkFrame,
                                                   radius=inch2meter(3),
                                                   origin=(lower_link_length, 0.0),
                                                   mass=link_axle_mass, color=(1.0, 0.0, 0.0))
    # Upper link frame origin is the hanger
    cocked_upper_link_angle = rot2radians(cocked_connection_pos-hanger_pos)
    sim.upperLinkFrame = dynamics.frame.Frame(sim, "upper link", origin=hanger_pos,
                                              theta = cocked_upper_link_angle)
    sim.upperLinkFrame.link = dynamics.object.Rectangle(sim.upperLinkFrame, l=upper_link_length, w=inch2meter(6),
                                                        mass=upper_link_mass, color=(1.0,0.0,0.0),
                                                        origin=(upper_link_length/2, 0.0))
    # Connector frame origin is the end of the short arm
    sim.connectorFrame = dynamics.frame.Frame(sim, "connector", origin=cocked_short_arm_end,
                                              theta = rot2radians(cocked_connection_pos - cocked_short_arm_end))
    sim.connectorFrame.rod = dynamics.object.Rectangle(sim.connectorFrame, l=connector_length,
                                                       w=inch2meter(2),
                                                       mass=connector_rod_mass,
                                                       color=(0.0, 0.0, 0.0),
                                                       origin=(connector_length/2, 0.0))
    sim.connectorFrame.stiffener = dynamics.object.Rectangle(sim.connectorFrame, l=connector_length,
                                                             w=inch2meter(4.0),
                                                             mass=lb2kgram(100),
                                                             color=(0.0, 0.0, 0.0),
                                                             origin=(connector_length/2, inch2meter(3.0)))
    sim.connectorFrame.brace = dynamics.object.Rectangle(sim.connectorFrame, l=foot2meter(2),
                                                         w=inch2meter(4),
                                                         mass=connector_brace_mass,
                                                         color=(0.0, 0.0, 0.0),
                                                         origin=(connector_length-foot2meter(1), 0.0))
    # Pumpkin
    sim.pumpkinFrame=dynamics.frame.Frame(sim, "pumpkin", origin=pumpkin_ctr)
    sim.pumpkinFrame.pumpkin=dynamics.object.Circle(sim.pumpkinFrame,
                                                    radius=pumpkin_diameter/2.0,
                                                    mass=pumpkin_mass, color=(1.0, 0.5, 0))
    sim.pumpkinFrame.sling=dynamics.object.Circle(sim.pumpkinFrame,
                                                  radius=pumpkin_diameter/2.0,
                                                  mass=sling_mass, color=(1.0, 0.5, 0))
    # initialize frames
    for frame in sim.frames:
        frame.init()
    # define constraints
    sim.rear_foot = Nail(sim, "rear foot",
                         obj=sim.machineFrame.rear_foot,
                         xobj=(0,0),
                         xworld=rear_foot_pos)
    # NOTE(review): NailSpring takes x_world while Nail takes xworld -- confirm
    # against the dynamics.constraint/spring APIs.
    sim.front_foot = NailSpring(sim, "front foot",
                                obj=sim.machineFrame.front_foot,
                                xobj=(0,0),
                                x_world=front_foot_pos,
                                spring_constant=1e6,
                                damping_constant=500e3)
    sim.axle = Pin(sim, "axle",
                   obj0=sim.armFrame.long_arm,
                   xobj0=(0, 0),
                   obj1=sim.machineFrame)
    sim.hinge =Pin(sim, "hinge",
                   obj0=sim.weightFrame.ramp,
                   xobj0=(-ramp_length/2, 0.0),
                   obj1=sim.machineFrame)
    sim.hanger = Pin(sim, "hanger",
                     obj0=sim.upperLinkFrame.link,
                     xobj0=(-upper_link_length/2.0,0.0),
                     obj1=sim.machineFrame)
    sim.linkPin = Pin(sim, "linkPin",
                      obj0=sim.upperLinkFrame.link,
                      xobj0= (upper_link_length/2.0, 0.0),
                      obj1=sim.lowerLinkFrame.link,
                      xobj1 = (lower_link_length/2.0, 0.0))
    sim.rampPin = dynamics.constraint.Pin(sim, "rampPin",
                                          obj0=sim.weightFrame.ramp,
                                          xobj0= (ramp_length/2.0, 0.0),
                                          obj1=sim.lowerLinkFrame.link,
                                          xobj1 = (-lower_link_length/2.0, 0.0))
    sim.connectorPin1 = Pin(sim, "connectorPin1",
                            obj0=sim.armFrame.connector_pin,
                            xobj0=(0.0,0.0),
                            obj1=sim.connectorFrame.rod,
                            xobj1 = (-connector_length/2.0, 0.0))
    sim.connectorPin2 = Pin(sim, "connectorPin2",
                            obj0=sim.upperLinkFrame.link,
                            xobj0=(upper_link_length/2.0,0.0),
                            obj1=sim.connectorFrame.rod,
                            xobj1 = (connector_length/2.0, 0.0))
    sim.sling=Rod(sim, "sling",
                  obj0=sim.armFrame.long_arm, xobj0=(-long_arm_length,
                                                     0),
                  obj1=sim.pumpkinFrame.pumpkin, xobj1=(0.0,0.0),
                  length=sling_length)
    '''
    sim.trigger = Rod(sim, "trigger",
                      obj0=sim.pumpkinFrame.pumpkin,
                      xobj0= (0.0, 0.0),
                      obj1=sim.machineFrame.front_foot,
                      xobj1= (0.0,0.0))
    '''
    sim.slide=Shelf(sim, "slide",
                    obj=sim.pumpkinFrame.pumpkin,
                    xobj=(0,0),
                    height=slide_y)
    if (dry_fire):
        sim.sling.enabled = False
    print( "  running simulation")
    # NOTE(review): time.clock was removed in Python 3.8; fine for the
    # Python 2 era this script targets.
    from time import clock
    tstart=clock()
    sim.run(continue_sim, debug=debug)
    print ("  done: time=%g sec" % (clock()-tstart))
    if not sim.release_time:
        # Fit a spline to range-vs-time and find the time of maximum range
        # (root of the spline's first derivative, seeded at the sampled max).
        sim.range = Y2range(sim,sim.Y)
        range_spline = scipy.interpolate.UnivariateSpline(sim.t, sim.range, k=3,s=0.0)
        d0,t0 = max( (range,time) for range,time in zip(sim.range, sim.t) ) # find guess
        sim.tmax = fsolve(range_spline, t0, args=1) # root of first derivative of range
        sim.maxrange = range_spline(sim.tmax)
        launchDegrees_spline = scipy.interpolate.UnivariateSpline(sim.t, Y2launchDegrees(sim.Y), k=3,s=0.0)
        sim.launchDegrees = launchDegrees_spline(sim.tmax)
        print ("  distance=%g feet at %g sec" % (meter2foot(sim.maxrange), sim.tmax))
    else:
        sim.range=np.zeros(len(sim.t))
        sim.maxrange=0
    sim.Fmax = max(sim.hanger.Fvec())
    print("  max force on hanger = %g pounds" % (newton2lb(sim.Fmax)))
    return(sim)
def circle_intersection(ctr1, rad1, ctr2, rad2):
    """Return the intersection point of two circles.

    Of the two possible intersections, the one lying in the ccw direction
    from the vector ctr1->ctr2 is returned.
    """
    separation = ctr2 - ctr1
    base_len = length_(separation)
    # Absolute angle of the line between the two centers.
    base_angle = rot2radians(separation)
    # Law of cosines: angle at ctr1 between the center line and the radius
    # that points at the intersection.
    offset_angle = acos((base_len ** 2 + rad1 ** 2 - rad2 ** 2) / (2 * base_len * rad1))
    return ctr1 + rad1 * radians2rot(offset_angle + base_angle)
def continue_sim(sim, time, y):
    "continue simulation? (callback polled by sim.run each step)"
    #if time>0.001:
    #    sim.trigger.enabled = False
    # Drop the slide constraint once it starts pulling (negative normal force).
    if sim.slide.enabled:
        shelf_force = sim.slide.forces[0][1]
        if shelf_force < 0.0:
            sim.slide.enabled = False
    # The 'if 0:' branch (angle-based sling release) is deliberately disabled;
    # the else branch below is the active release/termination logic.
    if 0:
        if sim.sling.enabled:
            v = sim.pumpkinFrame.v
            angle = atan2(v.A[1], v.A[0])
            if v.A[0] > 0.0 and v.A[1] > 0.0 and angle <= sim.release_angle:
                sim.maxrange = Y2range(sim,y)[0]
                sim.sling.enabled = False
                #return False
            return True
    else:
        # Timed release, if requested.
        if sim.release_time:
            if time >= sim.release_time:
                sim.sling.enabled = False
            return True
        # Otherwise keep running until the arm passes vertical and the
        # pumpkin stops rising.
        if sim.armFrame.theta >= -3*pi/4:
            return True
        if sim.pumpkinFrame.v.A1[1] > 0:
            return True
        return False
def Y2range(sim, Y, with_air_friction=True):
    """Convert simulation state vector(s) Y into projectile range(s) in meters.

    Y may be a single 1-D state vector or a 2-D array with one state per
    row.  The pumpkin's position/velocity occupy the 6-wide slot at the
    pumpkin frame's index.  With with_air_friction=False a simple vacuum
    ballistic estimate is returned; otherwise each launch is integrated
    with the drag-aware Flight model.
    """
    # scipy.constants is a subpackage; 'import scipy' at module level does
    # not make it available, so import it explicitly here.
    from scipy import constants
    if (len(Y.shape) == 1):
        Y = Y.reshape([1, len(Y)])
    idx = sim.pumpkinFrame.idx
    x0 = Y[:, 6*idx]
    y0 = Y[:, 6*idx+1]
    vx0 = Y[:, 6*idx+3]
    vy0 = Y[:, 6*idx+4]
    if not with_air_friction:
        # Vacuum ballistics: time of flight from the vertical velocity.
        tof = 2.0 * vy0 / constants.g
        tof[tof < 0.0] = 0.0  # descending shots never leave the ground
        return (tof*vx0)
    else:
        ranges = np.zeros(len(x0))  # renamed from 'range' (shadowed the builtin)
        flight = Flight(mass=sim.pumpkinFrame.pumpkin.mass,
                        area=pi*sim.pumpkinFrame.pumpkin.radius**2)
        for i in np.arange(len(x0)):
            # Only launches moving up and forward produce a range.
            if (vy0[i] > 0) & (vx0[i] > 0):
                flight.run([x0[i], y0[i]], [vx0[i], vy0[i]])
                ranges[i] = flight.range()
        return ranges
def Y2launchDegrees(Y):
    """Return launch angle(s) in degrees from state vector(s) Y.

    Columns 33 and 34 hold the pumpkin's x and y velocity components;
    a single 1-D state vector is treated as one row.
    """
    states = Y if len(Y.shape) != 1 else Y.reshape([1, len(Y)])
    velocity_x = states[:, 33]
    velocity_y = states[:, 34]
    return np.degrees(np.arctan2(velocity_y, velocity_x))
def trebPEvec(sim):
    """Total machine potential energy over time (sum of each moving frame's PEvec)."""
    total = sim.weightFrame.PEvec()
    for frame in (sim.upperLinkFrame, sim.lowerLinkFrame,
                  sim.connectorFrame, sim.armFrame):
        total = total + frame.PEvec()
    return total
def trebKEvec(sim):
    """Total machine kinetic energy over time (sum of each moving frame's KEvec)."""
    total = sim.weightFrame.KEvec()
    for frame in (sim.upperLinkFrame, sim.lowerLinkFrame,
                  sim.connectorFrame, sim.armFrame):
        total = total + frame.KEvec()
    return total
def plotEnergies(sim):
    """Plot machine PE (shifted so its minimum is zero), machine KE, their sum,
    and the pumpkin's total energy against simulation time.

    The energy vectors are computed once and reused; the original
    recomputed trebPEvec four times and trebKEvec three times per call.
    """
    pe = trebPEvec(sim)
    ke = trebKEvec(sim)
    pe_rezeroed = pe - min(pe)  # re-zero PE at its minimum
    plot(sim.t, pe_rezeroed)
    plot(sim.t, ke)
    plot(sim.t, pe_rezeroed + ke)  # total machine energy (conservation check)
    plot(sim.t, ke)  # NOTE: KE was plotted twice in the original; kept for identical figures
    plot(sim.t, sim.pumpkinFrame.KEvec() + sim.pumpkinFrame.PEvec())
def opt(X):
    """Objective for the optimizer: negative range, lightly penalized by peak hanger force.

    X = (sling_length, link_sum, hanger_x) in feet.  The candidate point
    and the finished simulation are stored in module globals so an aborted
    optimization run can still be inspected.  A failed simulation returns
    0.0, which the minimizer treats as a poor (but finite) objective.
    """
    global sim, X0
    X0 = X
    print ("X=", X)
    try:
        sim = treb(debug=False, time_step=0.0001, sim_duration=0.7,
                   sling_length=X[0], link_sum=X[1], hanger_x=X[2], slide_y=-9)
        #return -sim.maxrange
        return -sim.maxrange / sim.Fmax**0.10
    except KeyboardInterrupt:
        raise  # re-raise as-is (preserves the traceback) so Ctrl-C stops the optimizer
    except Exception:  # was a bare 'except:'; don't swallow SystemExit/KeyboardInterrupt
        return 0.0
# --- Optimization scratchpad: earlier starting points and disabled optimizer
# --- invocations, kept for reference.
#X0 = array([ 8.70381, 6.08564, 10.3123 ])
#X0 = array([ 8, 6, 10 ])
#X0 = [ 9.62859, 6.23794, 9.98966]
#X0 = [ 8.70153, 6.04452, 10.43426]
#X0 = array([ 8.68625, 6.00475, 10.44 ])
#X0 = array([ 8.21222, 5.58682, 11.43518, -9.0])
#X0 = array([8.411, 5.587, 11.433])
X0 = np.array([8.54665, 5.587, 11.38508])  # current best (sling_length, link_sum, hanger_x), feet
#lower = array([ 6.0, 3.0, 5.0])
#upper = array([ 12.0, 9.0, 12.0])
#result=scipy.optimize.fmin(opt, X0)
#result=scipy.optimize.fmin_l_bfgs_b(opt, X0, approx_grad=True, bounds=None)
#result=scipy.optimize.anneal(opt, X0, lower=lower, upper=upper, T0=0.001, feps=1e-60, full_output=True)
if __name__ == '__main__':
    # Run a single simulation with debug printing and animate the result.
    sim=treb(debug=True)
    anim=Animation(sim, Y2range)
"repo_name": "treygreer/treb",
"path": "treb_sim/src/first_in_fright_2012.py",
"copies": "1",
"size": "25532",
"license": "mit",
"hash": -4166431940091779000,
"line_mean": 45.6782449726,
"line_max": 120,
"alpha_frac": 0.5268290772,
"autogenerated": false,
"ratio": 3.4331047465375826,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9391799402422958,
"avg_score": 0.013626884262924754,
"num_lines": 547
} |
# a bit of tweaking on search path in order to easily import source files.
import sys
import os
# Resolve ../src relative to this test file and put it first on sys.path so
# the project modules shadow any installed copies.
sources = os.path.abspath(os.path.join(os.path.dirname(__file__),'../src'))
sys.path.insert(0,sources)
from file_stub import *
from kicad_pcb import *
import unittest
class KicadPcb_TestCase(unittest.TestCase):
'Tests for KiCad file loader and parser'
    def setUp(self):
        # Fresh parser for every test so no state leaks between cases.
        self.loader = KicadPcb()
def test_modules_whenEmptyBoardIsLoaded_ContainsZeroComponents(self):
board = '''(kicad_pcb (version 3) (host pcbnew "(2013-12-14 BZR 4555)-product") )'''
file = FileStub(board)
pcb = self.loader.load(file)
self.assertEqual(0, len(pcb.components))
def test_modules_whenBoardWithRectangularPcbEdgeOnly_ContainsZeroComponentsAndShapePolygonOf5Points(self):
board = '''(kicad_pcb (version 3) (host pcbnew "(2013-12-14 BZR 4555)-product")
(general
(drawings 4)
)
(layers
(28 Edge.Cuts user)
)
(gr_line (start 53.34 48.26) (end 53.34 35.56) (angle 90) (layer Edge.Cuts) (width 0.15))
(gr_line (start 73.66 48.26) (end 53.34 48.26) (angle 90) (layer Edge.Cuts) (width 0.15))
(gr_line (start 73.66 35.56) (end 73.66 48.26) (angle 90) (layer Edge.Cuts) (width 0.15))
(gr_line (start 53.34 35.56) (end 73.66 35.56) (angle 90) (layer Edge.Cuts) (width 0.15))
)'''
file = FileStub(board)
pcb = self.loader.load(file)
self.assertEqual(0, len(pcb.components))
self.assertEqual(5, len(pcb.shapePolygon))
def test_modules_whenBoardWithTriangularPcbEdgeOnly_ContainsZeroComponentsAndShapePolygonOf4Points(self):
board = '''(kicad_pcb (version 3) (host pcbnew "(2013-12-14 BZR 4555)-product")
(general
(drawings 3)
)
(layers
(28 Edge.Cuts user)
)
(gr_line (start 73.66 40.64) (end 60.96 40.64) (angle 90) (layer Edge.Cuts) (width 0.15))
(gr_line (start 73.66 27.94) (end 73.66 40.64) (angle 90) (layer Edge.Cuts) (width 0.15))
(gr_line (start 60.96 40.64) (end 73.66 27.94) (angle 90) (layer Edge.Cuts) (width 0.15))
)'''
file = FileStub(board)
pcb = self.loader.load(file)
self.assertEqual(0, len(pcb.components))
self.assertEqual(4, len(pcb.shapePolygon))
def test_modules_whenBoardWithRectangularGraphicsOnlyOnNonEdge_ContainsZeroComponentsAndShapePolygonOfZeroPoints(self):
board = '''(kicad_pcb (version 3) (host pcbnew "(2013-12-14 BZR 4555)-product")
(general
(drawings 4)
)
(layers
(24 Dwgs.User user)
)
(gr_line (start 53.34 48.26) (end 53.34 35.56) (angle 90) (layer Dwgs.User) (width 0.15))
(gr_line (start 73.66 48.26) (end 53.34 48.26) (angle 90) (layer Dwgs.User) (width 0.15))
(gr_line (start 73.66 35.56) (end 73.66 48.26) (angle 90) (layer Dwgs.User) (width 0.15))
(gr_line (start 53.34 35.56) (end 73.66 35.56) (angle 90) (layer Dwgs.User) (width 0.15))
)'''
file = FileStub(board)
pcb = self.loader.load(file)
self.assertEqual(0, len(pcb.components))
self.assertEqual(0, len(pcb.shapePolygon))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| {
"repo_name": "achary/kicad-3d",
"path": "tests/kicad_pcb_test.py",
"copies": "1",
"size": "3730",
"license": "mit",
"hash": -7803804734575118000,
"line_mean": 46.8205128205,
"line_max": 123,
"alpha_frac": 0.5436997319,
"autogenerated": false,
"ratio": 3.4157509157509156,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44594506476509155,
"avg_score": null,
"num_lines": null
} |
""" Abiword plugin for PubTal
Copyright (c) 2003 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
import os, os.path
import logging
from pubtal import SitePublisher
from simpletal import simpleTAL, simpleTALES
import AbiwordToHTMLConverter
def getPluginInfo ():
	""" Report this plugin's capabilities to PubTal: a single content
		handler covering Abiword (.abw) files.
	"""
	handler = {}
	handler ['functionality'] = 'content'
	handler ['content-type'] = 'Abiword'
	handler ['file-type'] = 'abw'
	handler ['class'] = AbiwordPagePublisher
	return [handler]
class AbiwordPagePublisher (SitePublisher.ContentPublisher):
	""" Publishes Abiword (.abw) content: converts the document to HTML
		and expands it through the page's PubTal template.
	"""
	def __init__ (self, pagePublisher):
		SitePublisher.ContentPublisher.__init__ (self, pagePublisher)
		self.log = logging.getLogger ("PubTal.AbiwordPagePublisher")
		self.converter = AbiwordToHTMLConverter.AbiwordToHTMLConverter()
	def publish (self, page):
		""" Render 'page' through its template and write the result to the
			page's destination path.
		"""
		template = self.templateConfig.getTemplate (page.getOption ('template', 'template.html'))
		context = simpleTALES.Context(allowPythonPath=1)
		# Get the page context for this content
		pageContext = self.getPageContext (page, template)
		context.addGlobal ('page', pageContext)
		macros = page.getMacros()
		# Determine the destination for this page
		relativeDestPath = pageContext ['destinationPath']
		self.pagePublisher.expandTemplate (template, context, relativeDestPath, macros)
	def getPageContext (self, page, template):
		""" Build the template context: convert the Abiword source, then
			merge its metadata headers, body and footnotes into the
			standard page map.
		"""
		pageMap = SitePublisher.ContentPublisher.getPageContext (self, page, template)
		rawFile = open (page.getSource(), 'r')
		try:
			# Parse it
			self.converter.convertContent (rawFile)
		finally:
			# Bug fix: the source file used to stay open if the converter
			# raised an exception part-way through parsing.
			rawFile.close()
		headers = self.converter.getMetadata()
		content = self.converter.getBody()
		footNotes = self.converter.getFootnotes()
		actualHeaders = pageMap ['headers']
		actualHeaders.update (headers)
		pageMap ['headers'] = actualHeaders
		pageMap ['content'] = content
		pageMap ['footnotes'] = footNotes
		return pageMap
| {
"repo_name": "owlfish/pubtal",
"path": "optional-plugins/abiwordContent/__init__.py",
"copies": "2",
"size": "3234",
"license": "bsd-3-clause",
"hash": 8953143160448503000,
"line_mean": 38.4512195122,
"line_max": 125,
"alpha_frac": 0.7665429808,
"autogenerated": false,
"ratio": 3.9680981595092026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9971944714624836,
"avg_score": 0.15253928513687326,
"num_lines": 82
} |
""" Abiword to HTML Converter for PubTal
Copyright (c) 2003 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
import xml.sax, StringIO, cgi
import logging
#font-weight: bold; font-style: italic; text-decoration: underline, line-through, overline
# Maps an Abiword character style to the CSS (property, value) pair used
# when building a <span style="..."> attribute.
HTML_StyleMap = {'italic': ('font-style', 'italic'), 'bold': ('font-weight', 'bold')
	,'subscript': ('vertical-align', 'sub'), 'superscript': ('vertical-align', 'super')
	,'underline': ('text-decoration', 'underline'), 'line-through': ('text-decoration', 'line-through')
	,'overline': ('text-decoration', 'overline')}
# Opening markup per logical element; '%s' entries are filled in with
# style text, link targets or anchor names at write time.
HTML_StartTagMap = {'text-style': '<span style="%s">', 'Bullet List': '<ul>'
	,'Numbered List': '<ol>', 'List Item': '<li>', 'link': '<a href="%s">'
	,'Start Bookmark': '<a name="%s">'
	,'Start endnote': '<a href="#%s">%s</a>'
	,'Endnote Anchor': '<a name="%s" style="vertical-align: super">%s</a>'
	,'table': '<table>', 'tablerow': '<tr>', 'tablecell': '<td%s>'
	,'p': '<p>', 'h1': '<h1>', 'h2': '<h2>', 'h3': '<h3>', 'h4': '<h4>'
	, 'h5': '<h5>', 'Plain Text': '<pre>'
	# Bug fix: startElement writes StartTagMap['br'] for <br/> elements;
	# the key was missing, so every line break raised KeyError.
	,'br': '<br>'}
# Note that we don't have any <br> end tag - it's not used in either HTML or XHTML
HTML_EndTagMap = {'text-style': '</span>', 'Bullet List': '</ul>'
	,'Numbered List': '</ol>', 'List Item': '</li>', 'link': '</a>'
	,'End Bookmark': '</a>'
	,'table': '</table>', 'tablerow': '</tr>', 'tablecell': '</td>'
	,'p': '</p>', 'h1': '</h1>', 'h2': '</h2>', 'h3': '</h3>', 'h4': '</h4>'
	, 'h5': '</h5>', 'Plain Text': '</pre>'}
class AbiwordToHTMLConverter (xml.sax.handler.ContentHandler, xml.sax.handler.DTDHandler):
	""" Convert AbiWord format to HTML or XHTML

		SAX ContentHandler that walks an Abiword (.abw) XML document and
		accumulates the equivalent HTML markup.  Footnotes/endnotes are
		captured separately from the main body (see getBody and
		getFootnotes); document metadata from <m> elements is collected
		into a dictionary (see getMetadata).
	"""
	def __init__ (self):
		xml.sax.handler.ContentHandler.__init__ (self)
		self.log = logging.getLogger ("PubTal.AbiwordToHTMLConverter")
	def convertContent (self, content):
		""" Parse 'content' (a file-like object holding Abiword XML) and
			build the HTML body, footnotes and metadata.  Resets all
			converter state, so one instance can be reused per document.
		"""
		self.result = StringIO.StringIO()
		self.scopeStack = []
		self.StartTagMap = HTML_StartTagMap
		self.EndTagMap = HTML_EndTagMap
		self.StyleMap = HTML_StyleMap
		self.ourParser = xml.sax.make_parser()
		self.log.debug ("Setting features of parser")
		self.ourParser.setFeature (xml.sax.handler.feature_external_ges, 0)
		self.ourParser.setFeature (xml.sax.handler.feature_namespaces, 0)
		self.ourParser.setContentHandler (self)
		# Initialise our state
		self.metaData = {}
		self.data = []
		self.currentAttributes = None
		self.statefulMarkup = StatefulMarkup (self.result, self.StartTagMap, self.EndTagMap)
		# Dictionary of current text styles (e.g. bold, italic, etc)
		self.textStyle = {}
		# List of endNotes that we've built up. Tuple of (linkName, linkHTML)
		self.endNoteNum = 1
		self.endNoteToNumMap = {}
		self.endNotes = []
		# Parse the content as XML
		self.ourParser.parse (content)
	def getBody (self):
		""" Return the accumulated HTML for the document body. """
		return self.result.getvalue()
	def getFootnotes (self):
		""" Return the accumulated HTML for all foot/endnotes. """
		return u"".join (self.endNotes)
	def getMetadata (self):
		""" Return the document metadata (from <m> elements) as a dict. """
		return self.metaData
	def startElement (self, tag, attributes):
		""" SAX callback: emit opening markup for an Abiword element. """
		self.log.debug ("Recieved Start Tag: " + tag + " Attributes: " + str (attributes))
		self.currentAttributes = attributes
		# The 'props' attribute packs CSS-like "name:value; ..." pairs.
		propertiesList = attributes.get ('props', "").split (';')
		properties = {}
		for prop in propertiesList:
			breakPoint = prop.find (':')
			properties [prop[0:breakPoint].strip()] = prop [breakPoint + 1:].strip()
		self.log.debug ("Character properties: %s" % str (properties))
		if (tag == "abiword"):
			try:
				fileformat = attributes ['fileformat']
			except:
				msg = ("No fileformat attribute on abiword element!")
				self.log.error (msg)
				raise AbiwordFormatException (msg)
			if (fileformat != "1.1"):
				self.log.warn ("Only file format 1.1 has been tested. Content is version %s" % fileformat)
		elif (tag == "p"):
			self.data = []
			self.statefulMarkup.startParagraph (tag, attributes, properties)
		elif (tag == "c"):
			self.writeStyledText()
			if (properties.get ("font-weight", "") == "bold"):
				self.textStyle ['bold'] = 1
			if (properties.get ("font-style","") == "italic"):
				self.textStyle ['italic'] = 1
			# This handles superscript and subscript
			textPosition = properties.get ("text-position", "")
			self.textStyle [textPosition] = 1
			# This handles overline, line-through, and underline
			textDecoration = properties.get ("text-decoration", "").split (" ")
			for decor in textDecoration:
				self.textStyle [decor] = 1
		elif (tag == "a"):
			linkDest = attributes ['xlink:href']
			self.result.write (self.StartTagMap ['link'] % cgi.escape (linkDest))
		elif (tag == "br"):
			# Write out any styled text and re-open SPANs as needed.
			self.writeStyledText()
			self.result.write (self.StartTagMap ['br'])
		elif (tag == "bookmark"):
			self.writeStyledText()
			self.statefulMarkup.startBookmark (tag, attributes, properties)
		elif (tag == "field"):
			self.writeStyledText()
			# Is this a footnote or endnote?
			type = attributes ['type']
			id = None
			if (type == "footnote_ref"):
				id = "footnote-id-%s" % attributes ['footnote-id']
				self.endNoteToNumMap [id] = self.endNoteNum
				self.result.write (self.StartTagMap ['Start endnote'] % (id, str (self.endNoteNum)))
				self.endNoteNum = self.endNoteNum + 1
			elif (type == "endnote_ref"):
				id = "endnote-id-%s" % attributes ['endnote-id']
				self.endNoteToNumMap [id] = self.endNoteNum
				self.result.write (self.StartTagMap ['Start endnote'] % (id, str (self.endNoteNum)))
				self.endNoteNum += 1
			elif (type == "endnote_anchor"):
				# The anchor text.
				id = "endnote-id-%s" % attributes ['endnote-id']
				self.result.write (self.StartTagMap ['Endnote Anchor'] % (id, str (self.endNoteToNumMap[id])))
			elif (type == "footnote_anchor"):
				# The anchor text for a footnote.
				id = "footnote-id-%s" % attributes ['footnote-id']
				self.result.write (self.StartTagMap ['Endnote Anchor'] % (id, str (self.endNoteToNumMap[id])))
		elif (tag == "foot" or tag == "endnote"):
			# Capture the footnote/endnote separately.
			self.scopeStack.append ((self.result, self.statefulMarkup))
			self.result = StringIO.StringIO()
			self.statefulMarkup = StatefulMarkup (self.result, self.StartTagMap, self.EndTagMap)
		elif (tag == "table"):
			# The begining of a table can mean the end of a list.
			self.statefulMarkup.structureChange()
			self.result.write (self.StartTagMap ['table'])
		elif (tag == "cell"):
			# Abiword records each cell's grid position; the row/col spans
			# are derived from the difference between the attach points.
			leftAttach = int (properties ['left-attach'])
			rightAttach = int (properties ['right-attach'])
			bottomAttach = int (properties ['bot-attach'])
			topAttach = int (properties ['top-attach'])
			width = rightAttach - leftAttach
			cellAtts = u""
			if (width > 1):
				cellAtts += ' colspan="%s"' % str (width)
			height = bottomAttach - topAttach
			if (height > 1):
				cellAtts += ' rowspan="%s"' % str (height)
			# Do we have to close a TR?
			if (leftAttach == 0):
				if (topAttach != 0):
					# This isn't the first row, so we need to close a previous one!
					self.result.write (self.EndTagMap ['tablerow'])
				self.result.write (self.StartTagMap ['tablerow'])
			self.result.write (self.StartTagMap ['tablecell'] % cellAtts)
		elif (tag == "m"):
			# For metadata we want to clear out any previous text we've accumulated.
			self.data = []
		else:
			#self.log.warn ("Unknown start element %s" % tag)
			self.statefulMarkup.structureChange()
	def endElement (self, tag):
		""" SAX callback: emit closing markup for an Abiword element. """
		self.log.debug ("Recieved Real End Tag: " + tag)
		if (tag == "m"):
			keyName = self.currentAttributes ['key']
			if (keyName.startswith ("dc.")):
				keyName = keyName [3:]
			if (keyName == "creator"):
				# Used in PubTal to keep things the same as the examples.
				keyName = "author"
			data = u"".join (self.data)
			self.log.debug ("Meta information key=%s value=%s" % (keyName, data))
			self.metaData [keyName] = data
		elif (tag == "p"):
			self.writeStyledText()
			self.statefulMarkup.endParagraph (tag)
		elif (tag == "c"):
			self.writeStyledText()
			self.textStyle = {}
		elif (tag == "a"):
			self.result.write (self.EndTagMap ['link'])
		elif (tag == "foot" or tag == "endnote"):
			# Restore the outer document's buffer and markup state.
			self.endNotes.append (self.result.getvalue())
			self.result, self.statefulMarkup = self.scopeStack.pop()
		elif (tag == "table"):
			self.statefulMarkup.structureChange()
			self.result.write (self.EndTagMap ['tablerow'])
			self.result.write (self.EndTagMap ['table'])
		elif (tag == "cell"):
			# Ends of cells can mean the end of a list - best check
			self.statefulMarkup.structureChange()
			self.result.write (self.EndTagMap ['tablecell'])
		elif (tag == "bookmark"):
			pass
		elif (tag == "field"):
			pass
		else:
			#self.log.warn ("Unknown end element %s" % tag)
			self.statefulMarkup.structureChange()
	def characters (self, data):
		""" SAX callback: buffer character data until it is flushed. """
		# Accumulate the character data together so that we can merge all the newline events
		self.log.debug ("Recieved character data: " + data)
		self.data.append (data)
	def writeStyledText (self):
		""" Flush buffered text, wrapped in a styled <span> when any
			character styles (bold, italic, ...) are currently active.
		"""
		if (len (self.data) == 0):
			self.log.debug ("No text to write.")
			return
		styleDictionary = {}
		for style in self.textStyle.keys():
			styleProperty, styleValue = self.StyleMap.get (style, (None, None))
			if (styleProperty is not None):
				curPropVal = styleDictionary.get (styleProperty, u"")
				if (len (curPropVal) > 0):
					curPropVal += ', ' + styleValue
				else:
					curPropVal = styleValue
				styleDictionary [styleProperty] = curPropVal
		# Now build the style attribute value.
		if (len (styleDictionary) > 0):
			styleValueList = []
			for property in styleDictionary.keys():
				# Get the value for this property
				value = styleDictionary [property]
				styleValueList.append (property + ": " + value)
			self.result.write (self.StartTagMap ['text-style'] % u"; ".join (styleValueList))
		# Write out the text
		self.result.write (cgi.escape (u"".join (self.data)))
		self.data = []
		if (len (styleDictionary) > 0):
			self.result.write (self.EndTagMap ['text-style'])
class StatefulMarkup:
	""" Maintains the markup context for either the main document or a
		single footnote/endnote: the current paragraph type, any open
		bookmark anchors, and the stack of open (possibly nested) lists.
	"""
	def __init__ (self, result, startTagMap, endTagMap):
		""" 'result' is a file-like object the markup is written to; the
			tag maps supply the opening/closing markup for each element.
		"""
		self.log = logging.getLogger ("PubTal.AbiwordToHTMLConverter.StatefulMarkup")
		self.result = result
		self.StartTagMap = startTagMap
		self.EndTagMap = endTagMap
		self.paragraphType = None
		# List of currently open boomark (anchor) links.
		self.bookmarks = []
		# Current stack of lists.
		self.listStack = []
	def startParagraph (self, tag, attributes, properties):
		""" Emit the opening markup for a paragraph.  A 'listid' attribute
			marks a list item, which may open a nested list, continue the
			current one, or close finished levels.
		"""
		paragraphType = attributes.get ('style', "")
		self.log.debug ("Starting a new paragraph, type %s" % paragraphType)
		# Bug fix: was attributes.has_key ('listid').  has_key was removed
		# in Python 3; the 'in' test behaves identically here.
		if ('listid' in attributes):
			# This is a list item.
			listStyle = properties.get ('list-style', 'Bullet List')
			listLevel = attributes ['level']
			if (len (self.listStack) > 0):
				# We already have a list opened, so let's compare levels
				oldListLevel, oldListType = self.listStack[-1]
				if (oldListLevel < listLevel):
					# We are growing outwards with this item.
					self.result.write (self.StartTagMap [listStyle])
					# Add this list to the stack
					self.listStack.append ((listLevel, listStyle))
				elif (oldListLevel > listLevel):
					# We are going down a level!
					# Take this opportunity to close out the list item.
					self.result.write (self.EndTagMap ['List Item'])
					# Close the actual list
					self.result.write (self.EndTagMap [oldListType])
					# Also close out the containing list item.
					# Take this opportunity to close out the list item.
					self.result.write (self.EndTagMap ['List Item'])
					self.listStack.pop()
				else:
					# This is an item in an existing list, so close out the last item.
					self.result.write (self.EndTagMap ['List Item'])
			else:
				# This is the first item in a new list!
				# Add this list to the stack
				self.listStack.append ((listLevel, listStyle))
				self.result.write (self.StartTagMap [listStyle])
			# This paragraph type is really a list item.
			self.paragraphType = "List Item"
		else:
			# This is not a list item - check for the possibility of an open list
			while (len (self.listStack) > 0):
				self.log.debug ("We have an open list, but the next P element is not a list item!")
				oldListLevel, oldListType = self.listStack.pop()
				# Take this opportunity to close out the list item.
				self.result.write (self.EndTagMap ['List Item'])
				# Close the old list type
				self.result.write (self.EndTagMap [oldListType])
			if (paragraphType.startswith ("Heading")):
				headingLevel = paragraphType [-1:]
				self.paragraphType = u"h" + headingLevel
			elif (paragraphType == "Plain Text"):
				self.paragraphType = "Plain Text"
			else:
				self.paragraphType = "p"
			self.result.write (self.StartTagMap [self.paragraphType])
	def endParagraph (self, tag):
		""" Emit the closing markup for the current paragraph, first
			closing any bookmark anchors still open.
		"""
		self.log.debug ("Closing paragraph of type %s" % self.paragraphType)
		while (len (self.bookmarks) > 0):
			oldBookmark = self.bookmarks.pop()
			self.result.write (self.EndTagMap ['End Bookmark'])
		# Don't write out the </li> for lists here - it depends on what follows next!
		if (self.paragraphType != 'List Item'):
			self.result.write (self.EndTagMap [self.paragraphType] + '\n')
	def startBookmark (self, tag, attributes, properties):
		""" Open or close a named bookmark anchor. """
		# Is this the start, or end of a bookmark?
		type = attributes ['type']
		name = attributes ['name']
		if (type == "end" and name in self.bookmarks):
			# Closing a bookmark
			self.result.write (self.EndTagMap ['End Bookmark'])
			self.bookmarks.remove (name)
		elif (type == "start"):
			# Opening a new bookmark.
			self.result.write (self.StartTagMap ['Start Bookmark'] % name)
			self.bookmarks.append (name)
	def structureChange (self):
		""" Called to indicate that the next tag type was not a paragraph.
			Used for when <table> closes a list, etc.
		"""
		while (len (self.listStack) > 0):
			self.log.debug ("We have an open list, but the next P element is not a list item!")
			oldListLevel, oldListType = self.listStack.pop()
			# Take this opportunity to close out the list item.
			self.result.write (self.EndTagMap ['List Item'])
			# Close the old list type
			self.result.write (self.EndTagMap [oldListType])
class AbiwordFormatException (Exception):
	""" Raised when the source document lacks a usable Abiword fileformat
		attribute on its root element.
	"""
	pass
| {
"repo_name": "owlfish/pubtal",
"path": "mytesting/abiwordContent/AbiwordToHTMLConverter.py",
"copies": "1",
"size": "15706",
"license": "bsd-3-clause",
"hash": -6807853865313151000,
"line_mean": 39.3753213368,
"line_max": 103,
"alpha_frac": 0.6711447854,
"autogenerated": false,
"ratio": 3.317001055966209,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9037653284795151,
"avg_score": 0.09009851131421151,
"num_lines": 389
} |
a = 'blah {foo-bar %d'
a = 'blah {foo-bar %d}'
a = 'blah {foo-bar %d //insane {}}'
a = '{}blah {foo-bar %d //insane {}}'
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.single.python
blah {foo-bar : source.python, string.quoted.single.python
%d : constant.character.format.placeholder.other.python, meta.format.percent.python, source.python, string.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.single.python
blah : source.python, string.quoted.single.python
{foo-bar : source.python, string.quoted.single.python
%d : constant.character.format.placeholder.other.python, meta.format.percent.python, source.python, string.quoted.single.python
} : source.python, string.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.single.python
blah : source.python, string.quoted.single.python
{foo-bar : source.python, string.quoted.single.python
%d : constant.character.format.placeholder.other.python, meta.format.percent.python, source.python, string.quoted.single.python
//insane {}} : source.python, string.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.single.python
a : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.single.python
{} : constant.character.format.placeholder.other.python, meta.format.brace.python, source.python, string.quoted.single.python
blah : source.python, string.quoted.single.python
{foo-bar : source.python, string.quoted.single.python
%d : constant.character.format.placeholder.other.python, meta.format.percent.python, source.python, string.quoted.single.python
//insane {}} : source.python, string.quoted.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.single.python
| {
"repo_name": "MagicStack/MagicPython",
"path": "test/strings/format9.py",
"copies": "1",
"size": "2848",
"license": "mit",
"hash": -7936939987525237000,
"line_mean": 60.9130434783,
"line_max": 138,
"alpha_frac": 0.6664325843,
"autogenerated": false,
"ratio": 3.896032831737346,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 46
} |
#ablerCFLregionTest2.py
# Script exploring ABLER/CFL regions: builds random double-gaussian
# landscapes and measures how the shiiba regression responds to small
# rotations and stretches.  (Python 2 script; depends on the 'armor'
# package for the dbz pattern class and transformedCorrelations.)
import time, os
from armor import pattern
dbz = pattern.DBZ
np = pattern.np
dp = pattern.dp
plt = pattern.plt
ma = pattern.plt  # NOTE(review): probably meant pattern.ma; as written 'ma' aliases pyplot - confirm
from armor.geometry import transforms
from armor.geometry import transformedCorrelations as trc
outputFolder = '/media/TOSHIBA EXT/ARMOR/labLogs2/ABLERCFLregion/'
# A blank 200x200 masked field, used only to derive coordinate grids I, J.
arrayShape = np.array((200, 200))
m = np.ma.zeros(arrayShape)
m.mask=False
m = dbz(matrix=m)
m.show()
I, J = m.IJ()
# One sample double gaussian, displayed and saved for reference.
dg = trc.doubleGaussian(I, J, centroid_i=100, centroid_j=150, sigma_i=20, sigma_j=50, theta=1 )
DG = dbz(matrix=np.ma.array(dg))
DG.vmin=0
DG.vmax=2
DG.show()
DG.saveImage(outputFolder+'DG_i100_j150_sigmai20_sigmaj50_theta1.png')
# test case: one double gaussian
params = np.random.random(5)
params *= [200, 200, 20, 30, np.pi]
params += [0 , 0, 5, 5, 0]
dg = trc.doubleGaussian(I, J, *params)
DG = dbz(matrix=np.ma.array(dg))
DG.vmin=0
DG.vmax=1
DG.name = 'Centroid = ' + str(params[0:2].round(2)) + ', sigma i,j = ' + str(params[2:4].round(2)) + ',\ntheta ' + str(params[4])
DG.show()
# test case: 100 double gaussians
# (original variant, disabled in favour of the 20-gaussian version below)
#dgs = []
#paramsList = []
#N = 100
#for i in range(N):
#    params = np.random.random(5)
#    params *= [200, 200, 20, 30, np.pi]
#    params[2] = params[3] #hack
#    paramsList.append(params)
# Sum of N circular double gaussians clustered near the grid centre.
dgs = []
paramsList = []
N=20
for i in range(N): #2014-11-04
    params = np.random.random(5)
    params *= [50, 50, 20, 15, np.pi]
    params[2] = params[3] #hack
    params += [75, 75, 0, 0, 0]
    paramsList.append(params)
for i in range(N):
    dg = trc.doubleGaussian(I, J, *paramsList[i])
    dgs.append(dg)
DG = dbz(matrix = sum(dgs))
DG.setMaxMin()
DG.name='Sum of ' +str(N) + ' double gaussians'
DG.saveImage(outputFolder+'sumOf%dDoubleGaussians_'%N + str(int(time.time()))+'.jpg')
DG.show()
# transformed - radiated from origin
def getParamsList(N= 100, maxRadius=10, minRadius=2):
    """Generate N random double-gaussian parameter sets.

    Each entry is a numpy array [centroid_i, centroid_j, sigma_i, sigma_j,
    theta]: centroids uniform in [0, 200), sigmas equal (circular
    gaussians) and uniform in [minRadius, minRadius+(maxRadius-minRadius+1)),
    theta uniform in [0, pi).
    """
    paramsList = []
    for i in range(N):
        params = np.random.random(5)
        params *= [200, 200, maxRadius-minRadius+1, maxRadius-minRadius+1, np.pi]
        params += [0, 0, minRadius, minRadius, 0]
        params[2] = params[3]   # force circular gaussians
        # Bug fix: the original appended the stale module-level `dg` to the
        # global `dgs` list here - leftover debris from the commented-out
        # trc.doubleGaussian call.  That side effect has been removed.
        paramsList.append(params)
    return paramsList
def affineTransform(dilation=1.0, rotation=0.0, translation=(0,0)):
    """Unimplemented placeholder for a general affine transform of a
    gaussian landscape; the intended steps are sketched below.
    """
    # 1. build the rotation matrix
    # 2. build the dilation matrix
    # 3. compute the transformation parameters
    # 4. transform the function
    pass
def doubleGaussianFunction(centroid_i, centroid_j, sigma_i, sigma_j, theta):
    """Build and return a gaussian evaluator g(I, J).

    The returned callable evaluates, at coordinates (I, J), an anisotropic
    gaussian centred at (centroid_i, centroid_j) with widths sigma_i and
    sigma_j along axes rotated by theta radians.  Accepts scalars or
    numpy arrays for I and J.
    """
    cosTheta = np.cos(theta)
    sinTheta = np.sin(theta)
    def gaussian(I, J):
        # Offsets from the centroid, rotated into the gaussian's own frame.
        di = I - centroid_i
        dj = J - centroid_j
        ri = cosTheta*di - sinTheta*dj
        rj = sinTheta*di + cosTheta*dj
        return np.exp( - ri**2 / (2*sigma_i**2) - rj**2 / (2*sigma_j**2) )
    return gaussian
def doubleGaussianLandscape(paramsList):
    """Return a callable img(i, j) that sums one gaussian evaluated at
    (i, j) for every parameter set in paramsList.
    """
    gaussians = []
    for v in paramsList:
        gaussians.append(doubleGaussianFunction(*v))
    def img(i, j):
        total = 0
        for g in gaussians:
            total = total + g(i, j)
        return total
    return img
def constructImage(paramsList, display=False):
    """Render the summed gaussian landscape over the module grids I, J
    and wrap it in a dbz image object (shown on screen when display=True).
    """
    layers = [doubleGaussianFunction(*v)(I, J) for v in paramsList]
    field = sum(layers)
    masked = np.ma.array(field)
    masked.mask = False
    image = dbz(matrix=masked)
    image.setMaxMin()
    if display:
        image.show()
    return image
def rotate(phi, paramsList=paramsList, centre=arrayShape/2,):
    """Rotate every gaussian's centroid about *centre* by *phi* radians
    and add *phi* to each orientation angle.  Returns a new params list;
    sigmas are unchanged.
    """
    cosPhi = np.cos(phi)
    sinPhi = np.sin(phi)
    def rotateOne(params):
        centroid_i, centroid_j, sigma_i, sigma_j, theta = params
        di = centroid_i - centre[0]
        dj = centroid_j - centre[1]
        ri = cosPhi*di - sinPhi*dj + centre[0]
        rj = sinPhi*di + cosPhi*dj + centre[1]
        return [ri, rj, sigma_i, sigma_j, theta + phi]
    return [rotateOne(params) for params in paramsList]
def stretch(factor_i, factor_j, paramsList=paramsList, centre=arrayShape/2):
    """Scale each gaussian about *centre*: centroid offsets and sigmas in
    the i direction are multiplied by factor_i, in j by factor_j; theta is
    unchanged.  Each params entry is assumed to be a numpy array.
    """
    shift = [centre[0], centre[1], 0, 0, 0]
    scale = [factor_i, factor_j, factor_i, factor_j, 1]
    def stretchOne(params):
        stretched = params.copy()   # keep the caller's array intact
        stretched -= shift
        stretched *= scale
        stretched += shift
        return stretched
    return [stretchOne(params) for params in paramsList]
def translate(i, j, paramsList=paramsList):
    """Shift every gaussian's centroid by (i, j); other params unchanged."""
    shifted = []
    for params in paramsList:
        shifted.append(params + [i, j, 0, 0, 0])
    return shifted
def plotRsquared(p0=paramsList, transform='rotation', rlimit=0.05, step=0.002, *args, **kwargs):
    """Apply an increasing rotation or stretch to p0, regress each
    transformed image against the original with the shiiba method, and
    plot/save the Rsquared-versus-change curve.  Returns the list of
    Rsquared values.  (Python 2 script code.)
    """
    timeStamp = str(int(time.time()))
    IMG0 = constructImage(p0)
    plt.close()
    xs=np.arange(0, rlimit, step)
    ys=[]
    print '\n-----------------\n'
    print transform
    for x in xs:
        if transform=='rotation':
            p1 = rotate(phi=x, paramsList=p0, *args, **kwargs)
        elif transform=='stretching':
            # Stretch i by (1+x) and j by (1-x): an area-preserving shear
            # to first order in x.
            p1 = stretch(1+x, 1-x, paramsList=p0, *args, **kwargs)
        IMG1 = constructImage(p1)
        IMG1.name = transform + str(x)
        IMG1.show()
        Rsquared = IMG0.shiiba(IMG1, searchWindowWidth=9, searchWindowHeight=9)['Rsquared']
        print 'x:', x
        print 'Rsquared', Rsquared
        ys.append(Rsquared)
    plt.clf()
    plt.plot(xs, ys)
    title = transform+": Rsquared versus change" + "(radians)" * (transform=='rotation') + " relative stretching" *(transform=='stretching')
    plt.title(title)
    plt.savefig(outputFolder+ timeStamp + "_Rsquared versus change plot - " + transform + '.jpg')
    return ys
def transform_and_analyse(p0=paramsList, transform='rotation', rlimit=0.20, step=0.01, outputFolder=outputFolder, *args, **kwargs):
    """Like plotRsquared, but additionally logs the theoretical versus
    fitted shiiba C-coefficients for each transform size to a timestamped
    text file.  Returns the list of Rsquared values.  (Python 2 code.)
    """
    timeStamp = str(int(time.time()))
    logFile = open(outputFolder+timeStamp+'_%s_logFile.txt'%transform,'a')
    logFile.write('#x, Rsquared, c1t, c2t, c4t, c5t, c1, c2, c4, c5\n')
    print 'output file:', outputFolder+timeStamp+'_%s_logFile.txt'%transform
    time.sleep(3)
    cos = np.cos
    sin = np.sin
    IMG0 = constructImage(p0)
    plt.close()
    xs=np.arange(0, rlimit, step)
    ys=[]
    print '\n-----------------\n'
    print transform
    for x in xs:
        if transform=='rotation':
            p1 = rotate(phi=x, paramsList=p0, *args, **kwargs)
            c1t = cos(x) -1  # theoretical "shiiba" C-values
            c2t = -sin(x)
            c4t = sin(x)
            c5t = cos(x) -1
        elif transform=='stretching':
            p1 = stretch(1+x, 1-x, paramsList=p0, *args, **kwargs)
            c1t = 1+x -1  # theoretical "shiiba" C-values
            c2t = 0
            c4t = 0
            c5t = 1-x-1
        IMG1 = constructImage(p1)
        IMG1.name = transform + str(x)
        #IMG1.show()
        res = IMG0.shiiba(IMG1, searchWindowWidth=9, searchWindowHeight=9)
        Rsquared = res['Rsquared']
        C = res['C']
        c1 = C[0]  # experimental "shiiba" C-values
        c2 = C[1]
        c4 = C[3]
        c5 = C[4]
        print 'x:', x
        print 'Rsquared', Rsquared
        ys.append(Rsquared)
        outputString = ', '.join([str(v) for v in [x, Rsquared, c1t, c2t, c4t, c5t, c1, c2, c4, c5]]) +'\n'
        print outputString
        logFile.write(outputString)
    plt.clf()
    plt.plot(xs, ys)
    title = transform+": Rsquared versus change" + "(radians)" * (transform=='rotation') + " relative stretching" *(transform=='stretching')
    plt.title(title)
    plt.savefig(outputFolder+ timeStamp + "_Rsquared versus change plot - " + transform + '.jpg')
    logFile.close()
    return ys
###########################################
# tests
# rotation
# Single before/after image pair for a 10-degree rotation, with crosses
# drawn at the image centres and both frames saved to disk.
timeStamp = str(int(time.time()))
paramsList1 = rotate(np.pi/18, paramsList)
a = constructImage(paramsList)
b = constructImage(paramsList1)
c = a-b
c.setMaxMin()
a.imagePath = outputFolder + timeStamp +"_a.jpg"
b.imagePath = outputFolder + timeStamp +"_b.jpg"
a.drawCross(newObject=False)
b.drawCross(newObject=False)
b.name='rotation'
a.saveImage()
b.saveImage()
# stretch
# Same again for a (0.9, 1.1) stretch; the difference image c is shown.
timeStamp = str(int(time.time()))
paramsList1 = stretch(0.9, 1.1, paramsList)
a = constructImage(paramsList)
b = constructImage(paramsList1)
c = a-b
c.setMaxMin()
c.show()
a.imagePath = outputFolder + timeStamp +"_a.jpg"
b.imagePath = outputFolder + timeStamp +"_b.jpg"
a.drawCross(newObject=False)
b.drawCross(newObject=False)
b.name='stretching'
a.saveImage()
b.saveImage()
# test case ABLER
#ys1 = plotRsquared(paramsList, transform='rotation')
#ys2 = plotRsquared(paramsList, step=0.02, rlimit=0.5, transform='stretching')
# looping
for count in range(10):
paramsList = getParamsList(100)
a = constructImage(paramsList)
# rotation
timeStamp = str(int(time.time()))
paramsList1 = rotate(np.pi/18, paramsList)
a = constructImage(paramsList)
b = constructImage(paramsList1)
a.name='original'
b.name='rotation'
#c = a-b
#c.setMaxMin()
a.imagePath = outputFolder + timeStamp +"_a.jpg"
b.imagePath = outputFolder + timeStamp +"_b.jpg"
a.drawCross(newObject=False)
b.drawCross(newObject=False)
a.saveImage()
b.saveImage()
# stretch
timeStamp = str(int(time.time()))
paramsList1 = stretch(0.9, 1.1, paramsList)
a = constructImage(paramsList)
b = constructImage(paramsList1)
a.name='original'
b.name='stretching'
a.imagePath = outputFolder + timeStamp +"_a.jpg"
b.imagePath = outputFolder + timeStamp +"_b.jpg"
a.drawCross(newObject=False)
b.drawCross(newObject=False)
b.saveImage()
ys1 = transform_and_analyse(paramsList, transform='rotation', rlimit=0.20, step=0.01)
ys1a = transform_and_analyse(paramsList, transform='rotation', rlimit=0.05, step=0.002)
ys2 = transform_and_analyse(paramsList, transform='stretching', rlimit=0.20, step=0.01)
ys2a = transform_and_analyse(paramsList, transform='stretching', rlimit=0.05, step=0.002)
| {
"repo_name": "yaukwankiu/armor",
"path": "tests/ablerCFLregionTest2.py",
"copies": "1",
"size": "10168",
"license": "cc0-1.0",
"hash": 3286458954127436000,
"line_mean": 31.3821656051,
"line_max": 140,
"alpha_frac": 0.6075924469,
"autogenerated": false,
"ratio": 2.915137614678899,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40227300615788986,
"avg_score": null,
"num_lines": null
} |
#ablerCFLregionTest.py
import time, os
from armor import pattern
dbz = pattern.DBZ
np = pattern.np
dp = pattern.dp
from armor.geometry import transforms as tr
outputFolder = '/media/TOSHIBA EXT/ARMOR/labLogs2/'
a = pattern.a.load()
a = a.getWindow(400,400,200,200)
X, Y = np.meshgrid(range(200), range(200))
I, J = Y, X
X = dbz(matrix=X)
Y = dbz(matrix=Y)
2 = a.affineTransform(tr.rotation(rad=np.pi/3), origin=a.coordinateOrigin)
a2.showWith(a)
#####################################
# rotation
#for N in range(0, 10):
# print N, ' degrees'
# T = tr.rotation(rad=np.pi/180 * N)
xs =np.arange(0,0.05,0.002)
ys =[]
for x in xs:
T = tr.rotation(rad=x)
origin = (100,100)
X2 = X.affineTransform(T, origin=origin)
Y2 = Y.affineTransform(T, origin=origin)
diffx = X2-X
diffy = Y2-Y
diffx.setMaxMin()
diffy.setMaxMin()
#diffx.showWith(diffy)
diffx.matrix = (abs(diffx.matrix)<=1)
diffx.setMaxMin()
#diffx.show()
diffy.matrix = (abs(diffy.matrix)<=1)
diffy.setMaxMin()
#diffy.show()
diffxy = diffx.copy()
diffxy.matrix = diffx.matrix * diffy.matrix
diffxy.cmap = 'jet'
#diffxy.name = 'CFL Region for A Rotation of '+str(N) + ' degrees'
#diffxy.show()
#diffxy.saveImage(outputFolder+'rotation_'+str(N)+'degrees.jpg')
#time.sleep(1)
y = 1. * (diffxy.matrix==1).sum() / ((diffxy.matrix==0).sum() + (diffxy.matrix==1).sum())
print x, y
ys.append(y)
###############################
# stretching
#for N in range(-4,10):
# print N, ' percents'
xs =np.arange(0,0.05,0.002)
zs =[]
for x in xs:
T = np.zeros((2,3))
#T[0,0] = 1+ 0.01*N
#T[1,1] = 1+ 0.01*N
T[0,0] = 1- x
T[1,1] = 1+ x
origin = (100,100)
X2 = X.affineTransform(T, origin=origin)
Y2 = Y.affineTransform(T, origin=origin)
diffx = X2-X
diffy = Y2-Y
diffx.setMaxMin()
diffy.setMaxMin()
diffx.showWith(diffy)
diffx.matrix = (abs(diffx.matrix)<=1)
diffx.setMaxMin()
diffx.show()
diffy.matrix = (abs(diffy.matrix)<=1)
diffy.setMaxMin()
#diffy.show()
diffxy = diffx.copy()
diffxy.matrix = diffx.matrix * diffy.matrix
diffxy.cmap = 'jet'
#diffxy.name = 'CFL Region for stretching in both axes of '+str(N) + ' percents'
diffxy.show()
#diffxy.saveImage(outputFolder+'stretching_'+str(N)+'percents.jpg')
#time.sleep(1)
z = 1. * (diffxy.matrix==1).sum() / ((diffxy.matrix==0).sum() + (diffxy.matrix==1).sum())
print x, ',', z
zs.append(z)
| {
"repo_name": "yaukwankiu/armor",
"path": "tests/ablerCFLregionTest.py",
"copies": "1",
"size": "2564",
"license": "cc0-1.0",
"hash": 8411181453925741000,
"line_mean": 23.8932038835,
"line_max": 93,
"alpha_frac": 0.5819032761,
"autogenerated": false,
"ratio": 2.564,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.36459032761,
"avg_score": null,
"num_lines": null
} |
"""A block Davidson solver for finding a fixed number of eigenvalues.
Adapted from https://joshuagoings.com/2013/08/23/davidsons-method/
"""
import time
from typing import Tuple
import numpy as np
from tqdm import tqdm
def davidson(A: np.ndarray, k: int, eig: int, tol: float = 1e-8, mmax=None) -> Tuple[np.ndarray, np.ndarray]:
    """Block Davidson iteration for the lowest eigenvalues of a symmetric matrix.

    Args:
        A: real symmetric (n, n) matrix.
        k: number of guess vectors added to the subspace per iteration.
        eig: number of eigenvalues whose convergence is monitored.
        tol: stop once the lowest ``eig`` Ritz values change by less than
            this (2-norm) between iterations.
        mmax: maximum subspace dimension; defaults to ``n // 2``.

    Returns:
        (theta, V): sorted Ritz values of the final subspace, and the matrix
        holding the guess vectors (only the leading columns are meaningful).
    """
    assert len(A.shape) == 2
    assert A.shape[0] == A.shape[1]
    n = A.shape[0]
    # BUG FIX: tol and mmax used to be read from module-level globals that
    # only exist when this file is run as a script, so importing and calling
    # davidson() raised NameError. They are now parameters with the same
    # default values the demo uses.
    if mmax is None:
        mmax = n // 2

    ## set up subspace and trial vectors
    # set of k unit vectors as guess
    t = np.eye(n, k)
    # hold guess vectors
    V = np.zeros((n, n))
    I = np.eye(n)

    # (tqdm progress wrapper removed: display-only, and a hard third-party
    # dependency inside the numerical kernel)
    for m in range(k, mmax, k):
        if m <= k:
            # first pass: seed the subspace with the normalized unit guesses
            for j in range(k):
                V[:, j] = t[:, j] / np.linalg.norm(t[:, j])
            theta_old = 1
        elif m > k:
            theta_old = theta[:eig]
        # re-orthonormalize the guess vectors, then project A into the span
        V, R = np.linalg.qr(V)
        T = V[:, : (m + 1)].T @ A @ V[:, : (m + 1)]
        THETA, S = np.linalg.eig(T)
        idx = THETA.argsort()
        theta = THETA[idx]
        s = S[:, idx]
        # append one diagonally preconditioned residual per guess vector
        for j in range(k):
            w = (A - theta[j] * I) @ V[:, : (m + 1)] @ s[:, j]
            q = w / (theta[j] - A[j, j])
            V[:, (m + j + 1)] = q
        norm = np.linalg.norm(theta[:eig] - theta_old)
        if norm < tol:
            break
    return theta, V
if __name__ == "__main__":
# dimension of problem
n = 1200
# convergence tolerance
tol = 1e-8
# maximum number of iterations
mmax = n // 2
## set up fake Hamiltonian
sparsity = 1.0e-4
A = np.zeros((n, n))
for i in range(0, n):
A[i, i] = i + 1
A = A + sparsity * np.random.randn(n, n)
A = (A.T + A) / 2
# number of initial guess vectors
k = 8
# number of eigenvalues to solve
eig = 4
start_davidson = time.time()
theta, V = davidson(A, k, eig)
end_davidson = time.time()
print(f"davidson = {theta[:eig]}; {end_davidson - start_davidson} seconds")
start_numpy = time.time()
E, Vec = np.linalg.eig(A)
E = np.sort(E)
end_numpy = time.time()
print(f"numpy = {E[:eig]}; {end_numpy - start_numpy} seconds")
| {
"repo_name": "berquist/programming_party",
"path": "eric/project12/davidson.py",
"copies": "1",
"size": "2084",
"license": "mpl-2.0",
"hash": -368262847949303940,
"line_mean": 24.4146341463,
"line_max": 79,
"alpha_frac": 0.5143953935,
"autogenerated": false,
"ratio": 2.943502824858757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3957898218358757,
"avg_score": null,
"num_lines": null
} |
# a block device defines a set of blocks used by a file system
from DiskGeometry import DiskGeometry
class BlockDevice:
def _set_geometry(self, cyls=80, heads=2, sectors=11, block_bytes=512, reserved=2, bootblocks=2):
self.cyls = cyls
self.heads = heads
self.sectors = sectors
self.block_bytes = block_bytes
self.reserved = reserved
self.bootblocks = bootblocks
# derived values
self.num_tracks = self.cyls * self.heads
self.num_blocks = self.num_tracks * self.sectors
self.num_bytes = self.num_blocks * self.block_bytes
self.block_longs = self.block_bytes / 4
self.num_longs = self.num_blocks * self.block_longs
def dump(self):
print "cylinders: ",self.cyls
print "heads: ",self.heads
print "sectors: ",self.sectors
print "block_bytes:",self.block_bytes
print "reserved: ",self.reserved
print "bootblocks: ",self.bootblocks
def _blk_to_offset(self, blk_num):
return self.block_bytes * blk_num
# ----- API -----
def create(self, **args):
pass
def open(self):
pass
def close(self):
pass
def flush(self):
pass
def read_block(self, blk_num):
pass
def write_block(self, blk_num, data):
pass
def get_geometry(self):
return DiskGeometry(self.cyls, self.heads, self.sectors)
def get_chs_str(self):
return "chs=%d,%d,%d" % (self.cyls, self.heads, self.sectors)
def get_chs_dict(self):
return { 'chs' : "%d,%d,%d" % (self.cyls, self.heads, self.sectors) }
| {
"repo_name": "alpine9000/amiga_examples",
"path": "tools/external/amitools/amitools/fs/blkdev/BlockDevice.py",
"copies": "1",
"size": "1508",
"license": "bsd-2-clause",
"hash": 3982880807004444000,
"line_mean": 29.16,
"line_max": 99,
"alpha_frac": 0.651193634,
"autogenerated": false,
"ratio": 3.168067226890756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4319260860890756,
"avg_score": null,
"num_lines": null
} |
"""A Bluetooth data source."""
import logging
from openxc.controllers.base import Controller
from .socket import SocketDataSource
from .base import DataSourceError
LOG = logging.getLogger(__name__)
try:
import bluetooth
except ImportError:
LOG.debug("pybluez library not installed, can't use bluetooth interface")
bluetooth = None
class BluetoothVehicleInterface(SocketDataSource, Controller):
    """A data source reading from a bluetooth device.

    If no address is given, nearby devices are scanned (repeatedly) until
    one whose name starts with OPENXC_DEVICE_NAME_PREFIX is found.
    """
    OPENXC_DEVICE_NAME_PREFIX = "OpenXC-VI-"

    def __init__(self, address=None, **kwargs):
        """Initialize a connection to the bluetooth device.

        Raises:
            DataSourceError if the bluetooth device cannot be opened.
        """
        super(BluetoothVehicleInterface, self).__init__(**kwargs)
        self.address = address

        if bluetooth is None:
            raise DataSourceError("pybluez library is not available")

        # Keep scanning until an OpenXC VI address is discovered.
        while self.address is None:
            self.scan_for_bluetooth_device()
        self.connect()

    def connect(self):
        # TODO push this to a background connecting thread so the constructor
        # can return
        port = 1
        connected = False
        # Retry until the RFCOMM connection succeeds.
        while not connected:
            self.socket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
            try:
                self.socket.connect((self.address, port))
            except IOError as e:
                # BUG FIX: LOG.warn was given an already-%-formatted message
                # plus a stray positional argument, which makes the logging
                # module raise "not all arguments converted" internally;
                # warn() is also deprecated in favor of warning().
                LOG.warning("Unable to connect to %s: %s", self.address, e)
            else:
                LOG.info("Opened bluetooth device at %s", port)
                connected = True

    def scan_for_bluetooth_device(self):
        """Scan nearby devices for one named with the OpenXC prefix and
        store its address in self.address (None if nothing was found)."""
        nearby_devices = bluetooth.discover_devices()

        self.address = None
        device_name = None
        for address in nearby_devices:
            device_name = bluetooth.lookup_name(address)
            if (device_name is not None and
                    device_name.startswith(self.OPENXC_DEVICE_NAME_PREFIX)):
                self.address = address
                break

        if self.address is not None:
            LOG.info("Discovered OpenXC VI %s (%s)" % (device_name, self.address))
        else:
            LOG.info("No OpenXC VI devices discovered")
| {
"repo_name": "openxc/openxc-python",
"path": "openxc/sources/bluetooth.py",
"copies": "1",
"size": "2211",
"license": "bsd-3-clause",
"hash": -5424071361890744000,
"line_mean": 30.1408450704,
"line_max": 82,
"alpha_frac": 0.6155585708,
"autogenerated": false,
"ratio": 4.4397590361445785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5555317606944579,
"avg_score": null,
"num_lines": null
} |
"""A board is a list of list of str. For example, the board
ANTT
XSOB
is represented as the list
[['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']]
A word list is a list of str. For example, the list of words
ANT
BOX
SOB
TO
is represented as the list
['ANT', 'BOX', 'SOB', 'TO']
"""
def is_valid_word(wordlist, word):
    """ (list of str, str) -> bool

    Return True if and only if word is an element of wordlist.

    >>> is_valid_word(['ANT', 'BOX', 'SOB', 'TO'], 'TO')
    True
    >>> is_valid_word(['ANT', 'BOX', 'SOB', 'TO'], 'TWO')
    False
    """
    # BUG FIX: the second doctest claimed True for 'TWO', which is not in
    # the list; the function (correctly) returns False. The linear scan is
    # replaced by the equivalent membership test.
    return word in wordlist
def make_str_from_row(board, row_index):
    """ (list of list of str, int) -> str

    Return the characters from the row of the board with index row_index
    joined into a single string, or "Index out of range" when row_index
    is past the last row.

    >>> make_str_from_row([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 0)
    'ANTT'
    >>> make_str_from_row([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 2)
    "Index out of range"
    """
    if row_index >= len(board):
        return "Index out of range"
    return ''.join(board[row_index])
def make_str_from_column(board, column_index):
    """ (list of list of str, int) -> str

    Return the characters from the column of the board with index
    column_index as a single string. Rows too short to have that column
    contribute a space, so the result always has one character per row.

    >>> make_str_from_column([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 1)
    'NS'
    >>> make_str_from_column([['A', 'N', 'T', 'T'], ['X', 'S', 'O']], 3)
    'T '
    """
    # BUG FIX: the original appended '' (a no-op) for missing cells, so the
    # documented doctest result 'T ' could never be produced; a space is now
    # appended, matching the docstring's stated behavior.
    chars = []
    for row in board:
        if column_index < len(row):
            chars.append(row[column_index])
        else:
            chars.append(' ')
    return ''.join(chars)
def board_contains_word_in_row(board, word):
    """ (list of list of str, str) -> bool

    Return True if and only if at least one row of the board contains word.

    Precondition: board has at least one row and one column, and word is a
    valid word.

    >>> board_contains_word_in_row([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 'SOB')
    True
    """
    return any(word in make_str_from_row(board, idx)
               for idx in range(len(board)))
def board_contains_word_in_column(board, word):
    """ (list of list of str, str) -> bool

    Return True if and only if one or more of the columns of the board
    contains word.

    Precondition: board has at least one row and one column, and word is a
    valid word.

    >>> board_contains_word_in_column([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 'NO')
    False
    """
    # BUG FIX (performance): the original nested a loop over every row
    # around the column loop, rebuilding and re-searching each column string
    # len(board) times. Checking each distinct column once is equivalent;
    # max row length covers ragged boards exactly as the old loops did.
    num_columns = max((len(row) for row in board), default=0)
    for col in range(num_columns):
        if word in make_str_from_column(board, col):
            return True
    return False
def board_contains_word(board, word):
    """ (list of list of str, str) -> bool

    Return True if and only if word appears on the board, read either down
    a column or across a row.

    Precondition: board has at least one row and one column.

    >>> board_contains_word([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 'ANT')
    True
    >>> board_contains_word([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 'NS')
    True
    >>> board_contains_word([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 'NAB')
    False
    """
    in_column = board_contains_word_in_column(board, word)
    return in_column or board_contains_word_in_row(board, word)
def word_score(word):
    """ (str) -> int

    Return the point value the word earns.

    Word length: < 3: 0 points
                 3-6: 1 point per character for all characters in word
                 7-9: 2 points per character for all characters in word
                 10+: 3 points per character for all characters in word

    >>> word_score('DRUDGERY')
    16
    """
    n = len(word)
    if 3 <= n <= 6:
        return n
    if 7 <= n <= 9:
        return 2 * n
    if n >= 10:
        return 3 * n
    return 0
def update_score(player_info, word):
    """ ([str, int] list, str) -> NoneType

    player_info is a list with the player's name and score. Update player_info
    in place by adding the point value word earns to the player's score.

    >>> update_score(['Jonathan', 4], 'ANT')
    """
    # BUG FIX: the score lives at player_info[1]; the old code indexed
    # player_info[0][1] -- a character of the name string -- and raised
    # TypeError on assignment.
    player_info[1] = player_info[1] + word_score(word)
def num_words_on_board(board, words):
    """ (list of list of str, list of str) -> int

    Return the number of words from `words` that appear on the board.

    >>> num_words_on_board([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], ['ANT', 'BOX', 'SOB', 'TO'])
    3
    """
    return sum(1 for word in words if board_contains_word(board, word))
def read_words(words_file):
    """ (str) -> list of str

    Return a list of all words (with trailing newlines removed) read from
    the file at path words_file. (The previous docstring claimed an open
    file was expected, but the function has always opened the path itself.)

    Precondition: Each line of the file contains a word in uppercase characters
    from the standard English alphabet.
    """
    # BUG FIXES: the file handle was never closed, and `line[:-1]` chopped
    # the last character of the final word when the file had no trailing
    # newline; `with` + rstrip('\n') handles both.
    with open(words_file, 'r') as f:
        return [line.rstrip('\n') for line in f]
def read_board(board_file):
    """ (str) -> list of list of str

    Return a board read from the file at path board_file (the path is
    forwarded to read_words, which opens it). The board file contains one
    row of the board per line; newlines are not included in the board.
    """
    # Cleanup: removed the unused counter `i` and the manual
    # character-by-character copy -- list(word) builds the same row.
    return [list(word) for word in read_words(board_file)]
| {
"repo_name": "shilpavijay/Word-Search-Board-Game",
"path": "a3.py",
"copies": "1",
"size": "5844",
"license": "unlicense",
"hash": 6597106586216022000,
"line_mean": 26.1813953488,
"line_max": 101,
"alpha_frac": 0.5550992471,
"autogenerated": false,
"ratio": 3.3897911832946637,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44448904303946635,
"avg_score": null,
"num_lines": null
} |
'''A board is a list of list of str. For example, the board
ANTT
XSOB
is represented as the list
[['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']]
A word list is a list of str. For example, the list of words
ANT
BOX
SOB
TO
is represented as the list
['ANT', 'BOX', 'SOB', 'TO']
'''
def is_valid_word(wordlist, word):
    ''' (list of str, str) -> bool

    Return True if and only if word is an element of wordlist.

    >>> is_valid_word(['ANT', 'BOX', 'SOB', 'TO'], 'TO')
    True
    '''
    for candidate in wordlist:
        if candidate == word:
            return True
    return False
def make_str_from_row(board, row_index):
    ''' (list of list of str, int) -> str

    Return the characters from the row of the board with index row_index
    joined into a single string.

    >>> make_str_from_row([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 0)
    'ANTT'
    '''
    return ''.join(board[row_index])
def make_str_from_column(board, column_index):
    ''' (list of list of str, int) -> str

    Return the characters from the column of the board with index
    column_index joined into a single string.

    >>> make_str_from_column([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 1)
    'NS'
    '''
    return ''.join(row[column_index] for row in board)
def board_contains_word_in_row(board, word):
    ''' (list of list of str, str) -> bool

    Return True if and only if at least one row of the board contains word.

    Precondition: board has at least one row and one column, and word is a
    valid word.

    >>> board_contains_word_in_row([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 'SOB')
    True
    '''
    return any(word in make_str_from_row(board, idx)
               for idx in range(len(board)))
def board_contains_word_in_column(board, word):
    ''' (list of list of str, str) -> bool

    Return True if and only if at least one column of the board contains
    word. The first row's length fixes the number of columns examined.

    Precondition: board has at least one row and one column, and word is a
    valid word.

    >>> board_contains_word_in_column([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 'NO')
    False
    '''
    return any(word in make_str_from_column(board, idx)
               for idx in range(len(board[0])))
def board_contains_word(board, word):
    '''(list of list of str, str) -> bool

    Return True if and only if word appears on the board, read either down
    a column or across a row.

    Precondition: board has at least one row and one column.

    >>> board_contains_word([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], 'ANT')
    True
    '''
    in_column = board_contains_word_in_column(board, word)
    if in_column:
        return True
    return board_contains_word_in_row(board, word)
def word_score(word):
    '''(str) -> int

    Return the point value the word earns.

    Word length: < 3: 0 points
                 3-6: 1 point per character in word
                 7-9: 2 points per character in word
                 10+: 3 points per character in word

    >>> word_score('DRUDGERY')
    16
    '''
    n = len(word)
    if n < 3:
        multiplier = 0
    elif n <= 6:
        multiplier = 1
    elif n <= 9:
        multiplier = 2
    else:
        multiplier = 3
    return multiplier * n
def update_score(player_info, word):
    '''([str, int] list, str) -> NoneType

    player_info is a list with the player's name and score. Update
    player_info in place by adding the point value word earns to the
    player's score.

    >>> update_score(['Jonathan', 4], 'ANT')
    '''
    player_info[1] = player_info[1] + word_score(word)
def num_words_on_board(board, words):
    '''(list of list of str, list of str) -> int

    Return the number of words from `words` that appear on the board.

    >>> num_words_on_board([['A', 'N', 'T', 'T'], ['X', 'S', 'O', 'B']], ['ANT', 'BOX', 'SOB', 'TO'])
    3
    '''
    return sum(1 for word in words if board_contains_word(board, word))
def read_words(words_file):
    ''' (file open for reading) -> list of str

    Return a list of all words (with surrounding whitespace removed) from
    the open file words_file.

    Precondition: Each line of the file contains a word in uppercase characters
    from the standard English alphabet.
    '''
    return [line.strip() for line in words_file.readlines()]
def read_board(board_file):
    ''' (file open for reading) -> list of list of str

    Return a board read from the open file board_file. The board file
    contains one row of the board per line; newlines are not included in
    the board.
    '''
    return [list(line.strip()) for line in board_file.readlines()]
if __name__ == '__main__':
    # Run the docstring examples above as tests when executed directly.
    import doctest
    doctest.testmod()
| {
"repo_name": "penkz/python-fundamentals",
"path": "Assignment3/a3.py",
"copies": "1",
"size": "5044",
"license": "mit",
"hash": 8680311544882167000,
"line_mean": 25.2708333333,
"line_max": 101,
"alpha_frac": 0.5773195876,
"autogenerated": false,
"ratio": 3.438309475119291,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4515629062719291,
"avg_score": null,
"num_lines": null
} |
"""A board is the main area of play for different players in the game. This is
were all game pieces are played and is used to determine most of the players'
final scores.
A board inclues multiple elements: buildings (contigious blocks of building
pieces and stables), a market street (or streets), towers with walls branching
off them, and a well.
"""
import random
from Player import *
from Location import *
from Building import *
from Market import *
from Tower import *
from Move import *
def make_board(rows, columns):
    """Create a board dict with the default game setup.

    The well and the market's starting merchant are each dropped on a
    random central square (re-rolled until they differ), the building list
    starts empty, and a fresh tower set is made for the given dimensions.
    """
    well = random_central_location(rows, columns)
    merchant_start = random_central_location(rows, columns)
    while merchant_start == well:
        merchant_start = random_central_location(rows, columns)
    return {
        'Rows': rows,
        'Columns': columns,
        'Buildings': [],
        'Market': make_market(merchant_start),
        'Towers': make_towers(rows, columns),
        'Well': well,
    }
def clone_board(board):
    """Return a deep clone of board: buildings, market and towers are
    copied; dimensions and the (immutable) well location are shared."""
    copied_buildings = [clone_building(b) for b in get_buildings(board)]
    return {
        'Rows': get_rows(board),
        'Columns': get_columns(board),
        'Buildings': copied_buildings,
        'Market': clone_market(get_market(board)),
        'Towers': clone_towers(get_towers(board)),
        'Well': get_well(board),
    }
def get_piece(board, location):
    """Return the piece type at location -- MERCHANT, STABLE, BUILDING or
    WELL as defined in Move -- or None (implicitly) for an empty square.

    Raises AssertionError when location lies outside the board bounds."""
    row = get_row(location)
    col = get_column(location)
    assert 0 <= row < get_rows(board) and 0 <= col < get_columns(board)
    # BUG FIX: the market test was passed `(board)` instead of the queried
    # location, so merchant squares could never be identified correctly.
    if market_contains_location(get_market(board), location):
        return MERCHANT
    for building in get_buildings(board):
        if location in get_stable_locations(building):
            return STABLE
        elif building_contains_location(building, location):
            return BUILDING
    if location == get_well(board):
        return WELL
def random_central_location(rows, columns):
    """Return a uniformly random location strictly inside the walls, i.e.
    with row in [1, rows-2] and column in [1, columns-2]."""
    row = random.randrange(rows - 2) + 1
    col = random.randrange(columns - 2) + 1
    return make_location(row, col)
def get_rows(board):
    """Gets the number of rows in a board (the 'Rows' entry)."""
    return board['Rows']
def get_columns(board):
    """Gets the number of columns in a board (the 'Columns' entry)."""
    return board['Columns']
def get_buildings(board):
    """Gets the list of buildings on a board (mutable; callers append)."""
    return board['Buildings']
def get_market(board):
    """Gets the market (list of streets) in a board."""
    return board['Market']
def get_towers(board):
    """Gets the towers and walls in a board."""
    return board['Towers']
def get_well(board):
    """Gets the location of the well on a board."""
    return board['Well']
def get_all_locations(board):
    """Return a list of every location on the board, in row-major order."""
    rows = get_rows(board)
    columns = get_columns(board)
    locations = []
    for index in range(rows * columns):
        locations.append(make_location(index // columns, index % columns))
    return locations
def get_buildings_claimed_by(board, player_name):
    """Return the buildings on board whose owner is player_name."""
    claimed = []
    for building in get_buildings(board):
        if get_owner(building) == player_name:
            claimed.append(building)
    return claimed
def get_bounded_set(board, location_set):
    """Return the subset of location_set that lies within the board bounds.

    A location is in bounds when 0 <= row < rows and 0 <= column < columns.
    """
    rows = get_rows(board)
    columns = get_columns(board)
    return {loc for loc in location_set
            if 0 <= get_row(loc) < rows and 0 <= get_column(loc) < columns}
def get_stable_piece_location(board):
    """Gets all the locations in which a stable can be attached to a building.

    For each building, candidate squares are its attachment points minus the
    footprint and adjacency zone of every *other* building; the union of
    those candidates then has market streets, the well and the well's
    neighbours removed, and is finally clipped to the board bounds.
    """
    possible = set()
    for building in get_buildings(board):
        # NOTE(review): spelled 'peice' here (and in
        # get_building_piece_locations) but 'piece' in
        # can_place_building_piece -- confirm which helper Building.py
        # actually defines.
        temp = set(get_building_peice_attach(building))
        for building2 in get_buildings(board):
            if building2 != building:
                # Drop squares occupied by, or adjacent to, other buildings.
                temp = temp.difference(set(get_building_and_stables(building2)))
                temp = temp.difference(set(get_building_stable_adjacent(building2)))
        possible = possible.union(temp)
    # Merchants block stable placement.
    for street in get_market(board):
        for loc in street:
            if loc in possible:
                possible.remove(loc)
    # The well and its neighbourhood are off limits.
    well = get_well(board)
    if well in possible:
        possible.remove(well)
    possible -= set(get_adjacent(get_well(board)))
    return get_bounded_set(board, possible)
def get_building_piece_locations(board, color):
    """Gets all the locations in which a building piece can be attached for a
    specific color. If there is no building of this color currently active,
    this will return all open locations on the board that are not adjacent to
    a structure. This will return an empty list if nothing can be attached to
    the building."""
    active = get_active_building(board, color)
    #If there is no active buidling, return all open locations
    possible = set()
    if active == None:
        possible = set(get_all_locations(board))
    else:
        # NOTE(review): 'peice' spelling here vs 'piece' in
        # can_place_building_piece -- confirm against Building.py.
        possible = get_building_peice_attach(active)
    possible = get_bounded_set(board, possible)
    # Remove squares occupied by merchants.
    for street in get_market(board):
        possible -= set(street);
    # Remove squares occupied by, or adjacent to, every *other* building.
    for building in get_buildings(board):
        if building != active:
            #print(get_building_and_stables(building))
            for loc in get_building_and_stables(building):
                if loc in possible:
                    possible.remove(loc)
            for loc in get_building_stable_adjacent(building):
                if loc in possible:
                    possible.remove(loc)
    # The well and its neighbourhood are off limits.
    well = get_well(board)
    if well in possible:
        possible.remove(well)
    possible -= set(get_adjacent(get_well(board)))
    return get_bounded_set(board, possible);
def can_place_building_piece(board, location, color):
    """Checks if a piece can be added to a board at a specific location. This
    involves a few checks and can be one of two cases.

    The first case is if there is no active building of that color, then the
    building must be placed in an empty location that is not adjacent to any
    structure (well or other building).

    The second case is if there is an active building of that color. Then the
    piece must be placed in an empty location that is orthogonal to the
    active building. It must be placed contigious to the building pieces in the
    building and cannot be attached to a stable (stables attach to buidlings,
    buildings cannot attach to stables)."""
    #If the location is not empty, return False
    if not is_location_empty(board, location):
        return False
    #Get active building of given color
    active = get_active_building(board, color)
    #If there is no active building, check if is adjacent to a structure
    if active == None:
        return not is_adjacent_to_structure(board, location)
    #If there is an active building, check to make sure the location is
    # contigious to the building
    # NOTE(review): spelled 'piece' here but 'peice' in the two
    # get_*_locations helpers above -- one spelling is presumably a
    # NameError at runtime; confirm which helper Building.py defines.
    return location in get_building_piece_attach(active)
def start_new_building(board, location, color):
    """Starts a new building at a given location.

    The building is created with the given color and appended to the
    board's building list.
    """
    get_buildings(board).append(make_building(color, location))
def is_adjacent_to_structure(board, location):
    """Return True when location is adjacent to the well or to any building
    (including that building's stables)."""
    if location in get_adjacent(get_well(board)):
        return True
    return any(location in get_building_stable_adjacent(building)
               for building in get_buildings(board))
def add_market_street(board, start):
    """Adds a new market street to the market and makes this street the active
    street."""
    # NOTE(review): this line is broken as written -- `market` is not
    # defined in this scope (NameError when called), and the street-adding
    # helper is invoked as a method on the market object even though this
    # module manipulates markets through free functions everywhere else.
    # Confirm against Market.py what the intended call is.
    get_market(board).add_market_street(market, start)
def can_place_on_current_street(board):
    """Return True when the active market street has at least one legal,
    in-bounds, unoccupied square it can grow into.

    The filtering mirrors the first phase of get_merchant_place_locations:
    candidates come from the street's possible additions, minus building
    footprints (with stables) and the well.
    """
    market = get_market(board)
    #Get possible additions to current active street.
    possible = get_possible_addition(market)
    #Filter out locations already occupied
    possible = get_bounded_set(board, possible)
    for building in get_buildings(board):
        possible -= set(get_building_and_stables(building))
    # BUG FIX: set.remove raised KeyError whenever the well was not among
    # the candidates; discard is a no-op in that case (matching the guarded
    # `if well in possible` pattern used in get_merchant_place_locations).
    possible.discard(get_well(board))
    #If there are open spaces, return the open spaces.
    return len(possible) > 0
def get_merchant_place_locations(board):
    """This will get all the locations on the board in which a merchant can be
    placed. If the market street has open locations at the head or tail of the
    street, this will return possible open locations. If the market street does
    not have open locations to attach a merchant, this will return every open
    location on the board in which a new street can be started. """
    market = get_market(board)
    #Get possible additions to current active street.
    possible = get_possible_addition(market)
    #Filter out locations already occupied
    possible = get_bounded_set(board, possible)
    for building in get_buildings(board):
        possible -= set(get_building_and_stables(building))
    well = get_well(board)
    if well in possible:
        possible.remove(well)
    #If there are open spaces, return the open spaces.
    if len(possible) > 0:
        return possible
    #If there are no open spaces, get all the locations.
    # Fallback: a new street may start anywhere open, away from existing
    # streets, buildings and the well.
    possible = set(get_all_locations(board))
    #Remove currently occupied locations and locations next to the streets.
    for street in market:
        possible -= set(street)
        possible -= set(get_adjacent_to_street(street))
    for building in get_buildings(board):
        possible -= set(get_building_and_stables(building))
    if well in possible:
        possible.remove(well)
    return get_bounded_set(board, possible)
def is_location_empty(board, location):
    """Return True when location holds nothing: no well, no building piece
    or stable, and no merchant."""
    if location == get_well(board):
        return False
    for building in get_buildings(board):
        # BUG FIX: this called an undefined, misspelled helper
        # ('buliding_contans_location_stables'); membership in the
        # building-plus-stables footprint is the occupancy test the rest of
        # this module uses.
        if location in get_building_and_stables(building):
            return False
    # BUG FIX: `market` was an undefined name here (NameError when the
    # market branch was reached).
    if market_contains_location(get_market(board), location):
        return False
    return True
def get_buildings_by_color(board, color):
    """Return all buildings of the given color on board; an empty list
    when the board has no buildings of that color."""
    matches = []
    for building in get_buildings(board):
        if get_building_color(building) == color:
            matches.append(building)
    return matches
def get_active_building(board, color):
    """Return the active (ownerless) building of the given color, or None
    when every building of that color is owned."""
    unowned = (building for building in get_buildings_by_color(board, color)
               if not has_owner(building))
    return next(unowned, None)
def get_num_walls_adjacent_to_building(board, building):
    """Count the wall segments orthogonally adjacent to a building
    (stables included in the building's footprint)."""
    walls = get_wall_locations(get_towers(board))
    orthogonal = get_building_stable_orthogonal(building)
    return sum(1 for wall in walls if wall in orthogonal)
def get_num_merchants_adjacent_to_building(board, building):
    """Count the merchants orthogonally adjacent to a building (stables
    included in the building's footprint)."""
    merchants = []
    for street in get_market(board):
        merchants.extend(street)
    orthogonal = get_building_stable_orthogonal(building)
    return sum(1 for merchant in merchants if merchant in orthogonal)
def get_connected_towers(board, building):
    """Return the tower numbers a building is connected to. Towers are
    numbered 1 through 4, each worth different points with a different
    associated tile; a building connects to a tower when any of that
    tower's wall squares is orthogonally adjacent to the building."""
    def is_connected_to_tower(tower_number, orthogonal):
        # True when any wall square of this tower touches the building.
        tower_walls = get_wall_locations_for_tower(get_towers(board), tower_number)
        for wall in tower_walls:
            if wall in orthogonal:
                return True
        return False
    # BUG FIX: this read `get_building_stable_othogonal` (missing 'r'),
    # a NameError; the correctly spelled helper is the one used by the two
    # adjacency counters above.
    orthogonal = get_building_stable_orthogonal(building)
    return [num for num in range(1, 5) if is_connected_to_tower(num, orthogonal)]
| {
"repo_name": "nicholas-maltbie/Medina",
"path": "Board.py",
"copies": "1",
"size": "12912",
"license": "mit",
"hash": -5858099316050800000,
"line_mean": 40.0586319218,
"line_max": 99,
"alpha_frac": 0.6648079306,
"autogenerated": false,
"ratio": 4.045112781954887,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005743472849413815,
"num_lines": 307
} |
"""abode output utilities
.. codeauthor:: Joe DeCapo <joe@polka.cat>
"""
import clowder.util.formatting as fmt
from clowder.util.console import CONSOLE
def separator(message: str, character: str) -> None:
    """Print a bold horizontal rule exactly as wide as ``message``."""
    CONSOLE.stdout(fmt.bold(character * len(message)))
def h1(message: str, newline: bool = True) -> None:
    """Print a level-1 heading: bold text underlined with '='."""
    if newline:
        CONSOLE.stdout()
    heading = fmt.bold(message)
    CONSOLE.stdout(heading)
    separator(message, '=')
def h2(message: str, newline: bool = True) -> None:
    """Print a level-2 heading: bold text underlined with '-'."""
    if newline:
        CONSOLE.stdout()
    heading = fmt.bold(message)
    CONSOLE.stdout(heading)
    separator(message, '-')
def h3(message: str, newline: bool = True) -> None:
    """Print a level-3 heading: markdown-style '# ' prefix, bold+underlined."""
    if newline:
        CONSOLE.stdout()
    styled = fmt.underline(f'# {message}')
    CONSOLE.stdout(fmt.bold(styled))
def h4(message: str, newline: bool = True) -> None:
    """Print a level-4 heading: markdown-style '## ' prefix, bold+underlined."""
    if newline:
        CONSOLE.stdout()
    styled = fmt.underline(f'## {message}')
    CONSOLE.stdout(fmt.bold(styled))
def h5(message: str, newline: bool = True) -> None:
    """Print a level-5 heading: markdown-style '### ' prefix, bold+underlined."""
    if newline:
        CONSOLE.stdout()
    styled = fmt.underline(f'### {message}')
    CONSOLE.stdout(fmt.bold(styled))
| {
"repo_name": "JrGoodle/clowder",
"path": "clowder/util/output.py",
"copies": "1",
"size": "1069",
"license": "mit",
"hash": 4454790670431915000,
"line_mean": 22.7555555556,
"line_max": 61,
"alpha_frac": 0.6417212348,
"autogenerated": false,
"ratio": 3.21021021021021,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43519314450102103,
"avg_score": null,
"num_lines": null
} |
# A bot that blindly plays 2048
# Henry Barrow 2015
from selenium import webdriver # Need to 'pip install selenium' first
from selenium.webdriver.common.keys import Keys
# Launch Firefox and 2048
# NOTE: opens a real browser window; requires Firefox and its webdriver.
browser = webdriver.Firefox()
browser.get('http://doge2048.com/')
def play2048():
    """Play one blind game of 2048 and return the final score as an int.

    Blind strategy: cycle DOWN/RIGHT/DOWN/LEFT, pressing UP only when the
    score did not change (i.e. the cycle made no progress).
    NOTE: Python 2 script; relies on the module-level selenium `browser`.
    """
    # locate grid, game-over, and score by css selectors
    elem = browser.find_element_by_css_selector('.game-container')
    GameOver = browser.find_element_by_css_selector('.game-message > p:nth-child(1)')
    scoreElem = browser.find_element_by_css_selector('.score-container')
    score = scoreElem.text.strip()
    print 'Now playing...'
    #blind logic -- loop until the game-over message element has text
    while len(GameOver.text.strip()) == 0:
        elem.send_keys(Keys.DOWN)
        elem.send_keys(Keys.RIGHT)
        elem.send_keys(Keys.DOWN)
        elem.send_keys(Keys.LEFT)
        # Press UP only as necessary
        if score == scoreElem.text.strip():
            elem.send_keys(Keys.UP)
        score = scoreElem.text.strip()
    # the score element may include a "+N" bonus line; keep the first line only
    if score.find('\n') > 0:
        score = score[:score.find('\n')]
    print 'Game Over! Score = ' + score + '\n'
    return int(score)
# Play until target score is exceeded
score = play2048()
while score < 10000:
    # Click the "Try again" button and replay until we clear 10000 points.
    TryElem = browser.find_element_by_css_selector('.retry-button')
    TryElem.click()
    score = play2048()
| {
"repo_name": "hgbarrow/PythonSnippets",
"path": "doge2048bot.py",
"copies": "1",
"size": "1218",
"license": "mit",
"hash": -2141779333012321500,
"line_mean": 27.3255813953,
"line_max": 82,
"alpha_frac": 0.7085385878,
"autogenerated": false,
"ratio": 3.0680100755667508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42765486633667504,
"avg_score": null,
"num_lines": null
} |
"""A bottom-up tree matching algorithm implementation meant to speed
up 2to3's matching process. After the tree patterns are reduced to
their rarest linear path, a linear Aho-Corasick automaton is
created. The linear automaton traverses the linear paths from the
leaves to the root of the AST and returns a set of nodes for further
matching. This reduces significantly the number of candidate nodes."""
__author__ = "George Boutsioukis <gboutsioukis@gmail.com>"
import logging
import itertools
from collections import defaultdict
from . import pytree
from .btm_utils import reduce_tree
class BMNode(object):
    """A single state of the Aho-Corasick matching automaton."""
    # Class-wide counter: every node created anywhere receives a fresh,
    # strictly increasing id.
    count = itertools.count()
    def __init__(self):
        self.id = next(BMNode.count)
        # Free-form label used only by the debugging printout.
        self.content = ''
        # Fixers whose linear pattern terminates at this state.
        self.fixers = []
        # Maps a token (type number or name string) to the successor state.
        self.transition_table = {}
class BottomMatcher(object):
    """The main matcher class. After instantiating the patterns should
    be added using the add_fixer method"""
    def __init__(self):
        # NOTE(review): self.match and self.nodes appear unused by the
        # methods below -- possibly vestigial; confirm before removing.
        self.match = set()
        self.root = BMNode()
        self.nodes = [self.root]
        # All fixers ever registered, in registration order.
        self.fixers = []
        self.logger = logging.getLogger("RefactoringTool")
    def add_fixer(self, fixer):
        """Reduces a fixer's pattern tree to a linear path and adds it
        to the matcher(a common Aho-Corasick automaton). The fixer is
        appended on the matching states and called when they are
        reached"""
        self.fixers.append(fixer)
        tree = reduce_tree(fixer.pattern_tree)
        linear = tree.get_linear_subpattern()
        # Every end state of the linear pattern becomes a match state
        # that triggers this fixer.
        match_nodes = self.add(linear, start=self.root)
        for match_node in match_nodes:
            match_node.fixers.append(fixer)
    def add(self, pattern, start):
        "Recursively adds a linear pattern to the AC automaton"
        #print("adding pattern", pattern, "to", start)
        if not pattern:
            #print("empty pattern")
            return [start]
        if isinstance(pattern[0], tuple):
            #alternatives
            #print("alternatives")
            match_nodes = []
            for alternative in pattern[0]:
                #add all alternatives, and add the rest of the pattern
                #to each end node
                end_nodes = self.add(alternative, start=start)
                for end in end_nodes:
                    match_nodes.extend(self.add(pattern[1:], end))
            return match_nodes
        else:
            #single token
            #not last
            if pattern[0] not in start.transition_table:
                #transition did not exist, create new
                next_node = BMNode()
                start.transition_table[pattern[0]] = next_node
            else:
                #transition exists already, follow
                next_node = start.transition_table[pattern[0]]
            if pattern[1:]:
                end_nodes = self.add(pattern[1:], start=next_node)
            else:
                end_nodes = [next_node]
            return end_nodes
    def run(self, leaves):
        """The main interface with the bottom matcher. The tree is
        traversed from the bottom using the constructed
        automaton. Nodes are only checked once as the tree is
        retraversed. When the automaton fails, we give it one more
        shot(in case the above tree matches as a whole with the
        rejected leaf), then we break for the next leaf. There is the
        special case of multiple arguments(see code comments) where we
        recheck the nodes
        Args:
            The leaves of the AST tree to be matched
        Returns:
            A dictionary of node matches with fixers as the keys
        """
        # Automaton state is initialized once and only reset on a mismatch.
        current_ac_node = self.root
        results = defaultdict(list)
        for leaf in leaves:
            current_ast_node = leaf
            # Walk from this leaf up toward the root of the AST.
            while current_ast_node:
                current_ast_node.was_checked = True
                for child in current_ast_node.children:
                    # multiple statements, recheck
                    if isinstance(child, pytree.Leaf) and child.value == ";":
                        current_ast_node.was_checked = False
                        break
                if current_ast_node.type == 1:
                    #name
                    node_token = current_ast_node.value
                else:
                    node_token = current_ast_node.type
                if node_token in current_ac_node.transition_table:
                    #token matches
                    current_ac_node = current_ac_node.transition_table[node_token]
                    for fixer in current_ac_node.fixers:
                        results[fixer].append(current_ast_node)
                else:
                    #matching failed, reset automaton
                    current_ac_node = self.root
                    if (current_ast_node.parent is not None
                        and current_ast_node.parent.was_checked):
                        #the rest of the tree upwards has been checked, next leaf
                        break
                    #recheck the rejected node once from the root
                    if node_token in current_ac_node.transition_table:
                        #token matches
                        current_ac_node = current_ac_node.transition_table[node_token]
                        for fixer in current_ac_node.fixers:
                            results[fixer].append(current_ast_node)
                current_ast_node = current_ast_node.parent
        return results
    def print_ac(self):
        "Prints a graphviz diagram of the BM automaton(for debugging)"
        print("digraph g{")
        def print_node(node):
            # Depth-first walk of the transition table, one edge per line.
            for subnode_key in node.transition_table.keys():
                subnode = node.transition_table[subnode_key]
                print("%d -> %d [label=%s] //%s" %
                      (node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers)))
                if subnode_key == 1:
                    print(subnode.content)
                print_node(subnode)
        print_node(self.root)
        print("}")
# taken from pytree.py for debugging; only used by print_ac
_type_reprs = {}
def type_repr(type_num):
    """Return the grammar-symbol name for *type_num*, or the number itself
    when no symbol with that value exists (result is cached)."""
    global _type_reprs
    if not _type_reprs:
        from .pygram import python_symbols
        # printing tokens is possible but not as useful
        # from .pgen2 import token // token.__dict__.items():
        _type_reprs = {
            val: name
            for name, val in python_symbols.__dict__.items()
            if type(val) == int
        }
    return _type_reprs.setdefault(type_num, type_num)
| {
"repo_name": "zooba/PTVS",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/lib2to3/btm_matcher.py",
"copies": "33",
"size": "6623",
"license": "apache-2.0",
"hash": 7580694535180002000,
"line_mean": 39.6319018405,
"line_max": 89,
"alpha_frac": 0.5751170165,
"autogenerated": false,
"ratio": 4.323107049608355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0046035295435731165,
"num_lines": 163
} |
from tkinter import *
import time
import random
SLEEP_TIME = 0.01  # seconds between animation frames
PADDLE_SPEED = [20, 10]  # [x, y] pixels the paddle moves per key press
BALL_SPEED = [1, 3]  # initial [x, y] ball velocity in pixels per frame
# Model for the Ball class
# canvas is the tkinter canvas the ball is drawn on
# color is the fill color of the ball
# speed [x, y] is the per-frame velocity of the ball (sign gives direction)
class Ball:
    def __init__(self, canvas, color, speed):
        # The ball is a 10x10 oval created at the origin then moved to (250, 250).
        self.canvas = canvas
        self.color = color
        self.ball = canvas.create_oval(0, 0, 10, 10, fill=color)
        self.speed = speed
        canvas.move(self.ball, 250, 250)
    # paddle is the identifier of the paddle element
    # canvas.coords returns [x1, y1, x2, y2]:
    # (0,1)---------
    # |            |
    # |            |
    # ---------(2,3)
    def move(self, paddle):
        cur_pos = self.canvas.coords(self.ball)
        paddle_pos = self.canvas.coords(paddle)
        self.canvas.move(self.ball, self.speed[0], self.speed[1])
        # Bounce off the four canvas edges by forcing the sign of the speed.
        if cur_pos[1] <= 0:
            self.speed[1] = abs(self.speed[1])
        if cur_pos[3] >= self.canvas.winfo_height():
            self.speed[1] = -abs(self.speed[1])
        if cur_pos[0] <= 0:
            self.speed[0] = abs(self.speed[0])
        if cur_pos[2] >= self.canvas.winfo_width():
            self.speed[0] = -abs(self.speed[0])
        # check against the top surface of the paddle: either bottom corner
        # of the ball inside the paddle box while the ball moves downward
        if cur_pos[2] >= paddle_pos[0] and cur_pos[2] <= paddle_pos[2] and cur_pos[3] >= paddle_pos[1] and cur_pos[3] <= paddle_pos[3] and self.speed[1] > 0:
            self.speed[1] = -abs(self.speed[1])
        if cur_pos[0] >= paddle_pos[0] and cur_pos[0] <= paddle_pos[2] and cur_pos[3] >= paddle_pos[1] and cur_pos[3] <= paddle_pos[3] and self.speed[1] > 0:
            self.speed[1] = -abs(self.speed[1])
        # check against the bottom surface of the paddle (ball moving upward)
        if cur_pos[2] >= paddle_pos[0] and cur_pos[2] <= paddle_pos[2] and cur_pos[1] <= paddle_pos[3] and cur_pos[1] >= paddle_pos[1] and self.speed[1] < 0:
            self.speed[1] = abs(self.speed[1])
        if cur_pos[0] >= paddle_pos[0] and cur_pos[0] <= paddle_pos[2] and cur_pos[1] <= paddle_pos[3] and cur_pos[1] >= paddle_pos[1] and self.speed[1] < 0:
            self.speed[1] = abs(self.speed[1])
    def hit_bottom(self):
        # NOTE(review): 500 is the hard-coded canvas height created in main();
        # keep this constant in sync with the Canvas constructed there.
        cur_pos = self.canvas.coords(self.ball)
        if (cur_pos[3] >= 500):
            return True
        else:
            return False
    def stop(self):
        # Zero the velocity in place; move() then leaves the ball stationary.
        self.speed = [0, 0]
class Paddle:
    """Player-controlled paddle; arrow keys move it within the canvas."""
    def __init__(self, canvas, color, x_speed, y_speed):
        self.canvas = canvas
        self.color = color
        self.x_speed = x_speed
        self.y_speed = y_speed
        # 60x10 rectangle created at the origin, then moved to (250, 400).
        self.paddle = self.canvas.create_rectangle(0, 0, 60, 10, fill=color)
        canvas.move(self.paddle, 250, 400)
        self.canvas.bind_all('<KeyPress>', self.move)
    def move(self, event):
        """Key handler: shift the paddle one step, clamped at canvas edges."""
        left, top, right, bottom = self.canvas.coords(self.paddle)
        # Default step (used verbatim for non-arrow keys, as before).
        dx = self.x_speed
        dy = self.y_speed
        key = event.keysym
        if key == 'Left':
            dx, dy = -(abs(self.x_speed)), 0
            if left <= 0:
                dx = 0
        if key == 'Right':
            dx, dy = abs(self.x_speed), 0
            if right >= self.canvas.winfo_width():
                dx = 0
        if key == 'Up':
            dx, dy = 0, -(abs(self.y_speed))
            if top <= 0:
                dy = 0
        if key == 'Down':
            dx, dy = 0, abs(self.y_speed)
            if bottom >= self.canvas.winfo_height():
                dy = 0
        self.canvas.move(self.paddle, dx, dy)
    def stop(self):
        """Freeze the paddle by zeroing both speed components."""
        self.x_speed = 0
        self.y_speed = 0
class HitBlock:
    """A 20x20 block the player must shield from the ball (planned feature).

    Random blocks will sit mid-canvas; each time the ball passes through one
    it counts as a hit and the block's color darkens. Once a block turns
    black the player loses. (x, y) is the block's top-left corner.
    """
    def __init__(self, canvas, color, x, y):
        self.canvas = canvas
        self.color = color
        self.x = x
        self.y = y
        self.hit_block = self.canvas.create_rectangle(
            self.x, self.y, self.x + 20, self.y + 20, fill=self.color)
        canvas.move(self.hit_block, self.x, self.y)
# Setup the tk environment: title and configuration
def setup(tk):
tk.title("Bounce Ball Game")
tk.resizable(0,0)
tk.wm_attributes("-topmost", 0)
# Create hitblock(s) at random locations
def create_hit_blocks(canvas, num_blocks):
    """Create ``num_blocks`` white HitBlocks at random mid-canvas spots."""
    return [
        HitBlock(canvas, "white",
                 random.randint(100, 200), random.randint(100, 200))
        for _ in range(num_blocks)
    ]
# Main function
# main loop
def main():
    # setup the tkinter root window
    tk = Tk()
    setup(tk)
    # setup the canvas and initiate the elements
    canvas = Canvas(tk, width=500, height=500, bd=0, highlightthickness=0)
    canvas.pack()
    tk.update()
    paddle = Paddle(canvas=canvas, color="blue", x_speed=PADDLE_SPEED[0], y_speed=PADDLE_SPEED[1])
    ball = Ball(canvas=canvas, color="red", speed=BALL_SPEED)
    game_over_msg = canvas.create_text(250, 200, text="GAME OVER!", state='hidden')
    #hit_blocks = create_hit_blocks(canvas, 3)
    # Animation loop: the loop never exits; it keeps pumping tk events so the
    # window stays responsive even after the game ends.
    while True:
        tk.update()
        ball.move(paddle.paddle)
        tk.update()
        time.sleep(SLEEP_TIME)
        # Once the ball reaches the bottom, freeze everything and reveal
        # the game-over text.
        if (ball.hit_bottom()):
            ball.stop()
            paddle.stop()
            time.sleep(1)
            canvas.itemconfig(game_over_msg, state='normal')
main()
| {
"repo_name": "VictaLab/victalab_cpsc",
"path": "games/bouncing-ball-game/bounce-ball-game.py",
"copies": "1",
"size": "5793",
"license": "apache-2.0",
"hash": -5270199365514646000,
"line_mean": 33.8975903614,
"line_max": 157,
"alpha_frac": 0.5779388918,
"autogenerated": false,
"ratio": 3.1011777301927195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41791166219927195,
"avg_score": null,
"num_lines": null
} |
# about a dataset using pandas and numpy
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df= pd.read_csv ('school_immunizations.csv')
df= df.dropna()
#print df.head(100)
#had to change PERCENT from object to numeric with this code
df['PERCENT']= pd.to_numeric (df['PERCENT'])
print df.info()
print df.groupby('CATEGORY')['PERCENT'].mean()
# 95% of all 7th graders are up to date on their immunizations.
# School Year Analysis
df_2013 = df[df['SCHOOL YEAR'] == '2013-2014']
print df_2013.groupby('CATEGORY')['PERCENT'].mean()
# 94% of students were up to date in the 2013-2014 school year
# In January 2014 parents had to be counseled by a healthcare professional
# of declare religious exemption, many of these students have neither
# because they were entered into the system in the pre-Jan PBE time
mean_2013= df_2013.groupby('CATEGORY')['PERCENT'].mean()
mean_2013.plot.bar()
plt.subplots_adjust(bottom=.55)
plt.savefig('Figure1_2013')
plt.show()
df_2014 = df[df['SCHOOL YEAR'] == '2014-2015']
print df_2014.groupby('CATEGORY')['PERCENT'].mean()
# from metadata, PBE started in Jan 2014.
# 96.5% of students are up to date
# we see the the split of the PBE: 3% of students have PBE
# 1.9% were counseled by a healtcare professional
# 1.5% declared religious exemption
# the rest <0.01% where declared permanent medical exemption
mean_2014= df_2014.groupby('CATEGORY')['PERCENT'].mean()
mean_2014.plot.bar()
plt.subplots_adjust(bottom=.55)
plt.savefig('Figure1_2014')
plt.show()
df_2015 = df[df['SCHOOL YEAR'] == '2015-2016']
print df_2015.groupby('CATEGORY')['PERCENT'].mean()
# up to date declines slightly to 96%
# similar breakdown of religious/health care counseled PBE
# new field for overdue 0.7%
mean_2015= df_2015.groupby('CATEGORY')['PERCENT'].mean()
mean_2015.plot.bar()
plt.subplots_adjust(bottom=.55)
plt.savefig('Figure1_2015')
plt.show()
# looking at it a little differently, up to date by year
df_uptodate = df[df['CATEGORY'] == 'Up-To-Date']
mean_uptodate= df_uptodate .groupby('SCHOOL YEAR')['PERCENT'].mean()
d= mean_uptodate.plot.bar()
d.set_ylim(.9,1)
plt.savefig('Figure1_uptodate')
# school type analysis
df_uptodate_private = df[df['SCHOOL TYPE'] == 'PRIVATE']
mean_uptodate_private= df_uptodate_private.groupby('CATEGORY')['PERCENT'].mean()
print mean_uptodate_private
df_uptodate_public = df[df['SCHOOL TYPE'] == 'PUBLIC']
mean_uptodate_public= df_uptodate_private.groupby('CATEGORY')['PERCENT'].mean()
print mean_uptodate_public
# not seeing anything here : /
| {
"repo_name": "artopping/nyu-python",
"path": "course3/assignments/about_a_dataset/about_a_dataset.py",
"copies": "1",
"size": "2563",
"license": "mit",
"hash": 6575254428271700000,
"line_mean": 29.5119047619,
"line_max": 80,
"alpha_frac": 0.7186890363,
"autogenerated": false,
"ratio": 2.9767711962833916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9062657704058377,
"avg_score": 0.026560505705002964,
"num_lines": 84
} |
# One-paragraph bot description returned by the "about" command.
about = "cfvg-bot is a discord bot made by LittleFighterFox with a set of commands that is useful for discussing cardfight vanguard. Currently supporting mathematical probability calcuations, it should soon be extended to have automatic searching of cards. Project can found at https://github.com/NanoSmasher/cfvg-discordbot"
helping = {
"eval": '''vbot eval [*]
Evaluates expression supporting BEDMAS operations, probabilities (AND,OR,XOR) and Geometric Distribution of at least 1 (!a,b,c,d)
#NOTE: Population size and Sample size swapped for all other functions
a := Sample size
b := Possible successes
c := Population size
d := Number of successes
''',
"hgcc": '''vbot hgcc [*1] [*2] [*3] [*4] [*5]
Hyper Geometric Cumulative Calculator
*1 := Population size
*2 := Possible successes
*3 := Sample size
*4 := Number of successes
*5 := Available inputs (no quotes): '<' , '<=' , '>' , '>=' , '='
''',
"quickodds": '''vbot quickodds [*1] [*2] [*3] [*4]
Displays all probabilities of a given value
a: Population size
b: Possible successes
c: Sample size
d: # of successes
''',
"cascadeodds": '''vbot cascadeodds [*1] [*2] [*3]
Print exact odds for each # of successes
*1 := Population size
*2 := Possible successes
*3 := Sample size
''',
"updatedb": '''vbot updatedb [*1] `ADMIN COMMAND ONLY`
Updates the following database:
epic := EpicTCG
cfvg := Cardfight!! Vanguard
'''
} | {
"repo_name": "TiniKhang/cfvg-discordbot",
"path": "text.py",
"copies": "2",
"size": "1416",
"license": "mit",
"hash": -6821362145188298000,
"line_mean": 36.2894736842,
"line_max": 325,
"alpha_frac": 0.6843220339,
"autogenerated": false,
"ratio": 3.091703056768559,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9403347264280828,
"avg_score": 0.0745355652775461,
"num_lines": 38
} |
# about database connect and some actions interface.
# writed by sunhuachuang #
import main.automatic, main.action, main.custom
def connect_check(sql, params):
    """Probe the connection for engine ``sql`` using ``params``.

    Returns the backend's check result, an error string when the driver
    is missing, or None for an unrecognized engine name.
    """
    if sql != 'mysql':
        return None
    try:
        import main.sql.mysql
    except ImportError:
        return 'you need install pymysql (pip install PyMySQL)'
    return main.sql.mysql.connect_check(params)
# get all databases
# @params sql_name(str), params({})
# @return []
def show_databases(sql, params):
    """List every database name reachable with ``params``."""
    backend = __initsql(sql)
    return backend.show_databases(params)
# get all tables
# @params sql_name(str), params({}), database_name(str)
# @return []
def show_tables(sql, params, database):
    """List the tables contained in ``database``."""
    backend = __initsql(sql)
    tables = backend.show_tables(params, database)
    return tables
# execute query
# @params sql_name(str), params({}), query(str)
# @return bool
def execute(sql, params, query):
    # TODO: not implemented yet -- should run `query` through __initsql(sql).
    pass
# create format query for insert
# @params sql_name(str), params({}), table_name(str), fields([{}])
# @return now_rows_number
def insert(sql, params, database, table, fields, number):
    """Generate ``number`` rows for ``table`` and insert them."""
    customized = main.custom.custom_fields(sql, database, table, fields)
    rows = main.action.create_data(customized, number) #TODO
    backend = __initsql(sql)
    return backend.insert_data(params, database, table, rows)
# create format query for delete
# @params sql_name(str), params({}), database(str), table_name(str)
# @reutrn str
def delete(sql, params, database, table):
    # TODO: not implemented yet -- should build/run a delete via __initsql(sql).
    pass
# auto analyze the table
# @params sql_name(str), params({}), database(str), table_name(str)
# @return []
def analyze_table(sql, params, database, table):
    """Describe ``table`` and feed the description to the analyzer."""
    description = __initsql(sql).desc_table(params, database, table)
    return main.automatic.analyze(description)
# count the rows in table
def count_table(sql, params, database, table):
    """Return the number of rows currently in ``table``."""
    return __initsql(sql).count_table(params, database, table)
def __initsql(sql):
    """Resolve and return the backend module for engine ``sql``.

    Raises:
        ValueError: for an unsupported engine name. Previously the function
            fell through and returned None, which surfaced later as a
            confusing AttributeError at the call site.
    """
    if sql == 'mysql':
        import main.sql.mysql
        return main.sql.mysql
    raise ValueError('unsupported sql engine: %s' % sql)
| {
"repo_name": "sunhuachuang/pytestdata",
"path": "main/db.py",
"copies": "1",
"size": "2062",
"license": "mit",
"hash": -5310976924303422000,
"line_mean": 30.7230769231,
"line_max": 72,
"alpha_frac": 0.6823472357,
"autogenerated": false,
"ratio": 3.465546218487395,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9624569458701614,
"avg_score": 0.004664799097156172,
"num_lines": 65
} |
"""About Dialog for IDLE
"""
from Tkinter import *
import os
import os.path
import textView
import idlever
class AboutDialog(Toplevel):
    """Modal about dialog for idle
    """
    def __init__(self,parent,title):
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        # Position the dialog slightly offset from the parent window.
        self.geometry("+%d+%d" % (parent.winfo_rootx()+30,
                                  parent.winfo_rooty()+30))
        self.bg = "#707070"
        self.fg = "#ffffff"
        self.CreateWidgets()
        self.resizable(height=FALSE, width=FALSE)
        self.title(title)
        # Modal behavior: tie to parent, grab input, block until closed.
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Ok)
        self.parent = parent
        self.buttonOk.focus_set()
        self.bind('<Return>',self.Ok) #dismiss dialog
        self.bind('<Escape>',self.Ok) #dismiss dialog
        self.wait_window()
    def CreateWidgets(self):
        # Build the static layout: title, byline, contact info, version
        # labels, and three rows of buttons that open text viewers.
        frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
        frameButtons = Frame(self)
        frameButtons.pack(side=BOTTOM, fill=X)
        frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
        self.buttonOk = Button(frameButtons, text='Close',
                               command=self.Ok)
        self.buttonOk.pack(padx=5, pady=5)
        #self.picture = Image('photo', data=self.pictureData)
        frameBg = Frame(frameMain, bg=self.bg)
        frameBg.pack(expand=TRUE, fill=BOTH)
        labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
                           font=('courier', 24, 'bold'))
        labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
        #labelPicture = Label(frameBg, text='[picture]')
        #image=self.picture, bg=self.bg)
        #labelPicture.grid(row=1, column=1, sticky=W, rowspan=2,
        #                  padx=0, pady=3)
        byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
        labelDesc = Label(frameBg, text=byline, justify=LEFT,
                          fg=self.fg, bg=self.bg)
        labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
        labelEmail = Label(frameBg, text='email: idle-dev@python.org',
                           justify=LEFT, fg=self.fg, bg=self.bg)
        labelEmail.grid(row=6, column=0, columnspan=2,
                        sticky=W, padx=10, pady=0)
        labelWWW = Label(frameBg, text='www: http://www.python.org/idle/',
                         justify=LEFT, fg=self.fg, bg=self.bg)
        labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        labelPythonVer = Label(frameBg, text='Python version: ' + \
                               sys.version.split()[0], fg=self.fg, bg=self.bg)
        labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
        # handle weird tk version num in windoze python >= 1.6 (?!?)
        tkVer = repr(TkVersion).split('.')
        tkVer[len(tkVer)-1] = str('%.3g' % (float('.'+tkVer[len(tkVer)-1])))[2:]
        if tkVer[len(tkVer)-1] == '':
            tkVer[len(tkVer)-1] = '0'
        tkVer = '.'.join(tkVer)
        labelTkVer = Label(frameBg, text='Tk version: '+
                           tkVer, fg=self.fg, bg=self.bg)
        labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
        py_button_f = Frame(frameBg, bg=self.bg)
        py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
        buttonLicense = Button(py_button_f, text='License', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowLicense)
        buttonLicense.pack(side=LEFT, padx=10, pady=10)
        buttonCopyright = Button(py_button_f, text='Copyright', width=8,
                                 highlightbackground=self.bg,
                                 command=self.ShowCopyright)
        buttonCopyright.pack(side=LEFT, padx=10, pady=10)
        buttonCredits = Button(py_button_f, text='Credits', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowPythonCredits)
        buttonCredits.pack(side=LEFT, padx=10, pady=10)
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        idle_v = Label(frameBg, text='IDLE version: ' + idlever.IDLE_VERSION,
                       fg=self.fg, bg=self.bg)
        idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
        idle_button_f = Frame(frameBg, bg=self.bg)
        idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
        idle_about_b = Button(idle_button_f, text='README', width=8,
                              highlightbackground=self.bg,
                              command=self.ShowIDLEAbout)
        idle_about_b.pack(side=LEFT, padx=10, pady=10)
        idle_news_b = Button(idle_button_f, text='NEWS', width=8,
                             highlightbackground=self.bg,
                             command=self.ShowIDLENEWS)
        idle_news_b.pack(side=LEFT, padx=10, pady=10)
        idle_credits_b = Button(idle_button_f, text='Credits', width=8,
                                highlightbackground=self.bg,
                                command=self.ShowIDLECredits)
        idle_credits_b.pack(side=LEFT, padx=10, pady=10)
    def ShowLicense(self):
        self.display_printer_text('About - License', license)
    def ShowCopyright(self):
        self.display_printer_text('About - Copyright', copyright)
    def ShowPythonCredits(self):
        self.display_printer_text('About - Python Credits', credits)
    def ShowIDLECredits(self):
        self.display_file_text('About - Credits', 'CREDITS.txt', 'iso-8859-1')
    def ShowIDLEAbout(self):
        self.display_file_text('About - Readme', 'README.txt')
    def ShowIDLENEWS(self):
        self.display_file_text('About - NEWS', 'NEWS.txt')
    def display_printer_text(self, title, printer):
        # Relies on site._Printer internals (license/copyright/credits objects).
        printer._Printer__setup()
        text = '\n'.join(printer._Printer__lines)
        textView.view_text(self, title, text)
    def display_file_text(self, title, filename, encoding=None):
        # Resolve `filename` relative to this module's directory.
        fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
        textView.view_file(self, title, fn, encoding)
    def Ok(self, event=None):
        self.destroy()
if __name__ == '__main__':
    # test the dialog: a window with one button that pops the About dialog
    root = Tk()
    def run():
        import aboutDialog
        aboutDialog.AboutDialog(root, 'About')
    Button(root, text='Dialog', command=run).pack()
    root.mainloop()
| {
"repo_name": "mujiansu/arangodb",
"path": "3rdParty/V8-4.3.61/third_party/python_26/Lib/idlelib/aboutDialog.py",
"copies": "52",
"size": "6800",
"license": "apache-2.0",
"hash": -2094931173835926800,
"line_mean": 44.3333333333,
"line_max": 80,
"alpha_frac": 0.5666176471,
"autogenerated": false,
"ratio": 3.490759753593429,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""About Dialog for IDLE
"""
from Tkinter import *
import os
from idlelib import textView
from idlelib import idlever
class AboutDialog(Toplevel):
"""Modal about dialog for idle
"""
def __init__(self,parent,title):
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.geometry("+%d+%d" % (parent.winfo_rootx()+30,
parent.winfo_rooty()+30))
self.bg = "#707070"
self.fg = "#ffffff"
self.CreateWidgets()
self.resizable(height=FALSE, width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Ok)
self.parent = parent
self.buttonOk.focus_set()
self.bind('<Return>',self.Ok) #dismiss dialog
self.bind('<Escape>',self.Ok) #dismiss dialog
self.wait_window()
def CreateWidgets(self):
frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
frameButtons = Frame(self)
frameButtons.pack(side=BOTTOM, fill=X)
frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
self.buttonOk = Button(frameButtons, text='Close',
command=self.Ok)
self.buttonOk.pack(padx=5, pady=5)
#self.picture = Image('photo', data=self.pictureData)
frameBg = Frame(frameMain, bg=self.bg)
frameBg.pack(expand=TRUE, fill=BOTH)
labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
font=('courier', 24, 'bold'))
labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
#labelPicture = Label(frameBg, text='[picture]')
#image=self.picture, bg=self.bg)
#labelPicture.grid(row=1, column=1, sticky=W, rowspan=2,
# padx=0, pady=3)
byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
labelDesc = Label(frameBg, text=byline, justify=LEFT,
fg=self.fg, bg=self.bg)
labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
labelEmail = Label(frameBg, text='email: idle-dev@python.org',
justify=LEFT, fg=self.fg, bg=self.bg)
labelEmail.grid(row=6, column=0, columnspan=2,
sticky=W, padx=10, pady=0)
labelWWW = Label(frameBg, text='www: http://www.python.org/idle/',
justify=LEFT, fg=self.fg, bg=self.bg)
labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
Frame(frameBg, borderwidth=1, relief=SUNKEN,
height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
columnspan=3, padx=5, pady=5)
labelPythonVer = Label(frameBg, text='Python version: ' + \
sys.version.split()[0], fg=self.fg, bg=self.bg)
labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
# handle weird tk version num in windoze python >= 1.6 (?!?)
tkVer = repr(TkVersion).split('.')
tkVer[len(tkVer)-1] = str('%.3g' % (float('.'+tkVer[len(tkVer)-1])))[2:]
if tkVer[len(tkVer)-1] == '':
tkVer[len(tkVer)-1] = '0'
tkVer = '.'.join(tkVer)
labelTkVer = Label(frameBg, text='Tk version: '+
tkVer, fg=self.fg, bg=self.bg)
labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
py_button_f = Frame(frameBg, bg=self.bg)
py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
buttonLicense = Button(py_button_f, text='License', width=8,
highlightbackground=self.bg,
command=self.ShowLicense)
buttonLicense.pack(side=LEFT, padx=10, pady=10)
buttonCopyright = Button(py_button_f, text='Copyright', width=8,
highlightbackground=self.bg,
command=self.ShowCopyright)
buttonCopyright.pack(side=LEFT, padx=10, pady=10)
buttonCredits = Button(py_button_f, text='Credits', width=8,
highlightbackground=self.bg,
command=self.ShowPythonCredits)
buttonCredits.pack(side=LEFT, padx=10, pady=10)
Frame(frameBg, borderwidth=1, relief=SUNKEN,
height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
columnspan=3, padx=5, pady=5)
idle_v = Label(frameBg, text='IDLE version: ' + idlever.IDLE_VERSION,
fg=self.fg, bg=self.bg)
idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
idle_button_f = Frame(frameBg, bg=self.bg)
idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
idle_about_b = Button(idle_button_f, text='README', width=8,
highlightbackground=self.bg,
command=self.ShowIDLEAbout)
idle_about_b.pack(side=LEFT, padx=10, pady=10)
idle_news_b = Button(idle_button_f, text='NEWS', width=8,
highlightbackground=self.bg,
command=self.ShowIDLENEWS)
idle_news_b.pack(side=LEFT, padx=10, pady=10)
idle_credits_b = Button(idle_button_f, text='Credits', width=8,
highlightbackground=self.bg,
command=self.ShowIDLECredits)
idle_credits_b.pack(side=LEFT, padx=10, pady=10)
def ShowLicense(self):
self.display_printer_text('About - License', license)
def ShowCopyright(self):
self.display_printer_text('About - Copyright', copyright)
def ShowPythonCredits(self):
self.display_printer_text('About - Python Credits', credits)
def ShowIDLECredits(self):
self.display_file_text('About - Credits', 'CREDITS.txt', 'iso-8859-1')
def ShowIDLEAbout(self):
self.display_file_text('About - Readme', 'README.txt')
    def ShowIDLENEWS(self):
        # Show IDLE's NEWS.txt from the idlelib directory.
        self.display_file_text('About - NEWS', 'NEWS.txt')
    def display_printer_text(self, title, printer):
        # 'printer' is one of the site builtins (license, copyright,
        # credits).  Poke its private _Printer internals to obtain the
        # full text in one piece instead of the interactive pager, then
        # show it in a text viewer window titled 'title'.
        printer._Printer__setup()
        text = '\n'.join(printer._Printer__lines)
        textView.view_text(self, title, text)
def display_file_text(self, title, filename, encoding=None):
fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
textView.view_file(self, title, fn, encoding)
    def Ok(self, event=None):
        # Dismiss the dialog; bound to Close, Return, Escape and WM close.
        self.destroy()
if __name__ == '__main__':
    # Manual smoke test: a button that pops up the (modal) dialog.
    root = Tk()
    def run():
        from idlelib import aboutDialog
        aboutDialog.AboutDialog(root, 'About')
    Button(root, text='Dialog', command=run).pack()
    root.mainloop()
| {
"repo_name": "DecipherOne/Troglodyte",
"path": "Trog Build Dependencies/Python26/Lib/idlelib/aboutDialog.py",
"copies": "46",
"size": "6825",
"license": "mit",
"hash": -5418792131761544000,
"line_mean": 44.5,
"line_max": 80,
"alpha_frac": 0.5676190476,
"autogenerated": false,
"ratio": 3.4946236559139785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""About Dialog for IDLE
"""
from tkinter import *
import os
from idlelib import textView
from idlelib import idlever
class AboutDialog(Toplevel):
    """Modal about dialog for idle

    Shows IDLE/Python/Tk version information plus buttons that open the
    license, copyright and credits texts and IDLE's README, NEWS and
    CREDITS files.
    """
    def __init__(self,parent,title):
        # Build the dialog offset from the parent's top-left corner, then
        # make it modal: transient + grab_set route events here and
        # wait_window blocks the caller until the dialog is destroyed.
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        self.geometry("+%d+%d" % (parent.winfo_rootx()+30,
                                  parent.winfo_rooty()+30))
        self.bg = "#707070"
        self.fg = "#ffffff"
        self.CreateWidgets()
        self.resizable(height=FALSE, width=FALSE)
        self.title(title)
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Ok)
        self.parent = parent
        self.buttonOk.focus_set()
        self.bind('<Return>',self.Ok) #dismiss dialog
        self.bind('<Escape>',self.Ok) #dismiss dialog
        self.wait_window()
    def CreateWidgets(self):
        """Create all widgets.

        Grid row numbers in frameBg define the layout; rows 1 and 3-5
        are left unused.
        """
        frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
        frameButtons = Frame(self)
        frameButtons.pack(side=BOTTOM, fill=X)
        frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
        self.buttonOk = Button(frameButtons, text='Close',
                               command=self.Ok)
        self.buttonOk.pack(padx=5, pady=5)
        #self.picture = Image('photo', data=self.pictureData)
        frameBg = Frame(frameMain, bg=self.bg)
        frameBg.pack(expand=TRUE, fill=BOTH)
        labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
                           font=('courier', 24, 'bold'))
        labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
        #labelPicture = Label(frameBg, text='[picture]')
        #image=self.picture, bg=self.bg)
        #labelPicture.grid(row=1, column=1, sticky=W, rowspan=2,
        #                  padx=0, pady=3)
        byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
        labelDesc = Label(frameBg, text=byline, justify=LEFT,
                          fg=self.fg, bg=self.bg)
        labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
        labelEmail = Label(frameBg, text='email: idle-dev@python.org',
                           justify=LEFT, fg=self.fg, bg=self.bg)
        labelEmail.grid(row=6, column=0, columnspan=2,
                        sticky=W, padx=10, pady=0)
        labelWWW = Label(frameBg, text='www: http://www.python.org/idle/',
                         justify=LEFT, fg=self.fg, bg=self.bg)
        labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        # NOTE(review): 'sys' is not imported at module level here; it
        # appears to rely on leaking through 'from tkinter import *' --
        # confirm, or add an explicit import.
        labelPythonVer = Label(frameBg, text='Python version: ' + \
                               sys.version.split()[0], fg=self.fg, bg=self.bg)
        labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
        # Normalize the last component of TkVersion's float repr to a
        # short patchlevel string (repr can produce long float tails).
        tkVer = repr(TkVersion).split('.')
        tkVer[len(tkVer)-1] = str('%.3g' % (float('.'+tkVer[len(tkVer)-1])))[2:]
        if tkVer[len(tkVer)-1] == '':
            tkVer[len(tkVer)-1] = '0'
        tkVer = '.'.join(tkVer)
        labelTkVer = Label(frameBg, text='Tk version: '+
                           tkVer, fg=self.fg, bg=self.bg)
        labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
        # Row of buttons for the Python interpreter's texts.
        py_button_f = Frame(frameBg, bg=self.bg)
        py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
        buttonLicense = Button(py_button_f, text='License', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowLicense)
        buttonLicense.pack(side=LEFT, padx=10, pady=10)
        buttonCopyright = Button(py_button_f, text='Copyright', width=8,
                                 highlightbackground=self.bg,
                                 command=self.ShowCopyright)
        buttonCopyright.pack(side=LEFT, padx=10, pady=10)
        buttonCredits = Button(py_button_f, text='Credits', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowPythonCredits)
        buttonCredits.pack(side=LEFT, padx=10, pady=10)
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        idle_v = Label(frameBg, text='IDLE version: ' + idlever.IDLE_VERSION,
                       fg=self.fg, bg=self.bg)
        idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
        # Row of buttons for IDLE's own documentation files.
        idle_button_f = Frame(frameBg, bg=self.bg)
        idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
        idle_about_b = Button(idle_button_f, text='README', width=8,
                              highlightbackground=self.bg,
                              command=self.ShowIDLEAbout)
        idle_about_b.pack(side=LEFT, padx=10, pady=10)
        idle_news_b = Button(idle_button_f, text='NEWS', width=8,
                             highlightbackground=self.bg,
                             command=self.ShowIDLENEWS)
        idle_news_b.pack(side=LEFT, padx=10, pady=10)
        idle_credits_b = Button(idle_button_f, text='Credits', width=8,
                                highlightbackground=self.bg,
                                command=self.ShowIDLECredits)
        idle_credits_b.pack(side=LEFT, padx=10, pady=10)
    def ShowLicense(self):
        # The 'license' site builtin.
        self.display_printer_text('About - License', license)
    def ShowCopyright(self):
        # The 'copyright' site builtin.
        self.display_printer_text('About - Copyright', copyright)
    def ShowPythonCredits(self):
        # The 'credits' site builtin.
        self.display_printer_text('About - Python Credits', credits)
    def ShowIDLECredits(self):
        # CREDITS.txt is historically Latin-1 encoded.
        self.display_file_text('About - Credits', 'CREDITS.txt', 'iso-8859-1')
    def ShowIDLEAbout(self):
        self.display_file_text('About - Readme', 'README.txt')
    def ShowIDLENEWS(self):
        self.display_file_text('About - NEWS', 'NEWS.txt')
    def display_printer_text(self, title, printer):
        # Use the site._Printer private internals to get the whole text
        # without the interactive pager.
        printer._Printer__setup()
        text = '\n'.join(printer._Printer__lines)
        textView.view_text(self, title, text)
    def display_file_text(self, title, filename, encoding=None):
        # Show a file from the idlelib directory in a viewer window.
        fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
        textView.view_file(self, title, fn, encoding)
    def Ok(self, event=None):
        # Dismiss the dialog; bound to Close, Return, Escape, WM close.
        self.destroy()
if __name__ == '__main__':
    # Manual smoke test: a button that pops up the (modal) dialog.
    root = Tk()
    def run():
        from idlelib import aboutDialog
        aboutDialog.AboutDialog(root, 'About')
    Button(root, text='Dialog', command=run).pack()
    root.mainloop()
| {
"repo_name": "jcoady9/python-for-android",
"path": "python3-alpha/python3-src/Lib/idlelib/aboutDialog.py",
"copies": "55",
"size": "6825",
"license": "apache-2.0",
"hash": 506833609482704900,
"line_mean": 44.5,
"line_max": 80,
"alpha_frac": 0.5676190476,
"autogenerated": false,
"ratio": 3.4946236559139785,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0033308504939797735,
"num_lines": 150
} |
"""About Dialog for IDLE
"""
from Tkinter import *
import os
from idlelib import textView
from idlelib import idlever
class AboutDialog(Toplevel):
    """Modal about dialog for idle

    Shows IDLE/Python/Tk version information plus buttons that open the
    license, copyright and credits texts and IDLE's README, NEWS and
    CREDITS files.
    """
    def __init__(self, parent, title):
        # Build the dialog offset from the parent's top-left corner, then
        # make it modal: transient + grab_set route events here and
        # wait_window blocks the caller until the dialog is destroyed.
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        self.geometry("+%d+%d" % (parent.winfo_rootx()+30,
                                  parent.winfo_rooty()+30))
        self.bg = "#707070"
        self.fg = "#ffffff"
        self.CreateWidgets()
        self.resizable(height=FALSE, width=FALSE)
        self.title(title)
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Ok)
        self.parent = parent
        self.buttonOk.focus_set()
        self.bind('<Return>',self.Ok) #dismiss dialog
        self.bind('<Escape>',self.Ok) #dismiss dialog
        self.wait_window()
    def CreateWidgets(self):
        """Create all widgets; grid row numbers in frameBg define the layout."""
        frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
        frameButtons = Frame(self)
        frameButtons.pack(side=BOTTOM, fill=X)
        frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
        self.buttonOk = Button(frameButtons, text='Close',
                               command=self.Ok)
        self.buttonOk.pack(padx=5, pady=5)
        #self.picture = Image('photo', data=self.pictureData)
        frameBg = Frame(frameMain, bg=self.bg)
        frameBg.pack(expand=TRUE, fill=BOTH)
        labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
                           font=('courier', 24, 'bold'))
        labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
        #labelPicture = Label(frameBg, text='[picture]')
        #image=self.picture, bg=self.bg)
        #labelPicture.grid(row=1, column=1, sticky=W, rowspan=2,
        #                  padx=0, pady=3)
        byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
        labelDesc = Label(frameBg, text=byline, justify=LEFT,
                          fg=self.fg, bg=self.bg)
        labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
        labelEmail = Label(frameBg, text='email: idle-dev@python.org',
                           justify=LEFT, fg=self.fg, bg=self.bg)
        labelEmail.grid(row=6, column=0, columnspan=2,
                        sticky=W, padx=10, pady=0)
        labelWWW = Label(frameBg, text='www: http://www.python.org/idle/',
                         justify=LEFT, fg=self.fg, bg=self.bg)
        labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        # NOTE(review): 'sys' is not imported at module level here; it
        # appears to rely on leaking through 'from Tkinter import *' --
        # confirm, or add an explicit import.
        labelPythonVer = Label(frameBg, text='Python version: ' + \
                               sys.version.split()[0], fg=self.fg, bg=self.bg)
        labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
        # Ask Tk itself for the full patchlevel (e.g. '8.5.15') instead
        # of munging the float TkVersion.
        tkVer = self.tk.call('info', 'patchlevel')
        labelTkVer = Label(frameBg, text='Tk version: '+
                           tkVer, fg=self.fg, bg=self.bg)
        labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
        # Row of buttons for the Python interpreter's texts.
        py_button_f = Frame(frameBg, bg=self.bg)
        py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
        buttonLicense = Button(py_button_f, text='License', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowLicense)
        buttonLicense.pack(side=LEFT, padx=10, pady=10)
        buttonCopyright = Button(py_button_f, text='Copyright', width=8,
                                 highlightbackground=self.bg,
                                 command=self.ShowCopyright)
        buttonCopyright.pack(side=LEFT, padx=10, pady=10)
        buttonCredits = Button(py_button_f, text='Credits', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowPythonCredits)
        buttonCredits.pack(side=LEFT, padx=10, pady=10)
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        idle_v = Label(frameBg, text='IDLE version: ' + idlever.IDLE_VERSION,
                       fg=self.fg, bg=self.bg)
        idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
        # Row of buttons for IDLE's own documentation files.
        idle_button_f = Frame(frameBg, bg=self.bg)
        idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
        idle_about_b = Button(idle_button_f, text='README', width=8,
                              highlightbackground=self.bg,
                              command=self.ShowIDLEAbout)
        idle_about_b.pack(side=LEFT, padx=10, pady=10)
        idle_news_b = Button(idle_button_f, text='NEWS', width=8,
                             highlightbackground=self.bg,
                             command=self.ShowIDLENEWS)
        idle_news_b.pack(side=LEFT, padx=10, pady=10)
        idle_credits_b = Button(idle_button_f, text='Credits', width=8,
                                highlightbackground=self.bg,
                                command=self.ShowIDLECredits)
        idle_credits_b.pack(side=LEFT, padx=10, pady=10)
    def ShowLicense(self):
        # The 'license' site builtin.
        self.display_printer_text('About - License', license)
    def ShowCopyright(self):
        # The 'copyright' site builtin.
        self.display_printer_text('About - Copyright', copyright)
    def ShowPythonCredits(self):
        # The 'credits' site builtin.
        self.display_printer_text('About - Python Credits', credits)
    def ShowIDLECredits(self):
        # CREDITS.txt is historically Latin-1 encoded.
        self.display_file_text('About - Credits', 'CREDITS.txt', 'iso-8859-1')
    def ShowIDLEAbout(self):
        self.display_file_text('About - Readme', 'README.txt')
    def ShowIDLENEWS(self):
        self.display_file_text('About - NEWS', 'NEWS.txt')
    def display_printer_text(self, title, printer):
        # Use the site._Printer private internals to get the whole text
        # without the interactive pager.
        printer._Printer__setup()
        text = '\n'.join(printer._Printer__lines)
        textView.view_text(self, title, text)
    def display_file_text(self, title, filename, encoding=None):
        # Show a file from the idlelib directory in a viewer window.
        fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
        textView.view_file(self, title, fn, encoding)
    def Ok(self, event=None):
        # Dismiss the dialog; bound to Close, Return, Escape, WM close.
        self.destroy()
if __name__ == '__main__':
    # Run via IDLE's human-test harness, which displays the dialog.
    from idlelib.idle_test.htest import run
    run(AboutDialog)
| {
"repo_name": "MonicaHsu/truvaluation",
"path": "venv/lib/python2.7/idlelib/aboutDialog.py",
"copies": "2",
"size": "6430",
"license": "mit",
"hash": -6486523243233650000,
"line_mean": 44.9285714286,
"line_max": 80,
"alpha_frac": 0.5712286159,
"autogenerated": false,
"ratio": 3.515582285401859,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5086810901301859,
"avg_score": null,
"num_lines": null
} |
"""About Dialog for IDLE
"""
from Tkinter import *
import string, os
import textView
import idlever
class AboutDialog(Toplevel):
"""Modal about dialog for idle
"""
def __init__(self,parent,title):
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.geometry("+%d+%d" % (parent.winfo_rootx()+30,
parent.winfo_rooty()+30))
self.bg = "#707070"
self.fg = "#ffffff"
self.CreateWidgets()
self.resizable(height=FALSE, width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Ok)
self.parent = parent
self.buttonOk.focus_set()
self.bind('<Return>',self.Ok) #dismiss dialog
self.bind('<Escape>',self.Ok) #dismiss dialog
self.wait_window()
def CreateWidgets(self):
frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
frameButtons = Frame(self)
frameButtons.pack(side=BOTTOM, fill=X)
frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
self.buttonOk = Button(frameButtons, text='Close',
command=self.Ok)
self.buttonOk.pack(padx=5, pady=5)
#self.picture = Image('photo', data=self.pictureData)
frameBg = Frame(frameMain, bg=self.bg)
frameBg.pack(expand=TRUE, fill=BOTH)
labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
font=('courier', 24, 'bold'))
labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
#labelPicture = Label(frameBg, text='[picture]')
#image=self.picture, bg=self.bg)
#labelPicture.grid(row=1, column=1, sticky=W, rowspan=2,
# padx=0, pady=3)
byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
labelDesc = Label(frameBg, text=byline, justify=LEFT,
fg=self.fg, bg=self.bg)
labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
labelEmail = Label(frameBg, text='email: idle-dev@python.org',
justify=LEFT, fg=self.fg, bg=self.bg)
labelEmail.grid(row=6, column=0, columnspan=2,
sticky=W, padx=10, pady=0)
labelWWW = Label(frameBg, text='www: http://www.python.org/idle/',
justify=LEFT, fg=self.fg, bg=self.bg)
labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
Frame(frameBg, borderwidth=1, relief=SUNKEN,
height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
columnspan=3, padx=5, pady=5)
labelPythonVer = Label(frameBg, text='Python version: ' + \
sys.version.split()[0], fg=self.fg, bg=self.bg)
labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
# handle weird tk version num in windoze python >= 1.6 (?!?)
tkVer = `TkVersion`.split('.')
tkVer[len(tkVer)-1] = str('%.3g' % (float('.'+tkVer[len(tkVer)-1])))[2:]
if tkVer[len(tkVer)-1] == '':
tkVer[len(tkVer)-1] = '0'
tkVer = string.join(tkVer,'.')
labelTkVer = Label(frameBg, text='Tk version: '+
tkVer, fg=self.fg, bg=self.bg)
labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
py_button_f = Frame(frameBg, bg=self.bg)
py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
buttonLicense = Button(py_button_f, text='License', width=8,
highlightbackground=self.bg,
command=self.ShowLicense)
buttonLicense.pack(side=LEFT, padx=10, pady=10)
buttonCopyright = Button(py_button_f, text='Copyright', width=8,
highlightbackground=self.bg,
command=self.ShowCopyright)
buttonCopyright.pack(side=LEFT, padx=10, pady=10)
buttonCredits = Button(py_button_f, text='Credits', width=8,
highlightbackground=self.bg,
command=self.ShowPythonCredits)
buttonCredits.pack(side=LEFT, padx=10, pady=10)
Frame(frameBg, borderwidth=1, relief=SUNKEN,
height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
columnspan=3, padx=5, pady=5)
idle_v = Label(frameBg, text='IDLE version: ' + idlever.IDLE_VERSION,
fg=self.fg, bg=self.bg)
idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
idle_button_f = Frame(frameBg, bg=self.bg)
idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
idle_about_b = Button(idle_button_f, text='README', width=8,
highlightbackground=self.bg,
command=self.ShowIDLEAbout)
idle_about_b.pack(side=LEFT, padx=10, pady=10)
idle_news_b = Button(idle_button_f, text='NEWS', width=8,
highlightbackground=self.bg,
command=self.ShowIDLENEWS)
idle_news_b.pack(side=LEFT, padx=10, pady=10)
idle_credits_b = Button(idle_button_f, text='Credits', width=8,
highlightbackground=self.bg,
command=self.ShowIDLECredits)
idle_credits_b.pack(side=LEFT, padx=10, pady=10)
def ShowLicense(self):
self.display_printer_text(license, 'About - License')
def ShowCopyright(self):
self.display_printer_text(copyright, 'About - Copyright')
def ShowPythonCredits(self):
self.display_printer_text(credits, 'About - Python Credits')
def ShowIDLECredits(self):
self.ViewFile('About - Credits','CREDITS.txt', 'iso-8859-1')
def ShowIDLEAbout(self):
self.ViewFile('About - Readme', 'README.txt')
def ShowIDLENEWS(self):
self.ViewFile('About - NEWS', 'NEWS.txt')
def display_printer_text(self, printer, title):
printer._Printer__setup()
data = '\n'.join(printer._Printer__lines)
textView.TextViewer(self, title, None, data)
def ViewFile(self, viewTitle, viewFile, encoding=None):
fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), viewFile)
if encoding:
import codecs
try:
textFile = codecs.open(fn, 'r')
except IOError:
tkMessageBox.showerror(title='File Load Error',
message='Unable to load file '+
`fileName`+' .')
return
else:
data = textFile.read()
else:
data = None
textView.TextViewer(self, viewTitle, fn, data=data)
def Ok(self, event=None):
self.destroy()
if __name__ == '__main__':
    # Manual smoke test: a button that pops up the (modal) dialog.
    root = Tk()
    def run():
        import aboutDialog
        aboutDialog.AboutDialog(root, 'About')
    Button(root, text='Dialog', command=run).pack()
    root.mainloop()
| {
"repo_name": "MalloyPower/parsing-python",
"path": "front-end/testsuite-python-lib/Python-2.3/Lib/idlelib/aboutDialog.py",
"copies": "1",
"size": "7225",
"license": "mit",
"hash": -8906380582848233000,
"line_mean": 43.5987654321,
"line_max": 80,
"alpha_frac": 0.5541868512,
"autogenerated": false,
"ratio": 3.582052553296976,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9614551182978035,
"avg_score": 0.004337644303788253,
"num_lines": 162
} |
"""About Dialog for IDLE
"""
from Tkinter import *
import os
from idlelib import textView
from idlelib import idlever
class AboutDialog(Toplevel):
    """Modal about dialog for idle

    Shows IDLE/Python/Tk version information plus buttons that open the
    license, copyright and credits texts and IDLE's README, NEWS and
    CREDITS files.
    """
    def __init__(self,parent,title):
        # Build the dialog offset from the parent's top-left corner, then
        # make it modal: transient + grab_set route events here and
        # wait_window blocks the caller until the dialog is destroyed.
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        self.geometry("+%d+%d" % (parent.winfo_rootx()+30,
                                  parent.winfo_rooty()+30))
        self.bg = "#707070"
        self.fg = "#ffffff"
        self.CreateWidgets()
        self.resizable(height=FALSE, width=FALSE)
        self.title(title)
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Ok)
        self.parent = parent
        self.buttonOk.focus_set()
        self.bind('<Return>',self.Ok) #dismiss dialog
        self.bind('<Escape>',self.Ok) #dismiss dialog
        self.wait_window()
    def CreateWidgets(self):
        """Create all widgets; grid row numbers in frameBg define the layout."""
        frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
        frameButtons = Frame(self)
        frameButtons.pack(side=BOTTOM, fill=X)
        frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
        self.buttonOk = Button(frameButtons, text='Close',
                               command=self.Ok)
        self.buttonOk.pack(padx=5, pady=5)
        #self.picture = Image('photo', data=self.pictureData)
        frameBg = Frame(frameMain, bg=self.bg)
        frameBg.pack(expand=TRUE, fill=BOTH)
        labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
                           font=('courier', 24, 'bold'))
        labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
        #labelPicture = Label(frameBg, text='[picture]')
        #image=self.picture, bg=self.bg)
        #labelPicture.grid(row=1, column=1, sticky=W, rowspan=2,
        #                  padx=0, pady=3)
        byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
        labelDesc = Label(frameBg, text=byline, justify=LEFT,
                          fg=self.fg, bg=self.bg)
        labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
        labelEmail = Label(frameBg, text='email: idle-dev@python.org',
                           justify=LEFT, fg=self.fg, bg=self.bg)
        labelEmail.grid(row=6, column=0, columnspan=2,
                        sticky=W, padx=10, pady=0)
        labelWWW = Label(frameBg, text='www: http://www.python.org/idle/',
                         justify=LEFT, fg=self.fg, bg=self.bg)
        labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        # NOTE(review): 'sys' is not imported at module level here; it
        # appears to rely on leaking through 'from Tkinter import *' --
        # confirm, or add an explicit import.
        labelPythonVer = Label(frameBg, text='Python version: ' + \
                               sys.version.split()[0], fg=self.fg, bg=self.bg)
        labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
        # Normalize the last component of TkVersion's float repr to a
        # short patchlevel string (repr can produce long float tails).
        tkVer = repr(TkVersion).split('.')
        tkVer[len(tkVer)-1] = str('%.3g' % (float('.'+tkVer[len(tkVer)-1])))[2:]
        if tkVer[len(tkVer)-1] == '':
            tkVer[len(tkVer)-1] = '0'
        tkVer = '.'.join(tkVer)
        labelTkVer = Label(frameBg, text='Tk version: '+
                           tkVer, fg=self.fg, bg=self.bg)
        labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
        # Row of buttons for the Python interpreter's texts.
        py_button_f = Frame(frameBg, bg=self.bg)
        py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
        buttonLicense = Button(py_button_f, text='License', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowLicense)
        buttonLicense.pack(side=LEFT, padx=10, pady=10)
        buttonCopyright = Button(py_button_f, text='Copyright', width=8,
                                 highlightbackground=self.bg,
                                 command=self.ShowCopyright)
        buttonCopyright.pack(side=LEFT, padx=10, pady=10)
        buttonCredits = Button(py_button_f, text='Credits', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowPythonCredits)
        buttonCredits.pack(side=LEFT, padx=10, pady=10)
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        idle_v = Label(frameBg, text='IDLE version: ' + idlever.IDLE_VERSION,
                       fg=self.fg, bg=self.bg)
        idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
        # Row of buttons for IDLE's own documentation files.
        idle_button_f = Frame(frameBg, bg=self.bg)
        idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
        idle_about_b = Button(idle_button_f, text='README', width=8,
                              highlightbackground=self.bg,
                              command=self.ShowIDLEAbout)
        idle_about_b.pack(side=LEFT, padx=10, pady=10)
        idle_news_b = Button(idle_button_f, text='NEWS', width=8,
                             highlightbackground=self.bg,
                             command=self.ShowIDLENEWS)
        idle_news_b.pack(side=LEFT, padx=10, pady=10)
        idle_credits_b = Button(idle_button_f, text='Credits', width=8,
                                highlightbackground=self.bg,
                                command=self.ShowIDLECredits)
        idle_credits_b.pack(side=LEFT, padx=10, pady=10)
    def ShowLicense(self):
        # The 'license' site builtin.
        self.display_printer_text('About - License', license)
    def ShowCopyright(self):
        # The 'copyright' site builtin.
        self.display_printer_text('About - Copyright', copyright)
    def ShowPythonCredits(self):
        # The 'credits' site builtin.
        self.display_printer_text('About - Python Credits', credits)
    def ShowIDLECredits(self):
        # CREDITS.txt is historically Latin-1 encoded.
        self.display_file_text('About - Credits', 'CREDITS.txt', 'iso-8859-1')
    def ShowIDLEAbout(self):
        self.display_file_text('About - Readme', 'README.txt')
    def ShowIDLENEWS(self):
        self.display_file_text('About - NEWS', 'NEWS.txt')
    def display_printer_text(self, title, printer):
        # Use the site._Printer private internals to get the whole text
        # without the interactive pager.
        printer._Printer__setup()
        text = '\n'.join(printer._Printer__lines)
        textView.view_text(self, title, text)
    def display_file_text(self, title, filename, encoding=None):
        # Show a file from the idlelib directory in a viewer window.
        fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), filename)
        textView.view_file(self, title, fn, encoding)
    def Ok(self, event=None):
        # Dismiss the dialog; bound to Close, Return, Escape, WM close.
        self.destroy()
if __name__ == '__main__':
    # Manual smoke test: a button that pops up the (modal) dialog.
    root = Tk()
    def run():
        from idlelib import aboutDialog
        aboutDialog.AboutDialog(root, 'About')
    Button(root, text='Dialog', command=run).pack()
    root.mainloop()
| {
"repo_name": "babyliynfg/cross",
"path": "tools/project-creator/Python2.6.6/Lib/idlelib/aboutDialog.py",
"copies": "5",
"size": "6975",
"license": "mit",
"hash": -3643744699556664300,
"line_mean": 44.5,
"line_max": 80,
"alpha_frac": 0.5554121864,
"autogenerated": false,
"ratio": 3.537018255578093,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6592430441978092,
"avg_score": null,
"num_lines": null
} |
"""About Dialog for IDLE
"""
from Tkinter import *
import string, os
import textView
import idlever
class AboutDialog(Toplevel):
    """Modal about dialog for idle

    Shows IDLE/Python/Tk version information plus buttons that open the
    license, copyright and credits texts and IDLE's README, NEWS and
    CREDITS files.
    """
    def __init__(self,parent,title):
        # Build the dialog offset from the parent's top-left corner, then
        # make it modal: transient + grab_set route events here and
        # wait_window blocks the caller until the dialog is destroyed.
        Toplevel.__init__(self, parent)
        self.configure(borderwidth=5)
        self.geometry("+%d+%d" % (parent.winfo_rootx()+30,
                                  parent.winfo_rooty()+30))
        self.bg = "#707070"
        self.fg = "#ffffff"
        self.CreateWidgets()
        self.resizable(height=FALSE, width=FALSE)
        self.title(title)
        self.transient(parent)
        self.grab_set()
        self.protocol("WM_DELETE_WINDOW", self.Ok)
        self.parent = parent
        self.buttonOk.focus_set()
        self.bind('<Return>',self.Ok) #dismiss dialog
        self.bind('<Escape>',self.Ok) #dismiss dialog
        self.wait_window()
    def CreateWidgets(self):
        """Create all widgets; grid row numbers in frameBg define the layout."""
        frameMain = Frame(self, borderwidth=2, relief=SUNKEN)
        frameButtons = Frame(self)
        frameButtons.pack(side=BOTTOM, fill=X)
        frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
        self.buttonOk = Button(frameButtons, text='Close',
                               command=self.Ok)
        self.buttonOk.pack(padx=5, pady=5)
        #self.picture = Image('photo', data=self.pictureData)
        frameBg = Frame(frameMain, bg=self.bg)
        frameBg.pack(expand=TRUE, fill=BOTH)
        labelTitle = Label(frameBg, text='IDLE', fg=self.fg, bg=self.bg,
                           font=('courier', 24, 'bold'))
        labelTitle.grid(row=0, column=0, sticky=W, padx=10, pady=10)
        #labelPicture = Label(frameBg, text='[picture]')
        #image=self.picture, bg=self.bg)
        #labelPicture.grid(row=1, column=1, sticky=W, rowspan=2,
        #                  padx=0, pady=3)
        byline = "Python's Integrated DeveLopment Environment" + 5*'\n'
        labelDesc = Label(frameBg, text=byline, justify=LEFT,
                          fg=self.fg, bg=self.bg)
        labelDesc.grid(row=2, column=0, sticky=W, columnspan=3, padx=10, pady=5)
        labelEmail = Label(frameBg, text='email: idle-dev@python.org',
                           justify=LEFT, fg=self.fg, bg=self.bg)
        labelEmail.grid(row=6, column=0, columnspan=2,
                        sticky=W, padx=10, pady=0)
        labelWWW = Label(frameBg, text='www: http://www.python.org/idle/',
                         justify=LEFT, fg=self.fg, bg=self.bg)
        labelWWW.grid(row=7, column=0, columnspan=2, sticky=W, padx=10, pady=0)
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=8, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        labelPythonVer = Label(frameBg, text='Python version: ' + \
                               sys.version.split()[0], fg=self.fg, bg=self.bg)
        labelPythonVer.grid(row=9, column=0, sticky=W, padx=10, pady=0)
        # Normalize the last component of TkVersion's float repr to a
        # short patchlevel string (repr can produce long float tails).
        tkVer = repr(TkVersion).split('.')
        tkVer[len(tkVer)-1] = str('%.3g' % (float('.'+tkVer[len(tkVer)-1])))[2:]
        if tkVer[len(tkVer)-1] == '':
            tkVer[len(tkVer)-1] = '0'
        tkVer = string.join(tkVer,'.')
        labelTkVer = Label(frameBg, text='Tk version: '+
                           tkVer, fg=self.fg, bg=self.bg)
        labelTkVer.grid(row=9, column=1, sticky=W, padx=2, pady=0)
        # Row of buttons for the Python interpreter's texts.
        py_button_f = Frame(frameBg, bg=self.bg)
        py_button_f.grid(row=10, column=0, columnspan=2, sticky=NSEW)
        buttonLicense = Button(py_button_f, text='License', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowLicense)
        buttonLicense.pack(side=LEFT, padx=10, pady=10)
        buttonCopyright = Button(py_button_f, text='Copyright', width=8,
                                 highlightbackground=self.bg,
                                 command=self.ShowCopyright)
        buttonCopyright.pack(side=LEFT, padx=10, pady=10)
        buttonCredits = Button(py_button_f, text='Credits', width=8,
                               highlightbackground=self.bg,
                               command=self.ShowPythonCredits)
        buttonCredits.pack(side=LEFT, padx=10, pady=10)
        Frame(frameBg, borderwidth=1, relief=SUNKEN,
              height=2, bg=self.bg).grid(row=11, column=0, sticky=EW,
                                         columnspan=3, padx=5, pady=5)
        idle_v = Label(frameBg, text='IDLE version: ' + idlever.IDLE_VERSION,
                       fg=self.fg, bg=self.bg)
        idle_v.grid(row=12, column=0, sticky=W, padx=10, pady=0)
        # Row of buttons for IDLE's own documentation files.
        idle_button_f = Frame(frameBg, bg=self.bg)
        idle_button_f.grid(row=13, column=0, columnspan=3, sticky=NSEW)
        idle_about_b = Button(idle_button_f, text='README', width=8,
                              highlightbackground=self.bg,
                              command=self.ShowIDLEAbout)
        idle_about_b.pack(side=LEFT, padx=10, pady=10)
        idle_news_b = Button(idle_button_f, text='NEWS', width=8,
                             highlightbackground=self.bg,
                             command=self.ShowIDLENEWS)
        idle_news_b.pack(side=LEFT, padx=10, pady=10)
        idle_credits_b = Button(idle_button_f, text='Credits', width=8,
                                highlightbackground=self.bg,
                                command=self.ShowIDLECredits)
        idle_credits_b.pack(side=LEFT, padx=10, pady=10)
    def ShowLicense(self):
        # The 'license' site builtin.
        self.display_printer_text(license, 'About - License')
    def ShowCopyright(self):
        # The 'copyright' site builtin.
        self.display_printer_text(copyright, 'About - Copyright')
    def ShowPythonCredits(self):
        # The 'credits' site builtin.
        self.display_printer_text(credits, 'About - Python Credits')
    def ShowIDLECredits(self):
        # CREDITS.txt is historically Latin-1 encoded.
        self.ViewFile('About - Credits','CREDITS.txt', 'iso-8859-1')
    def ShowIDLEAbout(self):
        self.ViewFile('About - Readme', 'README.txt')
    def ShowIDLENEWS(self):
        self.ViewFile('About - NEWS', 'NEWS.txt')
    def display_printer_text(self, printer, title):
        # 'printer' is a site._Printer instance; use its private
        # internals to get the whole text without the interactive pager.
        printer._Printer__setup()
        data = '\n'.join(printer._Printer__lines)
        textView.TextViewer(self, title, None, data)
    def ViewFile(self, viewTitle, viewFile, encoding=None):
        """Display viewFile (relative to the idlelib directory) in a viewer.

        If an encoding is given, the file is decoded here and the text is
        passed to the viewer; otherwise the viewer reads the file itself.
        """
        fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), viewFile)
        if encoding:
            import codecs
            try:
                # Pass the requested encoding to codecs.open; previously
                # the parameter was accepted but silently ignored.
                textFile = codecs.open(fn, 'r', encoding)
            except IOError:
                import tkMessageBox
                tkMessageBox.showerror(title='File Load Error',
                                       message='Unable to load file %r .' % (fn,),
                                       parent=self)
                return
            else:
                data = textFile.read()
                textFile.close()
        else:
            data = None
        textView.TextViewer(self, viewTitle, fn, data=data)
    def Ok(self, event=None):
        # Dismiss the dialog; bound to Close, Return, Escape, WM close.
        self.destroy()
if __name__ == '__main__':
    # Manual smoke test: a button that pops up the (modal) dialog.
    root = Tk()
    def run():
        import aboutDialog
        aboutDialog.AboutDialog(root, 'About')
    Button(root, text='Dialog', command=run).pack()
    root.mainloop()
| {
"repo_name": "ericlink/adms-server",
"path": "playframework-dist/play-1.1/python/Lib/idlelib/aboutDialog.py",
"copies": "2",
"size": "7436",
"license": "mit",
"hash": -329993661620001900,
"line_mean": 43.6196319018,
"line_max": 82,
"alpha_frac": 0.5420925229,
"autogenerated": false,
"ratio": 3.6290873596876527,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0036203121527878133,
"num_lines": 163
} |
"""About models."""
from slugify import slugify
from sqlalchemy.dialects import postgresql
from sqlalchemy_utils import observes
from pygotham.core import db
from pygotham.events.query import EventQuery
__all__ = ('AboutPage',)
class AboutPage(db.Model):
    """About page.

    A per-event CMS-style page; navbar_path and slug together determine
    where it appears in the site's navbar hierarchy.
    """
    __tablename__ = 'about_pages'
    query_class = EventQuery
    id = db.Column(db.Integer, primary_key=True)
    # TODO: validate that the navbar_section / slug combination do not conflict
    # with an existing generated blueprint view route
    # The navbar_path dictates the location of this menu item in the
    # navbar hierarchy.
    navbar_path = db.Column(postgresql.ARRAY(db.String), nullable=False)
    # A slug may be empty. If it is, the item will be placed at the
    # root of the navbar hierarchy.
    slug = db.Column(db.String(255), default='', nullable=False)
    title = db.Column(db.String(255), nullable=False)
    # Page body, stored as reStructuredText (see rst_document).
    content = db.Column(db.Text, nullable=False)
    # NOTE(review): presumably inactive pages are filtered out when
    # rendering -- the filtering is not visible here; confirm in queries.
    active = db.Column(db.Boolean, nullable=False)
    event_id = db.Column(
        db.Integer, db.ForeignKey('events.id'), nullable=False,
    )
    event = db.relationship(
        'Event', backref=db.backref('about_pages', lazy='dynamic'),
    )
    # One page per (navbar position, slug) within an event.
    __table_args__ = (
        db.UniqueConstraint(
            'navbar_path', 'slug', 'event_id',
            name='ix_about_pages_navbar_path_slug_event_id',
        ),
    )
    def __str__(self):
        """Return a printable representation."""
        return self.title
    @observes('title')
    def _create_slug(self, title):
        """Create the slug for the page.

        Invoked by sqlalchemy_utils whenever title changes; only fills
        the slug when one was not set explicitly.
        """
        if not self.slug:
            self.slug = slugify(self.title)
    @property
    def rst_document(self):
        """Return the full reST document, including the title.

        The page's title will be used as the document heading, causing
        any headings defined in the page's content to be used as
        subheadings. To cut down on potential collisions, ``#`` symbols
        will be placed on the lines before and after the title.
        """
        lines = ('{divider}', '{page.title}', '{divider}', '{page.content}')
        return '\n'.join(lines).format(
            divider='#' * len(self.title), page=self)
| {
"repo_name": "PyGotham/pygotham",
"path": "pygotham/about/models.py",
"copies": "2",
"size": "2231",
"license": "bsd-3-clause",
"hash": 6041012963044899000,
"line_mean": 31.8088235294,
"line_max": 79,
"alpha_frac": 0.6355894218,
"autogenerated": false,
"ratio": 3.859861591695502,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 68
} |
"""About models."""
from slugify import slugify
from sqlalchemy_utils import observes
from pygotham.core import db
from pygotham.events.query import EventQuery
__all__ = ('AboutPage',)
class AboutPage(db.Model):
    """An event-scoped about page rendered into a navbar section."""

    __tablename__ = 'about_pages'

    query_class = EventQuery

    id = db.Column(db.Integer, primary_key=True)
    # TODO: validate that the navbar_section / slug combination do not
    # conflict with an existing generated blueprint view route
    navbar_section = db.Column(db.String(255), nullable=False)
    slug = db.Column(db.String(255), nullable=False)
    title = db.Column(db.String(255), nullable=False)
    content = db.Column(db.Text, nullable=False)
    active = db.Column(db.Boolean, nullable=False)
    event_id = db.Column(
        db.Integer, db.ForeignKey('events.id'), nullable=False,
    )
    event = db.relationship(
        'Event', backref=db.backref('about_pages', lazy='dynamic'),
    )

    # One page per (navbar_section, slug) pair within a given event.
    __table_args__ = (
        db.UniqueConstraint(
            'navbar_section', 'slug', 'event_id',
            name='ix_about_pages_navbar_section_slug_event_id',
        ),
    )

    def __str__(self):
        """Return a printable representation (the page title)."""
        return self.title

    @observes('title')
    def _create_slug(self, title):
        """Fill ``slug`` from the title when none was set explicitly."""
        if not self.slug:
            self.slug = slugify(self.title)

    @property
    def rst_document(self):
        """Return the full reST document, including the title.

        The title becomes the document heading (with ``#`` overline and
        underline), so headings inside the content act as subheadings.
        """
        divider = '#' * len(self.title)
        return '\n'.join((divider, self.title, divider, self.content))
| {
"repo_name": "djds23/pygotham-1",
"path": "pygotham/about/models.py",
"copies": "1",
"size": "1974",
"license": "bsd-3-clause",
"hash": 2502011231694718000,
"line_mean": 30.8387096774,
"line_max": 79,
"alpha_frac": 0.625633232,
"autogenerated": false,
"ratio": 3.833009708737864,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4958642940737864,
"avg_score": null,
"num_lines": null
} |
"""AboutModules handlers for the application.
"""
# stdlib imports
import json
# local imports
from app.forms.about_modules import AboutModuleForm
from app.handlers.templates.admin.base import AdminTemplateHandler
from app.models.about_modules import AboutModule
class AboutModuleHandler(AdminTemplateHandler):
    """Base handler for AboutModule admin pages; injects shared context."""

    form = AboutModuleForm()

    def render(self, template, template_data=None):
        """Render ``template`` with the common about-modules context merged in.

        ``template_data`` now defaults to ``None`` instead of a mutable
        ``{}``: the old shared default dict accumulated keys across calls.
        """
        if template_data is None:
            template_data = {}
        template_data.update({
            'description': 'Manage your about modules',
            'fields': self.form.fields,
            'title': 'About Modules',
            'type': 'about_modules',
        })
        return super(AboutModuleHandler, self).render(template, template_data)
class ListHandler(AboutModuleHandler):
    """List all AboutModule records as JSON for the admin list view."""

    def get(self):
        records = AboutModule.fetch_cached_dataset()
        context = {'json_records': json.dumps(records)}
        self.render('admin/list.html', context)
class DetailHandler(AboutModuleHandler):
    """Render the create/edit form for a single AboutModule."""

    def get(self, id=None):
        serialized = None
        if id:
            module = AboutModule.get_by_id(int(id))
            if module is None:
                self.abort(404)
            self.form = AboutModuleForm(None, module)
            serialized = json.dumps(module.to_dict())
        context = {
            'form': self.form,
            'json_record': serialized
        }
        self.render('admin/form.html', context)
| {
"repo_name": "mjmcconnell/sra",
"path": "src-server/app/handlers/templates/admin/about_modules.py",
"copies": "1",
"size": "1340",
"license": "apache-2.0",
"hash": -6078901102298042000,
"line_mean": 24.7692307692,
"line_max": 78,
"alpha_frac": 0.623880597,
"autogenerated": false,
"ratio": 4.135802469135802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5259683066135802,
"avg_score": null,
"num_lines": null
} |
#About
#'Bit:watch' is a Binary Watch programme written in MicroPython for the BBC Micro:bit by @petejbell and distributed under a MIT licence
#Please share with me what you do with it, I'd love to see what you do!
#You can find a tutorial showing you how to build a strap for your watch here: https://t.co/li9CktVJhg
#Instructions
#1) Download Mu from here: https://github.com/ntoll/mu
#2) Copy and paste this BitWatch code to Mu, connect your Micro:bit to your computer and then flash the code to your Micro:bit
#3) The BitWatch will display 18:50 as the time for 10 seconds and will then show '18:51'.
# Use Button A to set the Hours and B to set the Minutes. Press each one and you will see the hours/minutes increment on the Micro:bit and the Repl console.
# Use Buttons A+B together to reset seconds to '0'.
#
# Column 0 shows the first digit in the hours (in 24hr clock)
# Column 1 shows the second digit.
# Column 2 shows the seconds flashing away.
# Column 3 shows the first digit in the minutes
# Column 4 shows the second digit.
#For a crash course on binary, see here: http://www.bbc.co.uk/education/guides/z26rcdm/revision/2
#Sets up microbit
from microbit import *
# Sets time variables -- the watch starts at 18:50:50 (24h clock).
hrs = 18
mins = 50
sec = 50
# String mirrors of the time, rebuilt each loop for printtime()/displaybinaries().
hours = []
minutes = []
seconds = []
# Sets brightness of time digits (0-9 on the micro:bit display)
b = 9
#defines functions to display time digits
# Digit-drawing functions: each digit lights a binary pattern of rows in
# column x (row 0 = bit 8, row 1 = bit 4, row 2 = bit 2, row 3 = bit 1).
# The ten near-identical functions are collapsed onto one helper; the old
# trailing commas (which built and discarded one-element tuples) are gone.

def _light_rows(x, rows):
    """Blank column x, then light the given rows at brightness b."""
    zero(x)
    for row in rows:
        display.set_pixel(x, row, b)

def one(x):
    _light_rows(x, (3,))

def two(x):
    _light_rows(x, (2,))

def three(x):
    _light_rows(x, (3, 2))

def four(x):
    _light_rows(x, (1,))

def five(x):
    _light_rows(x, (3, 1))

def six(x):
    _light_rows(x, (2, 1))

def seven(x):
    _light_rows(x, (1, 2, 3))

def eight(x):
    _light_rows(x, (0,))

def nine(x):
    _light_rows(x, (0, 3))

def zero(x):
    # Blank rows 0-3 of column x (the bottom row, 4, is left untouched).
    for i in range(0, 4):
        display.set_pixel(x, i, 0)
#function to create ticking seconds
def fadesecs(x):
    """Set the two middle pixels of the seconds column (2) to brightness x."""
    for row in (2, 1):
        display.set_pixel(2, row, x)
#functions to create a background to show the binary display 'area' (There must be a more efficient way of doing this! Tweet me @petejbell if you can help!)
def background(x, y):
    """Dimly light pixel (x, y) if it is currently off, so the binary
    display 'area' stays visible where no bit is set."""
    if not display.get_pixel(x, y):
        display.set_pixel(x, y, 1)
def backgrounds():
    """Backlight the four digit columns (0, 1, 3 and 4).

    The flashing seconds column (2) and the bottom row are skipped.
    """
    for row in range(4):
        for col in (0, 1, 3, 4):
            background(col, row)
#function to print the time to Repl in MU f(or testing/debugging)
def printtime():
print(str(hours)+":"+str(minutes)+":"+str(seconds))
# Digit-drawing functions, ordered so that binaries[d - 1] draws digit d;
# d == 0 wraps to binaries[-1], which is zero().  Used by displaybinaries().
binaries = [one, two, three, four, five, six, seven, eight, nine, zero]
# Show the time in binary: hours in columns 0-1, minutes in columns 3-4,
# using the digit functions via the `binaries` list.
def displaybinaries():
    global mins #each variable must be defined as 'global' (otherwise the function thinks they are defined 'locally', within itself)
    global hrs
    global minutes
    global hours
    if mins<10:
        # Single digit: binaries[d-1] draws digit d (d == 0 hits binaries[-1] == zero).
        binaries[mins-1](4) #sets column 4 to digit from minutes (if mins between 0 and 9)
        zero(3) #clears column 3
        backgrounds() #calls the backgrounds to (dimly) light 'off' pixels
    elif mins > 9:
        minutes = [int(i) for i in str(mins)] #creates a list of two digits from the string of mins
        binaries[minutes[0]-1](3) #calls the binaries function to display the first digit
        binaries[minutes[1]-1](4) #calls the binaries function to display the second digit
        backgrounds()
    if hrs<10:
        binaries[hrs-1](1)
        zero(0)
        backgrounds()
    elif hrs > 9:
        hours = [int(i) for i in str(hrs)]
        binaries[hours[0]-1](0)
        binaries[hours[1]-1](1)
        backgrounds()
# Poll the buttons, update the time accordingly, then sleep for x ms.
def sleepbutton(x):
    """Button A advances the hour, button B advances the minute (zeroing
    the seconds so the new minute starts cleanly), then sleep x ms."""
    global sec
    global hrs
    global mins
    if button_a.was_pressed():
        # Wrap 23 -> 0.  The original `if hrs < 24` let the clock display
        # an invalid hour 24 for one press before resetting.
        hrs = (hrs + 1) % 24
        displaybinaries()
        print(hrs)
    if button_b.was_pressed():
        # Wrap 59 -> 0 (the original `if mins < 60` allowed minute 60).
        mins = (mins + 1) % 60
        sec = 0
        displaybinaries()
        print(mins)
    # NOTE: the original A+B handler (reset seconds while both buttons are
    # held) was commented out as non-working and has been removed.
    sleep(x)
# Main loop: roughly one second per iteration, split into two half-second
# phases so the seconds indicator (column 2) blinks between dim and bright.
while True:
    for i in range(0,5): #iterates 5 times (x 100 = 500)... but....
        sleepbutton(99) #The code runs a little slow/fast. Play with this number to get it accurate!
    fadesecs(1) #calls function to 'flash' seconds
    for i in range(0,5): #iterates 5 times again
        sleepbutton(98) #see above
    fadesecs(4) #calls function to 'flash' seconds
    sec += 1
    if sec % 60 == 0: #this section increments time
        mins += 1
        if mins % 60 == 0:
            hrs += 1
            mins = 0
        if hrs % 24 == 0:
            hrs = 0
        # NOTE(review): `sec` itself is never reset, so the REPL mirror below
        # shows raw elapsed seconds rather than 0-59 -- confirm if intended.
    seconds=str(sec)
    minutes=str(mins)
    hours=str(hrs)
    printtime()
    displaybinaries()
| {
"repo_name": "petejbell/BitWatch",
"path": "BitWatch.py",
"copies": "1",
"size": "5679",
"license": "mit",
"hash": -8434066833711867000,
"line_mean": 32.8035714286,
"line_max": 159,
"alpha_frac": 0.6145448142,
"autogenerated": false,
"ratio": 3.334703464474457,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44492482786744564,
"avg_score": null,
"num_lines": null
} |
# Title-screen / about text shown by the bot.
about = """
              ^
             / \\
            /   \\
           /     \\
          /       \\
         /         \\
        /           \\
        |  IRC Hack |
        |           |
        |           |
        |           |
        | A game by |
        |  Gustavo  |
        |   Ramos   |
        | Rehermann |
     -~=<=============>=~-
          \\6046|/
           |6046
           6046|
           |6046

A MUD-like IRC game about
exploring, non-Euclidean
rooms, corridors and reaching
downstairs for the next level
(a rather rogue one).

Encountering monsters, destroying
the Seal of Yendor and bringing
back the salvation for the entire
Upperland!

And then, in Chapter 2, sealing
Gehennom as you accidentally opened
it for the Amulet.

Chapter 3 COMING SOON!

I accept storyboard donations for
the Chapter 3: Upperland Community
at the following e-mail:

gugurehermann@gmail.com
"""
# Fix: str.strip() returns a new string -- the original call discarded the
# result, so the surrounding newlines were never actually removed.
about = about.strip("\n")
import plugincon
import numpy
import random
import gamethinker
import wordgen
import pylab as plt
import multiprocessing
class SequentialDict(object):
    """An ordered mapping stored as two parallel lists of keys and values.

    Unlike ``dict``, keys may repeat and insertion order is preserved.
    An ``int`` subscript indexes by position; any other subscript looks
    the value up by key.
    """

    def __init__(self, x=None):
        # Fixes: the original comprehensions read ``x[0]``/``x[1]`` instead
        # of the loop variable (every slot held the first pair), and used a
        # shared mutable ``[]`` default.
        pairs = list(x) if x is not None else []
        self._keys = [pair[0] for pair in pairs]
        self._values = [pair[1] for pair in pairs]

    @classmethod
    def from_pairs(cls, key, value):
        """Build from parallel sequences of keys and values."""
        return cls(zip(key, value))

    @classmethod
    def fill(cls, key, value, size):
        """Build with ``size`` copies of (key, value)."""
        return cls(((key, value),) * size)

    @classmethod
    def empty(cls):
        return cls([])

    def __iter__(self):
        # Fix: must return an iterator; the original returned the key list
        # itself, which makes ``for k in d`` raise TypeError.
        return iter(self._keys)

    def has_value(self, value):
        return value in self.values()

    def keys(self):
        return self._keys

    def values(self):
        # Fix: has_value() and items() called self.values(), which the
        # original class never defined.
        return self._values

    def insert(self, key, value, index=0):
        self._keys.insert(index, key)
        self._values.insert(index, value)

    def __setitem__(self, key, value):
        # Note: always appends; an existing key is not overwritten.
        self._keys.append(key)
        self._values.append(value)

    def __getitem__(self, key):
        if type(key) is int:
            return self._values[key]
        for k, v in zip(self._keys, self._values):
            if k == key:
                return v
        # Fix: the message now previews the *keys* (the original listed the
        # values under "Keys available").
        raise KeyError("Key '{}' not found in this SequentialDict!\nKeys available: {}{}\nAccess the SequentialDict's keys() function for more.".format(
            key,
            ", ".join(repr(k) for k in self._keys[:9]),
            ("..." if len(self._keys) > 9 else "")
        ))

    def __add__(self, other):
        """Append the pairs of a dict-like object (mutates and returns self)."""
        if isinstance(other, dict):
            self._keys.extend(other.keys())
            self._values.extend(other.values())
            return self
        raise ValueError("{} must be a dict-like class!".format(other))

    def __sub__(self, other):
        """Prepend the pairs of a dict-like object (mutates and returns self)."""
        if isinstance(other, dict):
            # Fix: list(view) + list works on both Python 2 and 3; the
            # original added a dict view directly to a list.
            self._keys = list(other.keys()) + self._keys
            self._values = list(other.values()) + self._values
            return self
        raise ValueError("{} must be a dict-like class!".format(other))

    def extend(self, other):
        self += other
        return self

    def items(self):
        return zip(self.keys(), self.values())
# NOTE(review): appears unused/stale.  The wrapper discards x, y, z and
# calls Chunk with a single argument, but Chunk.__init__ requires
# (game, level, parent_room) -- calling __wrapper__ would raise TypeError.
def start_chunk(other_room):
    def __wrapper__(x, y, z):
        return Chunk(other_room)
    return __wrapper__
class Room(object):
    """A room: a 1x2x2 grid of chunks plus a human-readable descriptor.

    The descriptor text was previously duplicated verbatim in __init__ and
    new_link; it is now built by the shared _build_descriptor helper.
    """

    def __init__(self, game, level):
        self.light = random.random()
        self.chunks = numpy.array([[[Chunk(game, level, self) for _ in xrange(2)] for _ in xrange(2)] for _ in xrange(1)])
        self.name = wordgen.gen_word(1, 4)
        self.links = []
        self.all_stuff = []
        self.game = game
        self.level = level
        # Collect (description, "a, b, c") pairs for every visible object.
        for a, x in enumerate(list(self.chunks)):
            for b, y in enumerate(x):
                for c, z in enumerate(y):
                    if z.objects != []:
                        for o in z.objects:
                            if o.visible(self.light):
                                self.all_stuff.append((o.description(), str("{}, {}, {}".format(a, b, c))))
        if not self.all_stuff:
            self.all_stuff = [["nothing", ""]]
        self.descriptor = self._build_descriptor()
        if game.starting_chunk is None:
            # NOTE(review): random.sample over the 3-D array picks along the
            # first axis, yielding a 2x2 sub-array rather than a single
            # Chunk -- confirm intended before relying on starting_chunk.
            game.starting_chunk = random.sample(self.chunks, 1)[0]

    def _build_descriptor(self):
        """Return the room description (one source of truth for the text)."""
        return "This is a {} room. Somehow your brain associates it with the name {}. You see {} in here. There are {} corridors: {}.".format(
            self.game.light_descriptors[int(self.light * len(self.game.light_descriptors) - 1)],
            self.name,
            ", ".join(["{} {}".format(v, k) for k, v in self.all_stuff]),
            len(self.links),
            ", ".join([x.name for x in self.links])
        )

    def new_link(self, link):
        """Refresh the descriptor after a corridor was attached."""
        self.descriptor = self._build_descriptor()
class GlobalRoom(Room):
    """The catch-all 'room' hosting every chunk with no parent room."""

    def __init__(self, game, level):
        self.game = game
        self.level = level
        self.chunks = []
        self.name = "Dungeons of Doom"
        self.light = random.uniform(0.075, 0.5)
        brightness = self.game.light_descriptors[int(self.light * len(self.game.light_descriptors) - 1)]
        self.descriptor = "This is a {} corridor.".format(brightness)
class Chunk(object):
    """One cell of a room's grid; may hold randomly generated objects."""

    def __init__(self, game, level, parent_room=None):
        # Chunks without an explicit room belong to the level's global room.
        self.parent_room = parent_room if parent_room else level.global_room
        self.objects = list()
        self.game = game
        self.level = level
        # Roll each registered object type against its spawn chance (percent).
        for obj_type, chance in game.generation_chance.items():
            if chance > random.uniform(0, 100):
                self.objects.append(obj_type(game, self, level))
        visible = [str(o.description()) for o in self.objects if o.visible(self.parent_room.light)]
        self.stuff = ", ".join(visible) or "nothing"
        self.descriptor = "You see {} here.".format(self.stuff)

    def step_into(self, other):
        """Notify every contained object that `other` stepped into the chunk."""
        for o in self.objects:
            o.chunk_step(other)
class Corridor(Chunk):
    """A chunk linking rooms; registers itself with each room it connects."""

    def __init__(self, game, level, rooms):
        Chunk.__init__(self, game, level)
        self.game = game
        self.level = level
        self.name = wordgen.gen_word(1, 4)
        self.connected_rooms = rooms
        self.parent_room.chunks.append(self)
        for room in rooms:
            room.links.append(self)
            room.new_link(self)
class ChunkObject(object):
    # NOTE(review): this class attribute is immediately shadowed by the
    # description() *method* below, so type(self).description resolves to the
    # method object, not this string.  description() therefore returns a
    # function for any subclass that does not re-bind `description` -- looks
    # like a latent bug; confirm the intended contract before relying on it.
    description = "a rather generic object"

    def __init__(self, game, chunk, level):
        self.game = game
        self.chunk = chunk
        self.level = level

    def description(self):
        """Return type(self).description (see shadowing NOTE above)."""
        return type(self).description

    def chunk_step(self, stepper):
        """Hook called by Chunk.step_into when `stepper` enters the chunk."""
        pass

    def turn(self):
        # Per-turn hook; no callers are visible in this part of the file.
        pass

    def visible(self, light):
        """Objects are only visible when the room light exceeds 0.3."""
        return light > 0.3

    def use(self, stepper):
        # Interaction hook; overridden e.g. by Downstairs.
        pass
class Downstairs(ChunkObject):
    """Stairs leading one dungeon level deeper."""

    # Percent spawn chance, harvested into Game.generation_chance.
    chance = 9

    def __init__(self, game, chunk, level):
        # Fix: the original dropped all three arguments, so the standard
        # ChunkObject attributes (game/chunk/level) were never set.
        ChunkObject.__init__(self, game, chunk, level)
        level.downstairs.append(self)

    def use(self, stepper):
        """Send the stepper down to the next level."""
        stepper.level += 1
class Level(object):
    """One dungeon floor: rooms wired together by randomly chosen corridors."""

    def random_links(self):
        # Returns a random batch of 2-5 rooms to connect, or None once every
        # room has been linked at least once (sentinel for iter() below).
        if len(self.linked_rooms) == len(self.rooms):
            return None
        r = random.sample(self.rooms, random.randint(2, 5))
        self.linked_rooms.extend(r)
        self.linked_rooms = list(set(self.linked_rooms))
        return r

    def __init__(self, game):
        self.game = game
        self.downstairs = []
        self.global_room = GlobalRoom(game, self)
        # Floor size grows slowly with the number of floors already generated.
        self.rooms = [Room(game, self) for _ in xrange(random.randint(9, 21) + len(game.level_cache) / random.randint(20, 50))]
        self.linked_rooms = []
        # iter(f, None) keeps calling random_links until it returns None;
        # each batch becomes one corridor between its first two rooms only.
        self.corridors = [Corridor(game, self, (r[0], r[1])) for r in iter(self.random_links, None)]
        if not self.downstairs:
            # Guarantee at least one way down: drop stairs into a random chunk.
            c = random.choice([w for y in self.rooms for x in list(y.chunks) for z in x for w in z])
            d = Downstairs(game, c, self)
            c.objects.append(d)
            self.downstairs = [d]
class Game(object):
    """Global game state: dungeon levels, players and object spawn tables."""

    def __init__(self):
        # Human-readable light levels, indexed by the scaled room light value.
        self.light_descriptors = [
            "Black",
            "Pretty dark",
            "Dark",
            "Mildly dark",
            "Slightly bright",
            "Bright",
            "White",
        ]
        self.level_cache = []
        self.players = SequentialDict()
        self.starting_chunk = None
        self.generation_chance = {}

        # Walk the ChunkObject subclass tree and record each type's spawn
        # chance (ChunkObject itself is excluded -- only its subclasses).
        def register_object(co):
            for x in co.__subclasses__():
                self.generation_chance[x] = x.chance
                register_object(x)
        register_object(ChunkObject)

        print "Generating dungeon..."
        self.level_cache.append(Level(self))
        # Each pass appends one level per downstairs of the most recently
        # generated level and points those stairs at the new level.
        for _ in xrange(random.randint(20, 50) - 2):
            for d in self.level_cache[-1].downstairs:
                self.level_cache.append(Level(self))
                d.target = self.level_cache[-1]
        print "Dungeon generated!"
class Player(object):
    """A joined IRC player: nickname, current depth, and location chunk."""

    def __init__(self, game, name):
        self.name = name
        self.level = 0
        self.chunk = game.starting_chunk
# Module-level singleton world; rebuilt by the ih_resetgame command.
game = Game()
@plugincon.easy_bot_command("ih_resetgame", True)
def reset_irchack(message, raw):
    """Admin command: discard the current world and generate a fresh one."""
    if raw:
        return
    global game
    game = Game()
    # Fixed typo in the user-facing message ("succesfully").
    return "Reset successfully!"
@plugincon.easy_bot_command("ih_join")
def join_irchack(message, raw):
    """Register the sender as a new player and return the welcome banner."""
    if raw:
        return
    global game
    if message["nickname"] in game.players.keys():
        return "You already joined!"
    game.players[message["nickname"]] = Player(game, message["nickname"])
    # NOTE(review): "temible" and "flightes" below look like typos
    # ("terrible", "flights") -- confirm they are not intentional flavor.
    return [x.format(player=message["nickname"], levels=len(game.level_cache)) for x in [
        "{player}: Welcome to IRCHack!",
        "A game where you are who you want, and let your imagination free",
        "as in reading a book; a game where you make out your OWN story against",
        "the temible monsters of the depths of the Dangling Dungeons of Doom!",
        "...Currently with {levels} flightes of stair down into your quest...",
    ]]
def player_command(command_name):
    """Decorator factory: only run a bot command for players who joined.

    The wrapped handler receives (message, connector, index, player, game).
    """
    def __decorator__(func):
        @plugincon.bot_command(command_name)
        def __wrapper__(message, connector, index, raw):
            if raw:
                return
            global game
            p = message["nickname"]
            if p not in game.players.keys():
                connector.send_message(
                    index,
                    plugincon.get_message_target(connector, message, index),
                    "You didn't join yet!"
                )
                # Fix: without this early return the command body still ran
                # and game.players[p] raised KeyError for unjoined players.
                return
            return func(message, connector, index, game.players[message["nickname"]], game)
        return __wrapper__
    return __decorator__
def plot_rooms(num_rooms):
    """Show a rooms-per-floor plot; run in a child process (blocks on show)."""
    plt.plot(num_rooms)
    plt.xlabel("Dungeon Floor")
    plt.ylabel("Number of Rooms")
    plt.show()
@plugincon.easy_bot_command("ih_plot", True)
def plot_ih_rooms(message, raw):
    """Plot the number of rooms on each dungeon floor in a child process."""
    if raw:
        return
    global game
    num_rooms = [len(floor.rooms) for floor in game.level_cache]
    job = multiprocessing.Process(target=plot_rooms, args=(num_rooms,))
    job.start()
    return "Plotted rooms with success!"
| {
"repo_name": "Gustavo6046/GusBot-2",
"path": "plugins/irchack.py",
"copies": "1",
"size": "11268",
"license": "mit",
"hash": -6980256668046021000,
"line_mean": 26.6855036855,
"line_max": 156,
"alpha_frac": 0.5549343273,
"autogenerated": false,
"ratio": 3.712685337726524,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4767619665026524,
"avg_score": null,
"num_lines": null
} |
# About
# this module contains different metrics of uniformity
# and the metrics of quality as well (which support weights, actually)
from __future__ import division, print_function
import numpy
import pandas
from sklearn.base import BaseEstimator
from sklearn.neighbors.unsupervised import NearestNeighbors
from sklearn.utils.validation import column_or_1d
from sklearn.metrics import roc_curve
from .commonutils import check_sample_weight, computeSignalKnnIndices
from . import metrics_utils as ut
from hep_ml.commonutils import take_features, check_xyw, weighted_percentile, check_arrays
__author__ = 'Alex Rogozhnikov'
__all__ = ['sde', 'cvm_flatness', 'theil_flatness']
"""
README on quality metrics
Some notation used here
IsSignal - is really signal
AsSignal - classified as signal
IsBackgroundAsSignal - background, but classified as signal
... and so on. Cute, right?
There are many ways to denote this things
tpr = s = isSasS / isS
fpr = b = isBasS / isB
signal efficiency = tpr = s
background efficiency = isBasB / isB = 1 - fpr
background rejection = background efficiency (physicists don't agree with the last line)
"""
# region Quality metrics
def roc_curve_splitted(data1, data2, sample_weight1=None, sample_weight2=None):
    """Does exactly the same as sklearn.metrics.roc_curve,
    but for predictions kept in two arrays: data1 is labeled 0, data2 is
    labeled 1.

    Returns: fpr, tpr, thresholds -- parallel arrays of equal length
    (the sklearn roc_curve return order; the old docstring had it reversed).
    """
    sample_weight1 = check_sample_weight(data1, sample_weight=sample_weight1)
    # Fix: weights for data2 must be validated against data2, not data1.
    sample_weight2 = check_sample_weight(data2, sample_weight=sample_weight2)
    data = numpy.concatenate([data1, data2])
    sample_weight = numpy.concatenate([sample_weight1, sample_weight2])
    labels = numpy.concatenate([numpy.zeros(len(data1)), numpy.ones(len(data2))])
    return roc_curve(labels, data, sample_weight=sample_weight)
def compute_sb(y_true, y_pred, sample_weight):
    """Return the weighted (signal efficiency, background pass rate).

    Arguments are assumed to be pre-checked; y_pred is an array of 0 and 1.
    """
    total_s = sample_weight[y_true > 0.5].sum()
    total_b = sample_weight[y_true < 0.5].sum()
    passed_s = sample_weight[(y_true * y_pred) > 0.5].sum()
    passed_b = sample_weight[((1 - y_true) * y_pred) > 0.5].sum()
    return passed_s / total_s, passed_b / total_b
def efficiency_score(y_true, y_pred, sample_weight=None):
    """Efficiency = correctly classified signal / all true signal.

    Efficiency == recall; the epsilon terms make the no-signal (ill-defined)
    case return -0.1 instead of dividing by zero.
    """
    sample_weight = check_sample_weight(y_true, sample_weight=sample_weight)
    assert len(y_true) == len(y_pred), "Different size of arrays"
    true_signal = numpy.sum(y_true * sample_weight) - 1e-6
    matched_signal = numpy.sum(y_true * y_pred * sample_weight) + 1e-7
    # (equivalent to recall_score, but weighted and without the warnings)
    return matched_signal / true_signal
def background_efficiency_score(y_true, y_pred, sample_weight=None):
    """BackgroundEfficiency == isBasB / isB == 1 - fpr."""
    flipped_true = 1 - y_true
    flipped_pred = 1 - y_pred
    return efficiency_score(flipped_true, flipped_pred, sample_weight=sample_weight)
def as_signal_score(y_true, y_pred, sample_weight=None):
    """Weighted fraction of events classified as signal."""
    sample_weight = check_sample_weight(y_true, sample_weight)
    assert len(y_true) == len(y_pred), "Different size of arrays"
    total_weight = numpy.sum(sample_weight)
    return numpy.sum(y_pred * sample_weight) / total_weight
def sensitivity(y_true, y_score, sample_weight=None):
    """Return s / sqrt(s + b).

    :param y_true: array-like of shape [n_samples] with labels of samples (0 or 1)
    :param y_score: array-like of shape [n_samples] with predicted labels (0 or 1)
    """
    y_true, y_score, sample_weight = \
        ut.check_metrics_arguments(y_true, y_score, sample_weight=sample_weight, two_class=True, binary_pred=True)
    s, b = compute_sb(y_true, y_score, sample_weight=sample_weight)
    # epsilon keeps the denominator finite when both rates are zero
    denominator = numpy.sqrt(s + b + 1e-6)
    return s / denominator
def optimal_sensitivity(y_true, y_score, sample_weight=None):
    """Best achievable s / sqrt(s + b) over all thresholds.

    Here s and b are normalized to lie in [0, 1].
    """
    from sklearn.metrics import roc_curve
    fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)
    return numpy.max(tpr / numpy.sqrt(tpr + fpr + 1e-6))
# endregion
"""
README on flatness
these metrics are unfortunately more complicated than the usual ones
and require more information: not only predictions and classes,
but also mass (or other variables along which we want to have uniformity)
Here we compute the different metrics of uniformity of predictions:
SDE - the standard deviation of efficiency
Theil- Theil index of Efficiency (Theil index is used in economics)
KS - based on Kolmogorov-Smirnov distance between distributions
CVM - based on Cramer-von Mises similarity between distributions
"""
# region Uniform metrics (current version)
class AbstractMetric(BaseEstimator):
    """Base class for uniformity/quality metrics with an sklearn-like API."""

    def fit(self, X, y, sample_weight=None):
        """
        If metrics needs some initial heavy computations,
        this can be done here.
        The signature mirrors the sklearn estimator ``fit(X, y, sample_weight)``.
        """
        pass

    def __call__(self, y, proba, sample_weight):
        """
        Compute value of metrics
        :param proba: numpy.array of shape [n_samples, n_classes]
            with predicted probabilities (typically returned by predict_proba)
        Events should be passed in the same order, as to method fit
        """
        raise NotImplementedError('To be derived by descendant')
class AbstractBinMetrics(AbstractMetric):
    def __init__(self, n_bins, uniform_features, uniform_label=0):
        """
        Abstract class for bin-based metrics of uniformity.

        :param n_bins: int, number of bins along each axis
        :param uniform_features: list of strings, features along which uniformity is desired ()
        :param uniform_label: int, label of class in which uniformity is desired
            (typically, 0 is bck, 1 is signal)
        """
        self.uniform_label = uniform_label
        self.uniform_features = uniform_features
        self.n_bins = n_bins

    def fit(self, X, y, sample_weight=None):
        """ Prepare different things for fast computation of metrics """
        X, y, sample_weight = check_xyw(X, y, sample_weight=sample_weight)
        # cache only the events of the class whose uniformity is measured
        self._mask = numpy.array(y == self.uniform_label)
        assert sum(self._mask) > 0, 'No event of class, along which uniformity is desired'
        self._masked_weight = sample_weight[self._mask]
        X_part = numpy.array(take_features(X, self.uniform_features))[self._mask, :]
        # assign every event to a rectangular bin in the uniform-feature space
        self._bin_indices = ut.compute_bin_indices(X_part=X_part, n_bins=self.n_bins)
        self._bin_weights = ut.compute_bin_weights(bin_indices=self._bin_indices,
                                                   sample_weight=self._masked_weight)
class BinBasedSDE(AbstractBinMetrics):
    """Standard Deviation of Efficiency, computed over rectangular bins."""

    def __init__(self, uniform_features, n_bins=10, uniform_label=0, target_rcp=None, power=2.):
        AbstractBinMetrics.__init__(self, n_bins=n_bins,
                                    uniform_features=uniform_features,
                                    uniform_label=uniform_label)
        self.power = power
        self.target_rcp = target_rcp

    def __call__(self, y, proba, sample_weight):
        y_pred = proba[self._mask, self.uniform_label]
        # Use a local default: __call__ must not mutate estimator parameters
        # (the original overwrote self.target_rcp during scoring).
        target_rcp = self.target_rcp if self.target_rcp is not None else [0.5, 0.6, 0.7, 0.8, 0.9]
        result = 0.
        cuts = weighted_percentile(y_pred, target_rcp, sample_weight=self._masked_weight)
        for cut in cuts:
            bin_efficiencies = ut.compute_bin_efficiencies(y_pred, bin_indices=self._bin_indices,
                                                           cut=cut, sample_weight=self._masked_weight)
            result += ut.weighted_deviation(bin_efficiencies, weights=self._bin_weights, power=self.power)
        return (result / len(cuts)) ** (1. / self.power)
class BinBasedTheil(AbstractBinMetrics):
    """Theil index of efficiency, computed over rectangular bins."""

    def __init__(self, uniform_features, n_bins=10, uniform_label=0, target_rcp=None, power=2.):
        AbstractBinMetrics.__init__(self, n_bins=n_bins,
                                    uniform_features=uniform_features,
                                    uniform_label=uniform_label)
        self.power = power
        self.target_rcp = target_rcp

    def __call__(self, y, proba, sample_weight):
        y_pred = proba[self._mask, self.uniform_label]
        # Use a local default: __call__ must not mutate estimator parameters
        # (the original overwrote self.target_rcp during scoring).
        target_rcp = self.target_rcp if self.target_rcp is not None else [0.5, 0.6, 0.7, 0.8, 0.9]
        result = 0.
        cuts = weighted_percentile(y_pred, target_rcp, sample_weight=self._masked_weight)
        for cut in cuts:
            bin_efficiencies = ut.compute_bin_efficiencies(y_pred, bin_indices=self._bin_indices,
                                                           cut=cut, sample_weight=self._masked_weight)
            result += ut.theil(bin_efficiencies, weights=self._bin_weights)
        return result / len(cuts)
class BinBasedCvM(AbstractBinMetrics):
    """Cramer-von Mises flatness, computed over rectangular bins."""

    def __init__(self, uniform_features, n_bins=10, uniform_label=0, power=2.):
        AbstractBinMetrics.__init__(self, n_bins=n_bins,
                                    uniform_features=uniform_features,
                                    uniform_label=uniform_label)
        self.power = power

    def __call__(self, y, proba, sample_weight):
        y_pred = proba[self._mask, self.uniform_label]
        global_data, global_weight, global_cdf = ut.prepare_distibution(y_pred, weights=self._masked_weight)
        result = 0.
        for bin, bin_weight in enumerate(self._bin_weights):
            if bin_weight <= 0:
                continue
            bin_mask = self._bin_indices == bin
            local_distribution = y_pred[bin_mask]
            local_weights = self._masked_weight[bin_mask]
            result += bin_weight * ut._cvm_2samp_fast(global_data, local_distribution,
                                                      global_weight, local_weights, global_cdf)
        # Fix: the original lacked this return, so __call__ always gave None
        # (compare KnnBasedCvM, which does return its accumulator).
        return result
class AbstractKnnMetrics(AbstractMetric):
    def __init__(self, uniform_features, n_neighbours=50, uniform_label=0):
        """
        Abstract class for knn-based metrics of uniformity.

        :param n_neighbours: int, number of neighbours
        :param uniform_features: list of strings, features along which uniformity is desired ()
        :param uniform_label: int, label of class in which uniformity is desired
            (typically, 0 is bck, 1 is signal)
        """
        self.uniform_label = uniform_label
        self.uniform_features = uniform_features
        self.n_neighbours = n_neighbours

    def fit(self, X, y, sample_weight=None):
        """ Prepare different things for fast computation of metrics """
        X, y, sample_weight = check_xyw(X, y, sample_weight=sample_weight)
        # cache only the events of the class whose uniformity is measured
        self._mask = numpy.array(y == self.uniform_label)
        assert sum(self._mask) > 0, 'No events of uniform class!'
        self._masked_weight = sample_weight[self._mask]
        X_part = numpy.array(take_features(X, self.uniform_features))[self._mask, :]
        # computing knn indices: each event's group is its n nearest neighbours
        # in the uniform-feature space
        neighbours = NearestNeighbors(n_neighbors=self.n_neighbours, algorithm='kd_tree').fit(X_part)
        _, self._groups_indices = neighbours.kneighbors(X_part)
        self._group_weights = ut.compute_group_weights(self._groups_indices, sample_weight=self._masked_weight)
class KnnBasedSDE(AbstractKnnMetrics):
    """Standard Deviation of Efficiency, computed over knn groups."""

    def __init__(self, uniform_features, n_neighbours=50, uniform_label=0, target_rcp=None, power=2.):
        AbstractKnnMetrics.__init__(self, n_neighbours=n_neighbours,
                                    uniform_features=uniform_features,
                                    uniform_label=uniform_label)
        self.power = power
        self.target_rcp = target_rcp

    def __call__(self, y, proba, sample_weight):
        y_pred = proba[self._mask, self.uniform_label]
        # Use a local value: __call__ must not mutate estimator parameters
        # (the original overwrote self.target_rcp during scoring).
        target_rcp = numpy.array(self.target_rcp if self.target_rcp is not None
                                 else [0.5, 0.6, 0.7, 0.8, 0.9])
        result = 0.
        cuts = weighted_percentile(y_pred, percentiles=1 - target_rcp, sample_weight=self._masked_weight)
        for cut in cuts:
            groups_efficiencies = ut.compute_group_efficiencies(y_pred, groups_indices=self._groups_indices, cut=cut,
                                                                sample_weight=self._masked_weight)
            result += ut.weighted_deviation(groups_efficiencies, weights=self._group_weights, power=self.power)
        return (result / len(cuts)) ** (1. / self.power)
class KnnBasedTheil(AbstractKnnMetrics):
    """Theil index of efficiency, computed over knn groups.

    Fix: the original body was a copy-paste of KnnBasedSDE (weighted
    deviation + power root); it now uses ut.theil and a plain average,
    mirroring BinBasedTheil.
    """

    def __init__(self, uniform_features, n_neighbours=50, uniform_label=0, target_rcp=None, power=2.):
        AbstractKnnMetrics.__init__(self, n_neighbours=n_neighbours,
                                    uniform_features=uniform_features,
                                    uniform_label=uniform_label)
        self.power = power
        self.target_rcp = target_rcp

    def __call__(self, y, proba, sample_weight):
        y_pred = proba[self._mask, self.uniform_label]
        # Local value: __call__ must not mutate estimator parameters.
        target_rcp = numpy.array(self.target_rcp if self.target_rcp is not None
                                 else [0.5, 0.6, 0.7, 0.8, 0.9])
        result = 0.
        cuts = weighted_percentile(y_pred, percentiles=1 - target_rcp, sample_weight=self._masked_weight)
        for cut in cuts:
            groups_efficiencies = ut.compute_group_efficiencies(y_pred, groups_indices=self._groups_indices, cut=cut,
                                                                sample_weight=self._masked_weight)
            result += ut.theil(groups_efficiencies, weights=self._group_weights)
        return result / len(cuts)
class KnnBasedCvM(AbstractKnnMetrics):
    """Weighted average Cramer-von Mises distance between each knn group's
    prediction distribution and the global one."""

    def __init__(self, uniform_features, n_neighbours=50, uniform_label=0, power=2.):
        AbstractKnnMetrics.__init__(self, n_neighbours=n_neighbours,
                                    uniform_features=uniform_features,
                                    uniform_label=uniform_label)
        self.power = power

    def __call__(self, y, proba, sample_weight):
        y_pred = proba[self._mask, self.uniform_label]
        data, weights, cdf = ut.prepare_distibution(y_pred, weights=self._masked_weight)
        total = 0.
        for indices, group_weight in zip(self._groups_indices, self._group_weights):
            total += group_weight * ut._cvm_2samp_fast(data, y_pred[indices],
                                                       weights, self._masked_weight[indices], cdf)
        return total
# endregion
# region Uniformity metrics (old version)
"""
Comments on the old interface:
Mask is needed to show the events of needed class,
for instance, if we want to compute the uniformity on signal predictions,
mask should be True on signal events and False on the others.
y_score in usually predicted probabilities of event being a needed class.
So, if I want to compute efficiency on signal, I put:
mask = y == 1
y_pred = clf.predict_proba[:, 1]
If want to do it for bck:
mask = y == 0
y_pred = clf.predict_proba[:, 0]
"""
def sde(y, proba, X, uniform_variables, sample_weight=None, label=1, knn=30):
    """Simplest way to compute SDE; slow if SDE has to be recomputed many times.

    :param y: real classes of events, shape = [n_samples]
    :param proba: predicted probabilities, shape = [n_samples, n_classes]
    :param X: pandas.DataFrame with uniform features
    :param uniform_variables: features along which uniformity is desired, list of strings
    :param sample_weight: weights of events, shape = [n_samples]
    :param label: class for which uniformity is measured (usually, 0 is bck, 1 is signal)
    :param knn: number of nearest neighbours used in knn

    Example of usage:
    proba = classifier.predict_proba(testX)
    sde(testY, proba=proba, X=testX, uniform_variables=['mass'])
    """
    # normalize inputs to 1d labels / DataFrame before any indexing
    y, proba = check_arrays(y, proba)
    assert len(y) == len(proba) == len(X), 'Different lengths'
    y = column_or_1d(y)
    sample_weight = check_sample_weight(y, sample_weight=sample_weight)
    X = pandas.DataFrame(X)
    signal_mask = y == label
    # knn groups are built in the space of the uniform variables, signal only
    knn_groups = computeSignalKnnIndices(uniform_variables=uniform_variables, dataframe=X,
                                         is_signal=signal_mask, n_neighbors=knn)[signal_mask, :]
    return ut.compute_sde_on_groups(proba[:, label], mask=signal_mask, groups_indices=knn_groups,
                                    target_efficiencies=[0.5, 0.6, 0.7, 0.8, 0.9], sample_weight=sample_weight)
def theil_flatness(y, proba, X, uniform_variables, sample_weight=None, label=1, knn=30):
    """This is ready-to-use function, and it is quite slow to use many times.

    :param y: real classes of events, shape = [n_samples]
    :param proba: predicted probabilities, shape = [n_samples, n_classes]
    :param X: pandas.DataFrame with uniform features
    :param uniform_variables: features along which uniformity is desired, list of strings
    :param sample_weight: weights of events, shape = [n_samples]
    :param label: class for which uniformity is measured (usually, 0 is bck, 1 is signal)
    :param knn: number of nearest neighbours used in knn
    """
    # Input normalization added for consistency with sde() / cvm_flatness():
    # without column_or_1d a column-vector y would make `y == label` a 2-D
    # array and silently produce a wrong mask.
    y, proba = check_arrays(y, proba)
    assert len(y) == len(proba) == len(X), 'Different lengths'
    y = column_or_1d(y)
    sample_weight = check_sample_weight(y, sample_weight=sample_weight)
    X = pandas.DataFrame(X)
    mask = y == label
    groups_indices = computeSignalKnnIndices(uniform_variables, X, is_signal=mask, n_neighbors=knn)[mask, :]
    return ut.compute_theil_on_groups(proba[:, label], mask=mask, groups_indices=groups_indices,
                                      target_efficiencies=[0.5, 0.6, 0.7, 0.8, 0.9], sample_weight=sample_weight)
def cvm_flatness(y, proba, X, uniform_variables, sample_weight=None, label=1, knn=30):
    """Simplest way to compute Cramer-von Mises flatness; slow if computed many times.

    :param y: real classes of events, shape = [n_samples]
    :param proba: predicted probabilities, shape = [n_samples, n_classes]
    :param X: pandas.DataFrame with uniform features (i.e. test dataset)
    :param uniform_variables: features along which uniformity is desired, list of strings
    :param sample_weight: weights of events, shape = [n_samples]
    :param label: class for which uniformity is measured (usually, 0 is bck, 1 is signal)
    :param knn: number of nearest neighbours used in knn

    Example of usage:
    proba = classifier.predict_proba(testX)
    cvm_flatness(testY, proba=proba, X=testX, uniform_variables=['mass'])
    """
    # sanitize inputs first: 1d labels, matching lengths, DataFrame features
    y, proba = check_arrays(y, proba)
    assert len(y) == len(proba) == len(X), 'Different lengths'
    y = column_or_1d(y)
    sample_weight = check_sample_weight(y, sample_weight=sample_weight)
    X = pandas.DataFrame(X)
    signal_mask = y == label
    # knn groups over the uniform variables, restricted to the chosen class
    groups_indices = computeSignalKnnIndices(uniform_variables=uniform_variables, dataframe=X,
                                             is_signal=signal_mask, n_neighbors=knn)
    groups_indices = groups_indices[signal_mask, :]
    return ut.group_based_cvm(proba[:, label], mask=signal_mask, groups_indices=groups_indices,
                              sample_weight=sample_weight)
# endregion
| {
"repo_name": "anaderi/lhcb_trigger_ml",
"path": "hep_ml/metrics.py",
"copies": "1",
"size": "18871",
"license": "mit",
"hash": 3714703360272555000,
"line_mean": 42.4815668203,
"line_max": 119,
"alpha_frac": 0.6467065868,
"autogenerated": false,
"ratio": 3.565274891365955,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47119814781659547,
"avg_score": null,
"num_lines": null
} |
# About
# This module contains functions to build reports:
# training, getting predictions,
# building various plots, calculating metrics
from __future__ import print_function, division, absolute_import
from itertools import islice
from collections import OrderedDict
import time
import warnings
import numpy
import pandas
import matplotlib.pyplot as pylab
from sklearn.metrics import auc, roc_auc_score, roc_curve
from sklearn.utils.validation import column_or_1d
from matplotlib import cm
from scipy.stats import pearsonr
from .commonutils import compute_bdt_cut, \
check_sample_weight, build_normalizer, computeSignalKnnIndices, map_on_cluster, check_arrays
from .metrics_utils import compute_sde_on_bins, compute_sde_on_groups, compute_theil_on_bins, \
bin_based_cvm, bin_based_ks
from .metrics_utils import compute_bin_efficiencies, compute_bin_weights, compute_bin_indices
__author__ = 'Alex Rogozhnikov'
def train_classifier(name_classifier, X, y, sample_weight=None):
    """Trains one classifier on a separate node in cluster.

    :param name_classifier: 2-tuple (name, classifier)
    :return: tuple (name_classifier, seconds spent on fitting)
    """
    started = time.time()
    classifier = name_classifier[1]
    # pass sample_weight only when provided: not every estimator accepts it
    if sample_weight is None:
        classifier.fit(X, y)
    else:
        classifier.fit(X, y, sample_weight=sample_weight)
    return name_classifier, time.time() - started
class ClassifiersDict(OrderedDict):
    """A collection of classifiers, which will be trained simultaneously
    and after that will be compared."""

    def fit(self, X, y, sample_weight=None, ipc_profile=None):
        """Trains all classifiers on the same train data.

        If ipc_profile is not None, it is used as the name of an IPython
        cluster to run the trainings in parallel.
        """
        started = time.time()
        trained = map_on_cluster(ipc_profile, train_classifier,
                                 self.items(),
                                 [X] * len(self),
                                 [y] * len(self),
                                 [sample_weight] * len(self))
        elapsed = time.time() - started
        # store back the fitted classifiers and report per-classifier timings
        for (name, classifier), clf_time in trained:
            self[name] = classifier
            print("Classifier %12s is learnt in %.2f seconds" % (name, clf_time))
        if ipc_profile is None:
            print("Totally spent %.2f seconds on training" % elapsed)
        else:
            print("Totally spent %.2f seconds on parallel training" % elapsed)
        return self

    def test_on(self, X, y, sample_weight=None):
        """Build a Predictions report object for the given test data."""
        return Predictions(self, X, y, sample_weight=sample_weight)
class Predictions(object):
    def __init__(self, classifiers_dict, X, y, sample_weight=None, low_memory=None):
        """The main object for different reports and plots,
        computes predictions of different classifiers on the same test data sets
        and makes it possible to compute different metrics,
        plot some quality curves and so on

        :param classifiers_dict: OrderedDict of fitted classifiers
        :param X: test features (indexable like a pandas.DataFrame)
        :param y: test labels, converted to a 1d int array
        :param sample_weight: optional event weights
        :param low_memory: deprecated, kept only for signature compatibility
        """
        assert isinstance(classifiers_dict, OrderedDict)
        if low_memory is not None:
            warnings.warn("Low memory argument is deprecated", DeprecationWarning)
        self.X = X
        self.y = column_or_1d(numpy.array(y, dtype=int))
        self.sample_weight = sample_weight
        assert len(X) == len(y), 'Different lengths'
        self.n_samples = len(y)
        # normalized weights (ones if sample_weight is None)
        self.checked_sample_weight = check_sample_weight(y, sample_weight=sample_weight)
        # predictions are computed eagerly for every classifier
        self.predictions = OrderedDict([(name, classifier.predict_proba(X))
                                        for name, classifier in classifiers_dict.items()])
        self.staged_predictions = None
        self.classifiers = classifiers_dict

    # region Checks

    @staticmethod
    def _check_efficiencies(efficiencies):
        """Return target efficiencies as an array, defaulting to [0.6..0.9]."""
        if efficiencies is None:
            return numpy.array([0.6, 0.7, 0.8, 0.9])
        else:
            # NOTE(review): numpy.float is deprecated/removed in modern numpy
            return numpy.array(efficiencies, dtype=numpy.float)

    def _check_mask(self, mask):
        """Checks whether the mask is appropriate and normalizes it"""
        if mask is None:
            # NOTE(review): numpy.bool is deprecated/removed in modern numpy
            return numpy.ones(len(self.y), dtype=numpy.bool)
        assert len(mask) == len(self.y), 'wrong size of mask'
        assert numpy.result_type(mask) == numpy.bool, 'the mask should be boolean'
        return mask

    # endregion

    # region Mappers - function that apply functions to predictions

    def _get_staged_proba(self):
        """Collect staged_predict_proba generators for classifiers that support them.

        Classifiers without staged prediction are silently skipped.
        """
        result = OrderedDict()
        for name, classifier in self.classifiers.items():
            try:
                result[name] = classifier.staged_predict_proba(self.X)
            except AttributeError:
                pass
        return result

    def _get_stages(self, stages):
        """Return {classifier_name: Series[stage -> proba array]}.

        With stages=None only the final prediction is returned (index 'result').
        """
        result = OrderedDict()
        if stages is None:
            for name, preds in self.predictions.items():
                result[name] = pandas.Series(data=[preds], index=['result'])
        else:
            stages = set(stages)
            for name, stage_preds in self._get_staged_proba().items():
                result[name] = pandas.Series()
                for stage, pred in enumerate(stage_preds):
                    if stage not in stages:
                        continue
                    # copy: staged generators may reuse/overwrite their buffer
                    result[name].loc[stage] = numpy.copy(pred)
        return result

    def _map_on_staged_proba(self, function, step=1):
        """Applies a function to every step-th stage of each classifier
        returns: {name: Series[stage_name, result]}
        :param function: should take the only argument, predict_proba of shape [n_samples, 2]
        :param int step: the function is applied to every step'th iteration
        """
        result = OrderedDict()
        for name, staged_proba in self._get_staged_proba().items():
            result[name] = pandas.Series()
            for stage, pred in islice(enumerate(staged_proba), step - 1, None, step):
                result[name].loc[stage] = function(pred)
        return result

    def _map_on_stages(self, function, stages=None):
        """
        :type function: takes prediction proba of shape [n_samples, n_classes] and returns something
        :type stages: list(int) | NoneType, the list of stages we calculate metrics on
        :rtype: dict[str, pandas.Series]"""
        selected_stages = self._get_stages(stages)
        result = OrderedDict()
        for name, staged_proba in selected_stages.items():
            result[name] = staged_proba.apply(function)
        return result

    def _plot_on_stages(self, plotting_function, stages=None):
        """Plots in each line results for the same stage,
        plotting_function should have following interface:
        plotting_function(y_true, y_proba, sample_weight), y_proba has shape [n_samples, n_features] """
        selected_stages = pandas.DataFrame(self._get_stages(stages))
        # one figure row per stage, one subplot per classifier
        for stage_name, stage_predictions in selected_stages.iterrows():
            print('Stage ' + str(stage_name))
            self._strip_figure(len(stage_predictions))
            for i, (name, probabilities) in enumerate(stage_predictions.items(), start=1):
                pylab.subplot(1, len(stage_predictions), i)
                pylab.title(name)
                plotting_function(self.y, probabilities, sample_weight=self.sample_weight)
            pylab.show()

    def _plot_curves(self, function, step):
        """
        :param function: should take proba od shape [n_samples, n_classes]
        """
        result = self._map_on_staged_proba(function=function, step=step)
        for name, values in result.items():
            pylab.plot(values.keys(), values, label=name)
        pylab.xlabel('stage')
        return result

    # endregion

    # region Quality-related methods

    def roc(self, stages=None, new_figure=True):
        """Draw 'physicist-style' ROC curves (efficiency vs rejection) per stage."""
        proba_on_stages = pandas.DataFrame(self._get_stages(stages))
        n_stages = len(proba_on_stages)
        if new_figure:
            self._strip_figure(n_stages)
        for i, (stage_name, proba_on_stage) in enumerate(proba_on_stages.iterrows()):
            pylab.subplot(1, n_stages, i + 1), pylab.title("stage " + str(stage_name))
            pylab.title('ROC at stage ' + str(stage_name))
            pylab.plot([0, 1], [1, 0], 'k--')
            pylab.xlim([0., 1.003]), pylab.xlabel('Signal Efficiency')
            pylab.ylim([0., 1.003]), pylab.ylabel('Background Rejection')
            for classifier_name, predictions in proba_on_stage.iteritems():
                plot_roc(self.y, predictions[:, 1], sample_weight=self.sample_weight,
                         classifier_name=classifier_name)
            pylab.legend(loc="lower left")
        return self

    def prediction_pdf(self, stages=None, histtype='step', bins=30, show_legend=False):
        """Histogram the per-class predicted probabilities for each classifier/stage."""
        proba_on_stages = pandas.DataFrame(self._get_stages(stages))
        for stage_name, proba_on_stage in proba_on_stages.iterrows():
            self._strip_figure(len(proba_on_stage))
            for i, (clf_name, predict_proba) in enumerate(proba_on_stage.iteritems(), 1):
                pylab.subplot(1, len(proba_on_stage), i)
                for label in numpy.unique(self.y):
                    pylab.hist(predict_proba[self.y == label, label], histtype=histtype, bins=bins, label=str(label))
                pylab.title('Predictions of %s at stage %s' % (clf_name, str(stage_name)))
                if show_legend:
                    pylab.legend()
        pylab.show()

    def learning_curves(self, metrics=roc_auc_score, step=1, label=1, mask=None):
        """Plot a metric (default ROC AUC) as a function of boosting stage.

        NOTE(review): the metric is evaluated on masked events, but
        sample_weight is passed unmasked — confirm this is intended when
        both mask and sample_weight are used together.
        """
        y_true = (self.y == label) * 1
        mask = self._check_mask(mask)
        self._plot_curves(lambda p: metrics(y_true[mask], p[mask, label], sample_weight=self.sample_weight), step=step)
        pylab.legend(loc="lower right")
        pylab.xlabel("stage"), pylab.ylabel("ROC AUC")

    def compute_metrics(self, stages=None, metrics=roc_auc_score, label=1):
        """ Computes arbitrary metrics on selected stages
        :param stages: array-like of stages or None
        :param metrics: (numpy.array, numpy.array, numpy.array | None) -> float,
        any metrics with interface (y_true, y_pred, sample_weight=None), where y_pred of shape [n_samples] of float
        :return: pandas.DataFrame with computed values
        """
        def _compute_metrics(proba):
            # binarize labels against the chosen class before scoring
            return metrics((self.y == label) * 1, proba[:, label], sample_weight=self.sample_weight)
        return pandas.DataFrame(self._map_on_stages(_compute_metrics, stages=stages))

    # endregion

    # region Uniformity-related methods

    def _compute_bin_indices(self, var_names, n_bins=20, mask=None):
        """Mask is used to show events that will be binned afterwards
        (for instance if only signal events will be binned, then mask= y == 1)"""
        for var in var_names:
            # NOTE(review): '%i' with a string var will itself raise TypeError
            # when this assertion fires — the format should probably be '%s'
            assert var in self.X.columns, "the variable %i is not in dataset" % var
        mask = self._check_mask(mask)
        bin_limits = []
        for var_name in var_names:
            # bin edges are chosen from the masked events only
            var_data = self.X.loc[mask, var_name]
            bin_limits.append(numpy.linspace(numpy.min(var_data), numpy.max(var_data), n_bins + 1)[1: -1])
        # NOTE(review): DataFrame.ix is deprecated in modern pandas (.loc/.iloc)
        return compute_bin_indices(self.X.ix[:, var_names].values, bin_limits=bin_limits)

    def _compute_nonempty_bins_mask(self, var_names, n_bins=20, mask=None):
        """Boolean mask over all bins: True where the bin contains at least one event."""
        return numpy.bincount(self._compute_bin_indices(var_names, n_bins=n_bins, mask=mask),
                              minlength=n_bins ** len(var_names)) > 0

    def _compute_bin_masscenters(self, var_name, n_bins=20, mask=None):
        """Median of the variable inside each bin (used as the bin's x position).

        NOTE(review): unlike the other helpers, mask is used directly here;
        calling with mask=None would fail on `& mask` — callers pass a real mask.
        """
        bin_indices = self._compute_bin_indices([var_name], n_bins=n_bins, mask=mask)
        result = []
        for bin in range(numpy.max(bin_indices) + 1):
            result.append(numpy.median(self.X.ix[(bin_indices == bin) & mask, var_name]))
        return numpy.array(result)

    def _compute_bin_centers(self, var_names, n_bins=20, mask=None):
        """Mask is used to show events that will be binned after"""
        bin_centers = []
        mask = self._check_mask(mask)
        for var_name in var_names:
            var_data = self.X.loc[mask, var_name]
            # midpoints of equal-width bins over the masked range
            bin_centers.append(numpy.linspace(numpy.min(var_data), numpy.max(var_data), 2 * n_bins + 1)[1::2])
            assert len(bin_centers[-1]) == n_bins
        return bin_centers

    def sde_curves(self, uniform_variables, target_efficiencies=None, n_bins=20, step=3, power=2., label=1,
                   return_data=False):
        """Plot bin-based SDE flatness vs stage for each classifier."""
        mask = self.y == label
        bin_indices = self._compute_bin_indices(uniform_variables, n_bins=n_bins, mask=mask)
        target_efficiencies = self._check_efficiencies(target_efficiencies)

        def compute_sde(pred):
            return compute_sde_on_bins(pred[:, label], mask=mask, bin_indices=bin_indices,
                                       target_efficiencies=target_efficiencies, power=power,
                                       sample_weight=self.checked_sample_weight)

        result = self._plot_curves(compute_sde, step=step)
        pylab.xlabel("stage"), pylab.ylabel("SDE")
        pylab.ylim(0, pylab.ylim()[1] * 1.15)
        pylab.legend(loc='upper center', bbox_to_anchor=(0.5, 1.00), ncol=3, fancybox=True, shadow=True)
        if return_data:
            return result

    def sde_knn_curves(self, uniform_variables, target_efficiencies=None, knn=30, step=3, power=2, label=1,
                       return_data=True):
        """Warning: this functions is very slow, specially on large datasets"""
        mask = self.y == label
        knn_indices = computeSignalKnnIndices(uniform_variables, self.X, is_signal=mask, n_neighbors=knn)
        knn_indices = knn_indices[mask, :]
        target_efficiencies = self._check_efficiencies(target_efficiencies)

        def compute_sde(pred):
            return compute_sde_on_groups(pred[:, label], mask, groups_indices=knn_indices,
                                         target_efficiencies=target_efficiencies,
                                         power=power, sample_weight=self.sample_weight)

        result = self._plot_curves(compute_sde, step=step)
        pylab.xlabel("stage"), pylab.ylabel("SDE")
        pylab.ylim(0, pylab.ylim()[1] * 1.15)
        pylab.legend(loc='upper center', bbox_to_anchor=(0.5, 1.00), ncol=3, fancybox=True, shadow=True)
        if return_data:
            return result

    def theil_curves(self, uniform_variables, target_efficiencies=None, n_bins=20, label=1, step=3, return_data=True):
        """Plot bin-based Theil-index flatness vs stage for each classifier."""
        mask = self.y == label
        bin_indices = self._compute_bin_indices(uniform_variables, n_bins=n_bins, mask=mask)
        target_efficiencies = self._check_efficiencies(target_efficiencies)

        def compute_theil(pred):
            return compute_theil_on_bins(pred[:, label], mask=mask, bin_indices=bin_indices,
                                         target_efficiencies=target_efficiencies,
                                         sample_weight=self.checked_sample_weight)

        result = self._plot_curves(compute_theil, step=step)
        pylab.ylabel("Theil Index")
        pylab.ylim(0, pylab.ylim()[1] * 1.15)
        pylab.legend(loc='upper center', bbox_to_anchor=(0.5, 1.00), ncol=3, fancybox=True, shadow=True)
        if return_data:
            return result

    def ks_curves(self, uniform_variables, n_bins=20, label=1, step=3, return_data=True):
        """Plot bin-based Kolmogorov-Smirnov flatness vs stage for each classifier."""
        mask = self.y == label
        bin_indices = self._compute_bin_indices(uniform_variables, n_bins=n_bins, mask=mask)

        def compute_ks(pred):
            return bin_based_ks(pred[:, label], mask=mask, bin_indices=bin_indices,
                                sample_weight=self.checked_sample_weight)

        result = self._plot_curves(compute_ks, step=step)
        pylab.ylabel("KS flatness")
        pylab.ylim(0, pylab.ylim()[1] * 1.15)
        pylab.legend(loc='upper center', bbox_to_anchor=(0.5, 1.00), ncol=3, fancybox=True, shadow=True)
        if return_data:
            return result

    def cvm_curves(self, uniform_variables, n_bins=20, label=1, step=3, power=1., return_data=True):
        """power = 0.5 to compare with SDE"""
        mask = self.y == label
        bin_indices = self._compute_bin_indices(uniform_variables, n_bins=n_bins, mask=mask)

        def compute_cvm(pred):
            return bin_based_cvm(pred[mask, label], bin_indices=bin_indices[mask],
                                 sample_weight=self.checked_sample_weight[mask]) ** power

        result = self._plot_curves(compute_cvm, step=step)
        pylab.ylabel('CvM flatness')
        pylab.ylim(0, pylab.ylim()[1] * 1.15)
        pylab.legend(loc='upper center', bbox_to_anchor=(0.5, 1.00), ncol=3, fancybox=True, shadow=True)
        if return_data:
            return result

    def rcp(self, variable, global_rcp=None, n_bins=20, label=1,
            new_plot=True, ignored_sidebands=0., range=None, marker='.',
            show_legend=True, multiclassification=False, adjust_n_bins=True, mask=None,
            median_centers=True, compute_cuts_for_other_class=False, print_cut=False):
        """
        Right-classified part. This is efficiency for signal events, background rejection for background ones.
        In case of more than two classes this is the part of events of that class that was correctly classified.
        This function is needed to control correlation in more than one dimension.

        :param variable: feature name or array with values for each event in dataset
        :param global_rcp: right-classified parts, for which cuts are computed (default=[0.6, 0.7, 0.8, 0.9])
        :param n_bins: number of bins (default 20)
        :param label: 1 for signal, 0 for background, or label of interested class if multiclassification
        :param new_plot: if False, will use the existing figure (default=True)
        :param ignored_sidebands: float, part of events from the left and right
            that will be ignored
        :param range: tuple or None, events with values of variable outside this range will be ignored
            (NOTE: this parameter shadows the builtin `range` inside this method)
        :param marker: matplotlib marker for the bin points
        :param show_legend: whether to draw the legend
        :param multiclassification: bool, if False, 'physical' names will be used (efficiency, rejection)
        :param adjust_n_bins: shrink n_bins to the number of distinct values of the variable
        :param mask: optional boolean mask restricting the considered events
        :param median_centers: bool, if True, the x of point is median of masses inside bin,
            otherwise mean of the bounds
        :param compute_cuts_for_other_class: if True, the computed cuts will correspond to rcp of opposite class
            (available only for binary classification)
        :param print_cut: include the numeric cut value in the legend labels
        """
        if multiclassification:
            assert not compute_cuts_for_other_class, 'this option is unavailable for multiclassification'
        if not multiclassification:
            assert label in {0, 1}, 'for binary classification label should be in [0, 1]'
        mask = self._check_mask(mask)
        # events of the studied class, within the mask
        inner_mask = (mask > 0.5) & (self.y == label)
        if range is not None:
            left, right = range
        else:
            # derive the plotting range from percentiles of the masked events
            signal_masses = self.X.loc[mask, variable].values
            left, right = numpy.percentile(signal_masses, [100 * ignored_sidebands, 100 * (1. - ignored_sidebands)])
            left -= 0.5
            right += 0.5
        masses = self.X.loc[:, variable].values
        inner_mask &= (masses >= left) & (masses <= right)
        if adjust_n_bins:
            n_bins = min(n_bins, len(numpy.unique(masses[mask])))
        bin_indices = self._compute_bin_indices([variable], n_bins=n_bins, mask=inner_mask)
        if median_centers:
            bin_centers = self._compute_bin_masscenters(variable, n_bins=n_bins, mask=inner_mask)
        else:
            bin_centers, = self._compute_bin_centers([variable], n_bins=n_bins, mask=inner_mask)
        # Leave only non-empty
        bin_mask = self._compute_nonempty_bins_mask([variable], n_bins=n_bins, mask=inner_mask)
        global_rcp = self._check_efficiencies(global_rcp)
        n_classifiers = len(self.predictions)
        if new_plot:
            self._strip_figure(n_classifiers)
        # choose axis/legend wording depending on the classification setting
        if multiclassification:
            ylabel = 'right-classified part'
            legend_label = 'rcp={rcp:.2f}'
        elif label == 1:
            ylabel = 'signal efficiency'
            legend_label = 'avg eff={rcp:.2f}' if not compute_cuts_for_other_class else 'bck rej={rcp:.2f}'
        else:
            ylabel = 'background rejection'
            legend_label = 'avg rej={rcp:.2f}' if not compute_cuts_for_other_class else 'avg eff={rcp:.2f}'
        if print_cut:
            legend_label += '(cut={cut:.2f})'
        for i, (name, proba) in enumerate(self.predictions.items(), start=1):
            ax = pylab.subplot(1, n_classifiers, i)
            for eff in global_rcp:
                if not compute_cuts_for_other_class:
                    cut = compute_bdt_cut(eff, y_true=mask, y_pred=proba[:, label],
                                          sample_weight=self.checked_sample_weight)
                else:
                    # mirror the cut computed on the opposite class
                    cut = 1 - compute_bdt_cut(eff, y_true=mask, y_pred=proba[:, 1 - label],
                                              sample_weight=self.checked_sample_weight)
                bin_effs = compute_bin_efficiencies(proba[mask, label], bin_indices=bin_indices[mask], cut=cut,
                                                    sample_weight=self.checked_sample_weight[mask], minlength=n_bins)
                ax.plot(bin_centers[bin_mask], bin_effs[bin_mask], label=legend_label.format(rcp=eff, cut=cut),
                        marker=marker)
            ax.set_ylim(0, 1)
            ax.set_title(name)
            ax.set_xlabel(variable)
            ax.set_ylabel(ylabel)
            if show_legend:
                ax.legend(loc='best')

    def efficiency(self, uniform_variables, stages=None, target_efficiencies=None, n_bins=20, label=1):
        """Deprecated bin-efficiency plots (1d curves or 2d color maps)."""
        warnings.warn("This implementation of efficiency is considered outdated, consider using RCP",
                      DeprecationWarning)
        target_efficiencies = self._check_efficiencies(target_efficiencies)
        if len(uniform_variables) not in {1, 2}:
            raise ValueError("More than two variables are not implemented, you have a 3d-monitor? :)")
        mask = self.y == label
        bin_indices = self._compute_bin_indices(uniform_variables, n_bins, mask=mask)
        total_bins = n_bins ** len(uniform_variables)

        def compute_bin_effs(prediction_proba, target_eff):
            # per-bin efficiency at the cut giving the global target efficiency
            cut = compute_bdt_cut(target_eff, y_true=mask, y_pred=prediction_proba[:, label],
                                  sample_weight=self.checked_sample_weight)
            return compute_bin_efficiencies(prediction_proba[mask, label], bin_indices=bin_indices[mask],
                                            cut=cut, sample_weight=self.checked_sample_weight[mask],
                                            minlength=total_bins)

        if len(uniform_variables) == 1:
            effs = self._map_on_stages(stages=stages,
                                       function=lambda pred: [compute_bin_effs(pred, eff) for eff in
                                                              target_efficiencies])
            effs = pandas.DataFrame(effs)
            x_limits, = self._compute_bin_centers(uniform_variables, n_bins=n_bins, mask=mask)
            for stage_name, stage in effs.iterrows():
                self._strip_figure(len(stage))
                for i, (name, eff_stage_data) in enumerate(stage.iteritems()):
                    # NaN means this classifier has no such stage
                    if isinstance(eff_stage_data, float) and pandas.isnull(eff_stage_data):
                        continue
                    ax = pylab.subplot(1, len(stage), i + 1)
                    for eff, local_effs in zip(target_efficiencies, eff_stage_data):
                        ax.set_ylim(0, 1)
                        ax.plot(x_limits, local_effs, label='eff=%.2f' % eff)
                        ax.set_title(name)
                        ax.set_xlabel(uniform_variables[0])
                        ax.set_ylabel('efficiency')
                        ax.legend(loc='best')
        else:
            x_limits, y_limits = self._compute_bin_centers(uniform_variables, n_bins=n_bins, mask=mask)
            bin_weights = compute_bin_weights(bin_indices, sample_weight=self.checked_sample_weight)
            bin_weights.resize(total_bins)
            for target_efficiency in target_efficiencies:
                staged_results = self._map_on_stages(lambda x: compute_bin_effs(x, target_efficiency), stages=stages)
                staged_results = pandas.DataFrame(staged_results)
                for stage_name, stage_data in staged_results.iterrows():
                    print("Stage %s, efficiency=%.2f" % (str(stage_name), target_efficiency))
                    self._strip_figure(len(stage_data))
                    for i, (name, local_efficiencies) in enumerate(stage_data.iteritems(), start=1):
                        if isinstance(local_efficiencies, float) and pandas.isnull(local_efficiencies):
                            continue
                        local_efficiencies[bin_weights <= 0] = target_efficiency
                        local_efficiencies = local_efficiencies.reshape([n_bins, n_bins], ).transpose()
                        # drawing difference, the efficiency in empty bins will be replaced with mean value
                        ax = pylab.subplot(1, len(stage_data), i)
                        p = ax.pcolor(x_limits, y_limits, local_efficiencies, cmap=cm.get_cmap("RdBu"),
                                      vmin=target_efficiency - 0.2, vmax=target_efficiency + 0.2)
                        ax.set_xlabel(uniform_variables[0]), ax.set_ylabel(uniform_variables[1])
                        ax.set_title(name)
                        pylab.colorbar(p, ax=ax)
                    pylab.show()
        return self

    def correlation_curves(self, var_name, center=None, step=1, label=1):
        """ Correlation between normalized(!) predictions on some class and a variable

        :type var_name: str, correlation is computed for this variable
        :type center: float|None, if float, the correlation is measured between |x - center| and prediction
        :type step: int
        :type label: int, label of class, the correlation is computed for the events of this class
        :rtype: Predictions, returns self
        """
        pylab.title("Pearson correlation with " + str(var_name))
        mask = self.y == label
        data = self.X.loc[mask, var_name]
        if center is not None:
            data = numpy.abs(data - center)
        weight = check_sample_weight(self.y, self.sample_weight)[mask]

        def compute_correlation(prediction_proba):
            pred = prediction_proba[mask, label]
            # normalize predictions to a flat distribution before correlating
            pred = build_normalizer(pred, sample_weight=weight)(pred)
            return pearsonr(pred, data)[0]
        correlations = self._map_on_staged_proba(compute_correlation, step=step)

        for classifier_name, staged_correlation in correlations.items():
            pylab.plot(staged_correlation.keys(), staged_correlation, label=classifier_name)
        pylab.legend(loc="lower left")
        pylab.xlabel("stage"), pylab.ylabel("Pearson correlation")
        return self

    # endregion

    def hist(self, var_names, n_bins=20, new_plot=True):
        """ Plots 1 and 2-dimensional distributions
        :param var_names: array-like of length 1 or 2 with name of variables to plot
        :param int n_bins: number of bins for histogram()
        :return: self """
        plot_classes_distribution(self.X, self.y, var_names, n_bins=n_bins, new_plot=new_plot)
        return self

    @staticmethod
    def _strip_figure(n):
        """Open a new figure sized for n side-by-side subplots."""
        x_size = 12 if n == 1 else 12 + 3 * n
        y_size = 10 - n if n <= 5 else 4
        pylab.figure(figsize=(x_size, y_size))

    def show(self):
        """Display pending matplotlib figures; returns self for chaining."""
        pylab.show()
        return self
# Helpful functions that can be used separately
def plot_roc(y_true, y_pred, sample_weight=None, classifier_name="", is_cut=False, mask=None):
    """Plots a ROC curve the way physicists like it
    (signal efficiency on x, background rejection on y).

    :param y_true: numpy.array, shape=[n_samples]
    :param y_pred: numpy.array, shape=[n_samples]
    :param sample_weight: numpy.array | None, shape = [n_samples]
    :param classifier_name: str, the name of classifier for label
    :param is_cut: predictions are binary (only one working point is drawn)
    :param mask: plot ROC curve only for events that have mask=True
    """
    MAX_STEPS = 500
    if is_cut:
        assert len(numpy.unique(y_pred)) == 2, 'Cut assumes that prediction are 0 and 1 (or True/False)'
    y_true, y_pred = check_arrays(y_true, y_pred)
    if mask is not None:
        mask = numpy.array(mask, dtype=bool)  # converting to bool, just in case
        y_true, y_pred = y_true[mask], y_pred[mask]
        if sample_weight is not None:
            sample_weight = sample_weight[mask]
    fpr, tpr, thresholds = check_arrays(*roc_curve(y_true, y_pred, sample_weight=sample_weight))
    roc_auc = auc(fpr, tpr)
    # tpr = signal efficiency; 1 - fpr = background rejection
    bg_rejection = 1. - fpr
    if len(fpr) > MAX_STEPS:
        # thin out the curve so the plot stays light while keeping detail
        # in both coordinates plus the two endpoints
        grid = numpy.linspace(0, 1, MAX_STEPS)
        keep = numpy.unique(numpy.concatenate([numpy.searchsorted(tpr, grid),
                                               numpy.searchsorted(fpr, grid),
                                               [0, len(tpr) - 1]]))
        tpr, bg_rejection = tpr[keep], bg_rejection[keep]
    if is_cut:
        pylab.plot(tpr[1:2], bg_rejection[1:2], 'o', label='%s' % classifier_name)
    else:
        pylab.plot(tpr, bg_rejection, label='%s (area = %0.3f)' % (classifier_name, roc_auc))
def plot_classes_distribution(X, y, var_names, n_bins=20, new_plot=True):
    """Plot per-class distributions: 1d histograms or a 2d scatter plot."""
    y = column_or_1d(y)
    labels = numpy.unique(y)
    n_vars = len(var_names)
    if n_vars == 1:
        if new_plot:
            pylab.figure(figsize=(14, 7))
        pylab.title('Distribution of classes')
        for label in labels:
            pylab.hist(numpy.ravel(X.ix[y == label, var_names]), label='class=%i' % label, alpha=0.3, bins=n_bins)
        pylab.xlabel(var_names[0])
        pylab.legend()
    elif n_vars == 2:
        if new_plot:
            pylab.figure(figsize=(12, 10))
        pylab.title('Distribution of classes')
        first_var, second_var = var_names
        for label in labels:
            # keep large classes translucent so overlaps remain visible
            alpha = numpy.clip(2000. / numpy.sum(y == label), 0.02, 1)
            pylab.plot(X.loc[y == label, first_var], X.loc[y == label, second_var], '.',
                       alpha=alpha, label='class=' + str(label))
    else:
        raise ValueError("More than two variables are not implemented")
def plot_features_pdf(X, y, n_bins=20, n_columns=3, ignored_sideband=0.001, mask=None,
                      sig_label='sig', bck_label='bck', adjust_n_bins=True, normed=True):
    """
    Plots in concise form distributions of all features
    """
    columns = sorted(X.columns)
    if mask is None:
        mask = numpy.ones(len(X), dtype=bool)
    n_rows = (len(columns) + n_columns - 1) // n_columns
    for i, column in enumerate(columns, 1):
        pylab.subplot(n_rows, n_columns, i)
        feature_bins = min(n_bins, len(numpy.unique(X.ix[:, column]))) if adjust_n_bins else n_bins
        # clip the plotting range to drop extreme sidebands
        limits = numpy.percentile(X.loc[mask, column], [100 * ignored_sideband, 100 * (1. - ignored_sideband)])
        # signal first (blue), then background (red) — same order as original
        for class_value, class_name, class_color in ((1, sig_label, 'b'), (0, bck_label, 'r')):
            pylab.hist(X.ix[(y == class_value) & mask, column].values, bins=feature_bins, normed=normed,
                       range=limits, alpha=0.3, label=class_name, color=class_color)
        pylab.legend(loc='best')
        pylab.title(column)
| {
"repo_name": "anaderi/lhcb_trigger_ml",
"path": "hep_ml/reports.py",
"copies": "1",
"size": "31998",
"license": "mit",
"hash": 571549035349695600,
"line_mean": 47.4818181818,
"line_max": 119,
"alpha_frac": 0.6043502719,
"autogenerated": false,
"ratio": 3.726764500349406,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4831114772249406,
"avg_score": null,
"num_lines": null
} |
# about:python, originally by Alex Badea
from xpcom import components, verbose
import sys, os
import platform
def getAbout():
    """Return the about:python HTML page as a string.

    The page is regenerated on every call so it always reflects the current
    interpreter state (loaded modules, sys.path, environment).
    """
    # sorted() works on both Python 2 and 3; the previous code called
    # .sort() on dict.keys(), which fails on Python 3 where keys()/items()
    # return views without a sort() method. Sort to keep things purdy.
    mod_names = sorted(sys.modules.keys())
    env = sorted(os.environ.items())
    return """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>about:python</title>
</head>
<body>
<h1>about:python</h1>
<p> </p>
<p>Python %(version)s on %(platform)s</p>
<h2>resources</h2>
<p>Visit the <a href="http://developer.mozilla.org/en/docs/PyXPCOM">pyxpcom wiki.</a></p>
<h2>sys.path</h2><p>%(path)s</p><p> </p>
<h2>environment</h2><p>%(environment)s</p><p> </p>
<h2>modules</h2><p>%(modules)s</p><p> </p>
</body>
</html>
""" % {
        'version': sys.version,
        'platform': platform.platform(),
        'path': "<br>".join(sys.path),
        'environment': "<br>".join(["%s=%s" % (n, v) for n, v in env]),
        'modules': ", ".join(mod_names),
    }
class AboutPython:
    """nsIAboutModule implementation serving the about:python page."""

    # XPCOM registration attributes — must stay exactly as registered.
    _com_interfaces_ = components.interfaces.nsIAboutModule
    _reg_contractid_ = '@mozilla.org/network/protocol/about;1?what=python'
    _reg_clsid_ = '{6d5d462e-6de7-4bca-bbc6-c488d481351b}'
    _reg_desc_ = "about:python handler"

    def __init__(self):
        pass

    def newChannel(self, aURI):
        # io-service lookup kept from the original implementation although
        # its return value is unused — presumably for its side effects;
        # TODO(review): confirm whether it can be dropped.
        components.classes["@mozilla.org/network/io-service;1"].getService()
        page = getAbout()
        stream = components.classes["@mozilla.org/io/string-input-stream;1"].createInstance()
        stream.setData(page, len(page))
        channel = components.classes["@mozilla.org/network/input-stream-channel;1"] \
            .createInstance(components.interfaces.nsIInputStreamChannel)
        channel.setURI(aURI)
        #channel.contentType = "text/html"
        channel.contentStream = stream
        return channel

    def getURIFlags(self, aURI):
        return 0
| {
"repo_name": "tmhorne/celtx",
"path": "extensions/python/xpcom/components/pyabout.py",
"copies": "1",
"size": "1960",
"license": "mpl-2.0",
"hash": 2417362241978859000,
"line_mean": 27.8235294118,
"line_max": 89,
"alpha_frac": 0.6270408163,
"autogenerated": false,
"ratio": 3.0340557275541795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41610965438541797,
"avg_score": null,
"num_lines": null
} |
import pypuppetdb
import collectd
from pypuppetdb import connect
# Host to connect to. Override in config by specifying 'Host'.
PUPPETDB_HOST = 'localhost'
# Port to connect to. Override in config by specifying 'Port'.
PUPPETDB_PORT = '8080'
# Use ssl. Override in config by specifying 'SSL_VERIFY'.
PUPPETDB_SSL = None
# Key used to connect to ('/path/to/private.pem'). Override in config by specifying 'Key'.
PUPPETDB_KEY = None
# CERT used to connect to ('/path/to/public.pem'). Override in config by specifying 'CERT'.
PUPPETDB_CERT = None
# Connect timeout. Override in config by specifying 'Timeout'.
# NOTE(review): the default is the *string* '20', while the 'Timeout' config
# handler stores an int — confirm pypuppetdb accepts a string timeout.
PUPPETDB_TIMEOUT = '20'
# Time to consider unreported nodes. Override in config by specifying 'UnreportTime'.
UNREPORTED_TIME = 25
# Verbose logging on/off. Override in config by specifying 'Verbose'.
VERBOSE_LOGGING = False
def get_infos(func, *args, **kwargs):
    """Call *func* with the given arguments, translating request failures.

    NOTE(review): HTTPError, EmptyResponseError and abort() are not imported
    anywhere in this module, so every handler below raises NameError if it
    ever triggers. HTTPError/EmptyResponseError look like
    requests/pypuppetdb exceptions and abort() like a web-framework helper
    copied from another project — confirm the intended imports.
    """
    try:
        return func(*args, **kwargs)
    except HTTPError as e:
        abort(e.response.status_code)
    except ConnectionError:
        abort(500)
    except EmptyResponseError:
        abort(204)
def dispatch_value(value, key, type, type_instance=None):
    """Push a single metric into collectd under the 'puppetdb' plugin.

    *type_instance* defaults to *key* when not supplied.
    """
    type_instance = type_instance or key
    log_verbose('Sending value: %s=%s' % (type_instance, value))
    metric = collectd.Values(plugin='puppetdb')
    metric.type = type
    metric.type_instance = type_instance
    metric.values = [value]
    metric.dispatch()
def read_callback():
    """collectd read callback: query PuppetDB and dispatch gauge metrics."""
    puppetdb = connect(
        api_version=3,
        host=PUPPETDB_HOST,
        port=PUPPETDB_PORT,
        ssl_verify=PUPPETDB_SSL,
        ssl_key=PUPPETDB_KEY,
        ssl_cert=PUPPETDB_CERT,
        timeout=PUPPETDB_TIMEOUT,
    )
    prefix = 'com.puppetlabs.puppetdb.query.population'
    num_nodes = get_infos(
        puppetdb.metric,
        "{0}{1}".format(prefix, ':type=default,name=num-nodes'))
    # These two metrics are fetched but not dispatched below (kept as-is).
    num_resources = get_infos(
        puppetdb.metric,
        "{0}{1}".format(prefix, ':type=default,name=num-resources'))
    avg_resources_node = get_infos(
        puppetdb.metric,
        "{0}{1}".format(prefix, ':type=default,name=avg-resources-per-node'))
    # Fetch nodes, including last-report status.
    nodes = puppetdb.nodes(
        unreported=UNREPORTED_TIME,
        with_status=True)
    # Tally node statuses; anything unrecognised counts as 'unchanged'.
    stats = {
        'changed': 0,
        'unchanged': 0,
        'failed': 0,
        'unreported': 0,
        'noop': 0,
    }
    for node in nodes:
        status = node.status if node.status in stats else 'unchanged'
        stats[status] += 1
    log_verbose('population: %s\n' % num_nodes['Value'])
    dispatch_value(num_nodes['Value'], 'population', 'gauge')
    for name in ('unreported', 'changed', 'failed', 'noop', 'unchanged'):
        log_verbose('%s: %s\n' % (name, stats[name]))
        dispatch_value(stats[name], name, 'gauge')
def log_verbose(msg):
    """Emit *msg* through collectd.info() when verbose logging is enabled."""
    if VERBOSE_LOGGING:
        collectd.info('puppetdb plugin [verbose]: %s' % msg)
def configure_callback(conf):
    """Receive configuration block and update the module-level settings."""
    global PUPPETDB_HOST, PUPPETDB_PORT, PUPPETDB_SSL, PUPPETDB_KEY, \
        PUPPETDB_CERT, PUPPETDB_TIMEOUT, UNREPORTED_TIME, VERBOSE_LOGGING
    # Map config keys to (module global name, converter) pairs.
    settings = {
        'Host': ('PUPPETDB_HOST', None),
        'Port': ('PUPPETDB_PORT', None),
        'SSL_VERIFY': ('PUPPETDB_SSL', None),
        'Key': ('PUPPETDB_KEY', None),
        'CERT': ('PUPPETDB_CERT', None),
        'Timeout': ('PUPPETDB_TIMEOUT', int),
        'UnreportTime': ('UNREPORTED_TIME', int),
        'Verbose': ('VERBOSE_LOGGING', bool),
    }
    for child in conf.children:
        try:
            name, convert = settings[child.key]
        except KeyError:
            collectd.warning('puppetdb plugin: Unknown config key: %s.'
                             % child.key)
            continue
        raw = child.values[0]
        globals()[name] = convert(raw) if convert else raw
    log_verbose('Configured with host=%s, port=%s, ssl=%s, key=%s, cert=%s, timeout=%s' % (PUPPETDB_HOST, PUPPETDB_PORT, PUPPETDB_SSL, PUPPETDB_KEY, PUPPETDB_CERT, PUPPETDB_TIMEOUT))
collectd.register_config(configure_callback)
collectd.register_read(read_callback) | {
"repo_name": "vincentbernat/collectd-puppetdb",
"path": "puppetdb.py",
"copies": "1",
"size": "5145",
"license": "mit",
"hash": 2074521579100940800,
"line_mean": 31.3647798742,
"line_max": 182,
"alpha_frac": 0.6211856171,
"autogenerated": false,
"ratio": 3.3172147001934236,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4438400317293424,
"avg_score": null,
"num_lines": null
} |
import sys
import numpy as np
from sklearn import tree, linear_model
import argparse
def get_args():
    """Parse the required training/testing data paths from the command line.

    Returns a dict with keys 'traning_data' (sic — the misspelling is kept
    for backward compatibility with existing callers) and 'testing_data'.
    """
    parser = argparse.ArgumentParser()
    for short_flag, long_flag, description in (
            ('-t', '--traning_data', 'Training data'),
            ('-v', '--testing_data', 'Testing data')):
        parser.add_argument(short_flag, long_flag, help=description, required=True)
    return vars(parser.parse_args())
def get_data_details(csv_data):
    """Load a 4-column CSV and split it into features (cols 0-2) and labels (col 3)."""
    table = np.genfromtxt(csv_data, delimiter=",")
    return table[:, [0, 1, 2]], table[:, 3]
def get_occuracy(real_labels, predicted_labels, fltr):
    """Compare how often *fltr* occurs in predictions vs. ground truth.

    Prints both counts and returns (predicted count / real count) * 100.
    Note: despite the variable name, this is a count ratio, not precision
    in the classification sense.

    Raises ZeroDivisionError when *fltr* never occurs in real_labels.
    """
    # Float counts keep the final division exact under Python 2 as well.
    real_label_count = float(sum(1 for label in real_labels if label == fltr))
    predicted_label_count = float(sum(1 for label in predicted_labels if label == fltr))
    # print() with a single argument is valid on both Python 2 and 3;
    # the original bare print statements were Python-2-only syntax.
    print("Real number of attacks: " + str(real_label_count))
    print("Predicted number of attacks: " + str(predicted_label_count))
    return predicted_label_count * 100 / real_label_count
| {
"repo_name": "slrbl/Intrusion-and-anomaly-detection-with-machine-learning",
"path": "utilities.py",
"copies": "1",
"size": "1253",
"license": "mit",
"hash": -251943514073552100,
"line_mean": 30.325,
"line_max": 88,
"alpha_frac": 0.6105347167,
"autogenerated": false,
"ratio": 3.8085106382978724,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49190453549978724,
"avg_score": null,
"num_lines": null
} |
'''A box model is used to describe the growth of mussels, mainly Mytilus edulis, in a small aquaculture site at Upper South Cove
near Lunenburg Nova Scotia. The ecological interactions in the model include 2 competing herbivores, mussels and zooplankton,
and 2 food sources, phytoplankton and non-plankton seston.
Dowd (1997) On predicting the growth of cultured bivalves. Ecological Modelling 104 (1997) 113-131
'''
def load_defaults():
    '''
    Build the default framework settings, model parameters and initial
    conditions for the Dowd (1997) mussel growth model.

    Returns:
        days (int): simulation length in days (five years).
        dt (float): time step in days.
        par (dict): model parameters.
        InitCond (dict): initial conditions.
    '''
    # Framework
    days = 365 * 5  # five years
    dt = 0.01       # units: days

    # Parameters (defaults), Dowd (1997) plus the three C_* tuning constants.
    par = {
        'I_M': 0.1,          # ingestion rate for mussel (units: d^-1)
        'R_M': 0.01,         # respiration rate for mussel
        'epsilon_MP': 0.9,   # assimilation efficiency on phytoplankton
        'epsilon_MS': 0.2,   # assimilation efficiency on seston
        'mu_M': 0.8,         # selection factor for mussels
        'lambda_M': -0.002,  # mortality rate (units: d^-1)
        'kappa_M': 1000,     # half-saturation for ingestion (units: gC m^-3)
        'Q_MI': 0.07,        # temperature rate constant (units: degree C^-1)
        'Q_MR': 0.07,        # (units: degree C^-1)
        'b': -2,             # allometric exponent
        'T': 10,             # temperature (units: degree C)
        'D_M': 0,            # spawning parameter, zero for simplicity
        # Diego's three added tuning constants:
        'C_MI': 40.,
        'C_MR1': .01,
        'C_MR2': .01,
    }

    # Initial conditions
    InitCond = {
        'M': 0.015,  # mussel weight
        'P': 0.1,    # phytoplankton (units: gC m^-3)
        'S': 1.0,    # seston (units: gC m^-3)
    }
    return days, dt, par, InitCond
def run_model(days, dt, InitCond, par):
    '''
    Integrate the Dowd (1997) mussel growth model with forward Euler.

    INPUTS:
        days: number of days of simulation
        dt: time step (units: days)
        InitCond: dictionary with all initial conditions
        par: dictionary with all model parameters

    OUTPUTS:
        output: dictionary with
            'time': time vector (units: days)
            'M': mussel dry weight trajectory (units: gC)

    Reference: Dowd (1997) On predicting the growth of cultured bivalves.
    Ecological Modelling 104, 113-131.
    '''
    # Import libraries (the unused `import math` was removed).
    import numpy as np

    # Setup the framework: number of steps and the matching time vector.
    NoSTEPS = int(days / dt)
    time = np.linspace(0, days, NoSTEPS)

    # State vector for mussel weight; P and S are held constant forcing.
    M = np.zeros((NoSTEPS,), float)
    M[0] = InitCond['M']
    P = InitCond['P']
    S = InitCond['S']

    # *****************************************************************************
    # MAIN MODEL LOOP *************************************************************
    for t in range(0, NoSTEPS-1):
        # Temperature/allometry scalings for ingestion and respiration
        # (the C_MI, C_MR1, C_MR2 constants replace the original "1"s).
        f_MI = par['C_MI']*((P+S)/(par['kappa_M']+P+S))*(np.exp(par['Q_MI']*par['T']))*(M[t]**par['b'])
        f_MR = (par['C_MR1']*(M[t]**par['b']))+(par['C_MR2']*(P+S))*(np.exp(par['Q_MR']*par['T']))*(M[t]**par['b'])
        # Growth rate of an individual mussel (Eq. 1 of Dowd 1997).
        dMdt = ((par['epsilon_MP']*P)/(P+par['mu_M']*S)+(par['epsilon_MS']*par['mu_M']*S)/(P+par['mu_M']*S))* \
            (f_MI*par['I_M']*M[t])-(f_MR*par['R_M']*M[t])-(par['D_M'])
        # Forward Euler time stepping.
        M[t+1] = M[t] + (dMdt * dt)
    # END of MAIN MODEL LOOP ******************************************************
    # *****************************************************************************

    # Pack output into dictionary
    output = {}
    output['time'] = time
    output['M'] = M
    # print() is valid on both Python 2 and 3; the original bare
    # `print "..."` statement is a SyntaxError on Python 3.
    print("Model run: DONE!!!")
    return output
def plot_model(output):
    '''
    Plot the mussel dry-weight trajectory produced by run_model().
    '''
    # Import libraries
    import matplotlib.pyplot as plt

    # One panel: weight (gC) against time (converted from days to years
    # on the x-axis, although the label still says days).
    figure, axis = plt.subplots(1, 1)
    axis.plot(output['time']/365, output['M'], 'b-')
    axis.set_xlabel('Time (days)')
    axis.set_ylabel('Mussel Dry Weight (gC)')
    axis.set_title('Model predicted growth trajectories for mussels in a coastal inlet near Lunenburg Nova Scotia')
    plt.show()
    return
# Diego's: allows running the model directly from THIS script instead of
# via the "experiment_run.py" driver.
if __name__ == '__main__':
    # Load default parameters and initial conditions.
    days, dt, par, InitCond = load_defaults()
    # Integrate the model.
    output = run_model(days,dt,InitCond,par)
    # Plot the result.
plot_model(output) | {
"repo_name": "Diego-Ibarra/aquamod",
"path": "aquamod/bivalves/mussel_Dowd1997.py",
"copies": "1",
"size": "5402",
"license": "mit",
"hash": -4360261048328527000,
"line_mean": 35.5067567568,
"line_max": 133,
"alpha_frac": 0.5429470566,
"autogenerated": false,
"ratio": 3.395348837209302,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4438295893809302,
"avg_score": null,
"num_lines": null
} |
""" A box of elements for getting input from a microphone.
"""
from .box import Box
class Mic(Box):
    """A box wrapping a microphone source feeding an equalizer and a tee.

    Subclasses set SRC_TEMPLATE to a GStreamer source description with
    %(name)s and %(device)s (or %(device)d) placeholders, and implement
    all() to enumerate the available devices.
    """
    SRC_TEMPLATE = None

    def __init__(self, pipeline, name, device):
        super(Mic, self).__init__(name, pipeline)
        self.add_sequence([
            self.SRC_TEMPLATE % {
                "name": "src",
                "device": device,
            },
            'equalizer-10bands',
            'tee',
        ])

    @classmethod
    def _create_many(cls, pipeline, srcs):
        # One Mic per detected device, named mic0, mic1, ...
        return [
            cls(pipeline, 'mic%d' % i, device)
            for i, device in enumerate(srcs)
        ]

    @classmethod
    def all(cls):
        # Fixed: the first argument of a classmethod is conventionally
        # named `cls`, not `self` (no behavioral change).
        raise NotImplementedError("Please implement. :)")
class AlsaMic(Mic):
    # ALSA-backed microphone source.
    SRC_TEMPLATE = "alsasrc name=%(name)s device=%(device)s"

    @classmethod
    def all(cls, pipeline):
        """Create one AlsaMic per ALSA capture card found on the system."""
        from .alsa import find_alsa_cards
        return cls._create_many(pipeline, find_alsa_cards())
class PulseMic(Mic):
    # PulseAudio-backed microphone source.
    # NOTE(review): the template formats the device with %(device)d (an int),
    # unlike AlsaMic's %(device)s — confirm find_pulse_srcs yields integers.
    SRC_TEMPLATE = "pulsesrc name=%(name)s device=%(device)d"

    @classmethod
    def all(cls, pipeline):
        """Create one PulseMic per PulseAudio source found on the system."""
        from .pulse import find_pulse_srcs
        return cls._create_many(pipeline, find_pulse_srcs())
| {
"repo_name": "hodgestar/laghuis",
"path": "laghuis/mic.py",
"copies": "1",
"size": "1174",
"license": "mit",
"hash": 8387268822328807000,
"line_mean": 22.9591836735,
"line_max": 61,
"alpha_frac": 0.5672913118,
"autogenerated": false,
"ratio": 3.6802507836990594,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47475420954990594,
"avg_score": null,
"num_lines": null
} |
"""A Broadstreet Ads API wrapper.
This is a thin layer over the python requests library to simplify
access to the Broadstreet Ads API. It provides the functionality:
* Serialization and deserialization of data
* Convert API errors into python exceptions
* Re-trying requests if possible on various errors (TODO)
"""
import time
import requests
_missing = object()
class APIError(Exception):
    """An error response from the broadstreet API.

    Exposes the raw response and its status code; the exception message
    is "<status_code> <content>".
    """

    def __init__(self, response):
        self.response = response
        self.status_code = response.status_code
        detail = '{response.status_code} {response.content}'.format(response=response)
        Exception.__init__(self, detail)
class APIServerError(APIError):
    """A 5xx error response from the broadstreet API."""
    pass
class APIConnection(object):
    """Thin requests-based client for the Broadstreet Ads HTTP API.

    Subclasses set API_VERSION. Every verb attaches the access token
    (query parameter for GET/DELETE, form field for POST/PATCH) and
    decodes the response through _get_result().
    """
    API_VERSION = None

    def __init__(self,
                 access_token,
                 host='api.broadstreetads.com'):
        self._host = host
        self._access_token = access_token

    def _url(self, path):
        """Build the absolute API URL for *path* (must start with '/')."""
        assert path.startswith('/')
        subs = dict(
            access_token=self._access_token,
            host=self._host,
            version=self.API_VERSION,
            path=path)
        return 'https://{host}/api/{version}{path}'.format(**subs)

    def _get_result(self, response, raw):
        """Turn an HTTP response into data, or raise an API error.

        Returns the raw response when *raw* is true, None for 204,
        parsed JSON for any other 2xx; raises APIServerError for 5xx
        and APIError for everything else.
        """
        if raw:
            return response
        if response.status_code >= 500 and response.status_code < 600:
            raise APIServerError(response)
        if response.status_code == 204:
            return None
        if response.status_code >= 200 and response.status_code < 300:
            return response.json()
        raise APIError(response)

    def _request(self, method, path, raw, data=None):
        """Shared implementation behind the verb helpers.

        The original code duplicated the URL/auth/dispatch logic in all
        four verbs; this consolidates it. When *data* is None the token
        is sent as a query parameter, otherwise it is merged into the
        form body.
        """
        url = self._url(path)
        auth = {'access_token': self._access_token}
        if data is None:
            response = getattr(requests, method)(url, verify=True, params=auth)
        else:
            payload = dict(auth)
            payload.update(data)
            response = getattr(requests, method)(url, verify=True, data=payload)
        return self._get_result(response, raw)

    def get(self, path, _raw=False):
        return self._request('get', path, _raw)

    def post(self, path, data, _raw=False):
        return self._request('post', path, _raw, data=data)

    def delete(self, path, _raw=False):
        return self._request('delete', path, _raw)

    def patch(self, path, data, _raw=False):
        return self._request('patch', path, _raw, data=data)
class APIv0(APIConnection):
    """Connection to version 0 of the broadstreet API."""
    API_VERSION = 0

    def get_networks(self):
        """List all networks visible to this token."""
        return self.get('/networks')['networks']

    def get_zones(self, network):
        """List the zones belonging to *network*."""
        endpoint = '/networks/{network}/zones'.format(network=network)
        return self.get(endpoint)['zones']

    def create_zone(self, network, name, alias=None):
        """Create a zone called *name* in *network*, optionally aliased."""
        endpoint = '/networks/{network}/zones'.format(network=network)
        payload = dict(name=name)
        if alias is not None:
            payload['alias'] = alias
        return self.post(endpoint, payload)['zone']

    def delete_zone(self, network, zone):
        """Delete *zone* from *network*."""
        endpoint = '/networks/{network}/zones/{zone}'.format(
            network=network,
            zone=zone)
        return self.delete(endpoint)

    def update_zone(self, network, zone, name=_missing, alias=_missing):
        """Patch *zone*, changing whichever of name/alias were supplied."""
        candidates = [
            ('name', name),
            ('alias', alias)]
        params = dict((k, v) for k, v in candidates if v is not _missing)
        assert params
        endpoint = '/networks/{network}/zones/{zone}'.format(
            network=network,
            zone=zone)
        return self.patch(endpoint, params)
def sync_zones(conn, namespace, network, zones):
    """Synchronize a local set of zones with one in broadstreet.

    `namespace` should be something very unique (a UUID or identifier of a
    product); it will be prepended to the alias of all zones with a dot (.).
    `zones` is a dictionary keyed by the zone alias. The values are
    dictionaries of the zone attributes.
    `network` integer id of the network to modify.
    `conn` is a broadstreet API connection.

    Returns a dict summarizing the actions taken, with list values under
    the keys 'created', 'unchanged', 'deleted', 'fixed' and 'ignored'.
    """
    def backoff():
        # sleep for 50 milliseconds after making a WRITE request so as not
        # to bombard the broadstreet API
        time.sleep(0.05)
    created = []
    fixed = []
    deleted = []
    unchanged = []
    ignored = []
    have_zones = {}
    seen = set([])
    # Pass 1: walk the remote zones, deleting duplicates and zones no
    # longer wanted, and fixing names that drifted.
    for zone in conn.get_zones(network):
        alias = zone.get('alias')
        if not alias or not alias.startswith(namespace + '.'):
            ignored.append(zone['id'])
            # only consider zones in our namespace
            continue
        # Strip the "namespace." prefix; the startswith check above
        # guarantees the part before the prefix is empty.
        ign, alias = alias.split(namespace + '.', 1)
        assert not ign, ign
        if alias in seen:
            # DUPLICATE alias: delete to remove any ambiguities.
            deleted.append(zone)
            conn.delete_zone(network, zone['id'])
            backoff()
            continue
        seen.add(alias)
        have_zones[alias] = zone
        wanted = zones.get(alias, None)
        if wanted is None:
            # Remote zone is no longer wanted locally.
            deleted.append(zone)
            conn.delete_zone(network, zone['id'])
            backoff()
        else:
            if wanted['name'] != zone['name']:
                conn.update_zone(
                    network,
                    zone['id'],
                    name=wanted['name'])
                fixed.append(zone['id'])
                backoff()
            else:
                unchanged.append(zone['id'])
    # Pass 2: create the wanted zones that do not exist remotely yet.
    for alias, wanted in zones.items():
        if alias in have_zones:
            continue
        ns_alias = namespace + '.' + alias
        created.append(ns_alias)
        conn.create_zone(network, wanted['name'], alias=ns_alias)
        backoff()
    return dict(
        created=created,
        unchanged=unchanged,
        deleted=deleted,
        fixed=fixed,
        ignored=ignored)
if __name__ == '__main__':
    # Manual smoke test against the live API (requires a real token).
    # UN-comment for very verbose logging
    #import logging
    #logging.basicConfig(level=logging.DEBUG)
    #import httplib
    #httplib.HTTPConnection.debuglevel = 1
    from pprint import pprint
    conn = APIv0('XXXXXXX')
    namespace = 'testing123'
    network = 0
    wanted = {
        'alias_zone_1': dict(name='Zone 1'),
        'alias_zone_2': dict(name='Zone 2')}
    # Show the zones before, what sync_zones changed, and the zones after.
    r = conn.get_zones(network)
    pprint(r)
    r = sync_zones(conn, namespace, network, wanted)
    pprint(r)
    r = conn.get_zones(network)
    pprint(r)
| {
"repo_name": "mpub/broadstreetads",
"path": "broadstreetads.py",
"copies": "1",
"size": "6960",
"license": "mit",
"hash": -3515607055192901000,
"line_mean": 29.9333333333,
"line_max": 87,
"alpha_frac": 0.555316092,
"autogenerated": false,
"ratio": 4.160191273161985,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5215507365161984,
"avg_score": null,
"num_lines": null
} |
'''a broken pythonic Graph
Nodes and edges, not pretty colors and pitchers.
'''
from . import Point
from .line import Segment
from .exceptions import *
class Node(Point):
    '''
    A graph vertex: a Point used as an Edge endpoint.
    '''
    pass
class Edge(Segment):
    '''
    A graph edge: a Segment whose endpoints A and B are lazily-created
    Nodes. Hashing is symmetric, so Edge(A, B) and Edge(B, A) hash alike.
    '''
    @Segment.A.getter
    def A(self):
        # Lazily create endpoint A on first access.
        try:
            return self._A
        except AttributeError:
            pass
        self._A = Node()
        return self._A

    @Segment.B.getter
    def B(self):
        # Lazily create endpoint B on first access.
        try:
            return self._B
        except AttributeError:
            pass
        self._B = Node()
        return self._B

    def __hash__(self):
        # XOR is commutative: A^B == B^A, so edges AB and BA hash to the
        # same value (undirected-edge semantics).
        return hash(self.A) ^ hash(self.B)
class Graph(object):
    '''
    A collection of Nodes and the Edges connecting them.
    '''
    @classmethod
    def randomGraph(cls, radius, nodes, origin=None):
        '''
        Build a graph of `nodes` randomly-located Nodes within `radius`
        of `origin` (defaults to a new Point at the origin).
        '''
        if origin is None:
            origin = Point()
        graph = cls()
        while len(graph) < nodes:
            try:
                graph.addNode(Node.randomLocation(radius, origin))
            except ValueError:
                pass
        return graph

    def __init__(self, nodes=None, edges=None):
        # Both arguments are optional iterables; the TypeError raised by
        # iterating None is deliberately swallowed.
        try:
            for node in nodes:
                self.nodes.add(Node(node))
        except TypeError:
            pass
        try:
            for edge in edges:
                self.nodes.add(edge.A)
                self.nodes.add(edge.B)
                self.edges.add(edge)
        except TypeError:
            pass

    def addNode(self, node):
        '''
        Add `node` to the graph's node set.

        Fixed: randomGraph() called this method but it was missing.
        '''
        self.nodes.add(node)

    @property
    def nodes(self):
        # Lazily-created set of Nodes.
        try:
            return self._nodes
        except AttributeError:
            pass
        self._nodes = set()
        return self._nodes

    @property
    def edges(self):
        # Lazily-created set of Edges.
        try:
            return self._edges
        except AttributeError:
            pass
        self._edges = set()
        return self._edges

    def __len__(self):
        '''
        The number of nodes in the graph, integer.
        '''
        return len(self.nodes)

    def __str__(self):
        s = []
        s.append(repr(self))
        s.extend(['\t' + repr(n) for n in self.nodes])
        s.extend(['\t' + repr(e) for e in self.edges])
        return '\n'.join(s)

    def __repr__(self):
        # NOTE(review): the trailing '>' has no matching '<'; kept as-is
        # for output compatibility.
        fmt = '%s(nodes=%s,edges=%s)>'
        return fmt % (self.__class__.__name__,
                      str(self.nodes), str(self.edges))

    def sortedNodes(self, func=None):
        '''
        The nodes as a list sorted by `func` (default: squared distance
        from the center of gravity).
        '''
        if func is None:
            func = lambda x: x.distanceSquared(self.cg)
        nodes = list(self.nodes)
        nodes.sort(key=func)
        return nodes

    @property
    def cg(self):
        '''
        Center of gravity, Node.
        '''
        return Node(sum(self.nodes) // len(self.nodes))

    def __eq__(self, other):
        '''
        x == y iff:
        len(x) == len(y)
        all nodes of x are in y
        '''
        if len(self) != len(other):
            return False
        return self in other

    def __contains__(self, other):
        otherType = type(other)
        # Fixed: 'issubtype' does not exist anywhere; issubclass is the
        # intended builtin.
        if issubclass(otherType, Node):
            for node in self.nodes:
                if node == other:
                    return True
            return False
        if issubclass(otherType, Edge):
            for edge in self.edges:
                if edge == other:
                    return True
            return False
        if issubclass(otherType, Graph):
            # graphs need to match nodes AND edges
            if len(self.edges) != len(other.edges):
                return False
            # Fixed: the original ignored node mismatches ('if node in
            # other: pass'), making this branch always return True.
            for node in self.nodes:
                if node not in other:
                    return False
            return True

    def disconnect(self):
        '''
        Remove all edges, leaving the nodes untouched.
        '''
        self.edges.clear()

    def connect(self, doDisconnect=True):
        '''
        Fully connect the graph: create an Edge for every ordered pair of
        distinct nodes.
        '''
        if doDisconnect:
            self.disconnect()
        # Fixed: self.sortNodes() did not exist (AttributeError) and
        # self.edges is a set, which has no append(); use sortedNodes()
        # and set.add() instead.
        for A in self.sortedNodes():
            for B in self.nodes:
                if A is B:
                    continue
                self.edges.add(Edge(A, B))

    def drawNodes(self, surface, color):
        for node in self.nodes:
            node.draw(surface, color)

    def drawEdges(self, surface, color):
        for edge in self.edges:
            edge.draw(surface, color)

    def draw(self, surface, nodeColor=(0, 255, 0),
             edgeColor=(0, 0, 255), cg=True):
        # Draw edges first so nodes render on top; optionally mark the
        # center of gravity in red.
        self.drawEdges(surface, edgeColor)
        self.drawNodes(surface, nodeColor)
        if cg:
            self.cg.draw(surface, (255, 0, 0))
| {
"repo_name": "JnyJny/Geometry",
"path": "Geometry/graph.py",
"copies": "1",
"size": "4658",
"license": "mit",
"hash": -7679467674613167000,
"line_mean": 21.180952381,
"line_max": 66,
"alpha_frac": 0.4854014599,
"autogenerated": false,
"ratio": 4.242258652094717,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5227660111994717,
"avg_score": null,
"num_lines": null
} |
""" AbsKinGui for setting lines for Kinematic analysis
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Import libraries
import numpy as np
import warnings
import io
import json
from PyQt4 import QtGui
from PyQt4 import QtCore
# Matplotlib Figure object
from astropy import units as u
from linetools.guis import line_widgets as ltgl
from linetools.isgm import utils as ltiu
#from linetools.guis import spec_widgets as lspw
from xastropy.xutils import xdebug as xdb
from xastropy.xguis import spec_widgets as xspw
'''
=======
Analyzing system for future kinematic analysis
Here is now my preferred approach to perform the
analysis:
1. Inspect the velocity plots.
2. Identify the best low-ion transition for the analysis.
a. High S/N
b. Strong, but not saturated (or just barely)
c. Preferably SiII, SII, ZnII, MgII (i.e. highly not refractory)
3. Hit "^" on the line for a low-ion kinematic tracer
a. Adjust velocity limits if need be (1, 2)
4. Hit "&" on the line for a high-ion kinematic tracer
'''
class AbsKinGui(QtGui.QDialog):
    """ GUI to analyze absorption lines for future kinematic analysis.

    Combines a velocity-plot widget with line-list/selection widgets and
    Write / Write+Quit / Quit controls; results are written to JSON.
    """
    def __init__(self, ispec, z=None, parent=None, llist=None, norm=True,
                 vmnx=[-300., 300.]*u.km/u.s, abs_sys=None, outfil='dum_kin.json',
                 sel_wv=None, name=''):
        """
        ispec : Filename or Spectrum1D
        z : float, optional
            Redshift; required when abs_sys is not supplied.
        norm : Bool (True)
            Normalized spectrum?
        vmnx : Quantity pair
            Velocity window for the plots (km/s).
        abs_sys : AbsSystem
            Absorption system class
        outfil : str
            JSON file the analysis is written to.
        sel_wv : Selected wavelength. Used to inspect a single, unknown line
        name : str
            Label shown at the top of the dialog.
        """
        super(AbsKinGui, self).__init__(parent)
        # Initialize: either an AbsSystem (which carries its redshift)
        # or an explicit z must be supplied.
        self.abs_sys = abs_sys
        if self.abs_sys is not None:
            self.z = self.abs_sys.zabs
        else:
            if z is None:
                raise ValueError('AbsKin: Need to set abs_sys or z!')
            self.z = z
        self.vmnx = vmnx
        self.outfil = outfil
        self.norm = norm
        self.sel_wv = sel_wv
        # Grab the pieces and tie together
        newfont = QtGui.QFont("Times", 10, QtGui.QFont.Bold)
        sys_label = QtGui.QLabel('Name: \n {:s}'.format(name))
        sys_label.setFont(newfont)
        self.vplt_widg = xspw.VelPlotWidget(ispec, abs_sys=self.abs_sys, llist=llist,
                                            vmnx=self.vmnx, z=self.z, norm=self.norm)
        self.pltline_widg = ltgl.PlotLinesWidget(init_llist=self.vplt_widg.llist,
                                                 init_z=self.z)
        #self.pltline_widg.spec_widg = self.vplt_widg
        self.slines = ltgl.SelectedLinesWidget(self.vplt_widg.llist[self.vplt_widg.llist['List']],
                                               init_select=self.vplt_widg.llist['show_line'],
                                               plot_widget=self.vplt_widg)
        # Connections: line-list changes, redshift edits, key presses.
        self.pltline_widg.llist_widget.currentItemChanged.connect(self.on_llist_change)
        self.connect(self.pltline_widg.zbox, QtCore.SIGNAL('editingFinished ()'), self.setz)
        self.vplt_widg.canvas.mpl_connect('key_press_event', self.on_key)
        # Output-file widgets (Write button + editable filename box).
        wbtn = QtGui.QPushButton('Write', self)
        wbtn.setAutoDefault(False)
        wbtn.clicked.connect(self.write_out)
        self.out_box = QtGui.QLineEdit()
        self.out_box.setText(self.outfil)
        self.connect(self.out_box, QtCore.SIGNAL('editingFinished ()'), self.set_outfil)
        #QtCore.pyqtRemoveInputHook()
        #xdb.set_trace()
        #QtCore.pyqtRestoreInputHook()
        # Quit buttons
        buttons = QtGui.QWidget()
        wqbtn = QtGui.QPushButton('Write+Quit', self)
        wqbtn.setAutoDefault(False)
        wqbtn.clicked.connect(self.write_quit)
        qbtn = QtGui.QPushButton('Quit', self)
        qbtn.setAutoDefault(False)
        qbtn.clicked.connect(self.quit)
        # Sizes of the right-hand control column.
        lines_widg = QtGui.QWidget()
        lines_widg.setMaximumWidth(300)
        lines_widg.setMinimumWidth(200)
        # Layout: controls stacked vertically on the right of the plot.
        vbox = QtGui.QVBoxLayout()
        vbox.addWidget(sys_label)
        vbox.addWidget(self.pltline_widg)
        vbox.addWidget(self.slines)
        vbox.addWidget(wbtn)
        vbox.addWidget(self.out_box)
        # Write/Quit buttons
        hbox1 = QtGui.QHBoxLayout()
        hbox1.addWidget(wqbtn)
        hbox1.addWidget(qbtn)
        buttons.setLayout(hbox1)
        #
        vbox.addWidget(buttons)
        lines_widg.setLayout(vbox)
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(self.vplt_widg)
        hbox.addWidget(lines_widg)
        self.setLayout(hbox)
        # Initial draw
        self.vplt_widg.on_draw()

    # Overload, as needed
    def on_key(self, event):
        """Key-press hook; no-op here, meant to be overridden."""
        pass

    # Change list of lines to choose from
    def on_llist_change(self):
        """Sync the velocity plot and selection widget with a new line list."""
        llist = self.pltline_widg.llist
        all_lines = list( llist[llist['List']]._data['wrest'] )
        # Re-select the system's lines by their index in the new list;
        # lines absent from the new list are silently dropped.
        abs_sys = self.vplt_widg.abs_sys
        wrest = [line.wrest for line in abs_sys.lines]
        select = []
        for iwrest in wrest:
            try:
                select.append(all_lines.index(iwrest))
            except ValueError:
                pass
        select.sort()
        # Push the new list and selection into both GUIs.
        self.vplt_widg.llist['List'] = llist['List']
        self.vplt_widg.llist['show_line'] = select
        self.vplt_widg.idx_line = 0
        self.slines.selected = select
        #QtCore.pyqtRemoveInputHook()
        #xdb.set_trace()
        #QtCore.pyqtRestoreInputHook()
        self.slines.on_list_change(llist[llist['List']])

    def set_outfil(self):
        """Update the output filename from the text box."""
        self.outfil = str(self.out_box.text())
        print('AbsKin: Will write to {:s}'.format(self.outfil))

    # Set z from pltline_widg
    def setz(self):
        """Propagate the redshift typed in the line widget and redraw."""
        self.vplt_widg.abs_sys.zabs = self.pltline_widg.llist['z']
        self.vplt_widg.z = self.pltline_widg.llist['z']
        self.z = self.pltline_widg.llist['z']
        self.vplt_widg.on_draw()

    def write_out(self):
        """Write the absorption system (with rebuilt components) to JSON."""
        # Rebuild components from the current absorption lines.
        comps = ltiu.build_components_from_abslines(self.vplt_widg.abs_lines)
        self.vplt_widg.abs_sys._components = comps
        # Serialize to a dict, then dump as JSON.
        adict = self.vplt_widg.abs_sys.to_dict()
        # NOTE(review): unicode() is Python 2 only; this will NameError on
        # Python 3 — confirm the intended Python version.
        with io.open(self.outfil, 'w', encoding='utf-8') as f:
            f.write(unicode(json.dumps(adict, sort_keys=True, indent=4,
                                       separators=(',', ': '))))

    def write_quit(self):
        """Close the dialog, flagging that results should be kept.

        NOTE(review): despite the name, the actual write_out() call is
        commented out — confirm whether it should be re-enabled.
        """
        #self.write_out()
        self.flg_quit = 1
        self.abs_sys = self.vplt_widg.abs_sys
        self.done(1)

    def quit(self):
        """Close the dialog without writing."""
        self.abs_sys = self.vplt_widg.abs_sys  # Have to write to pass back
        self.flg_quit = 0
        self.done(1)
# Script to run AbsKinGui from the command line or ipython
def main(*args, **kwargs):
    """ Runs the AbsKinGui.

    Command line or from Python.
    Examples:
    1. python ~/xastropy/xastropy/xguis/abskingui.py
    2. abskingui.main(filename)
    3. abskingui.main(spec1d)
    """
    import sys
    import argparse
    from specutils import Spectrum1D

    parser = argparse.ArgumentParser(description='Parse for AbsKingGui')
    parser.add_argument("file", type=str, help="Spectral file")
    parser.add_argument("-zsys", type=float, help="System Redshift")
    parser.add_argument("-outfil", type=str, help="Output filename")
    parser.add_argument("--un_norm", help="Spectrum is NOT normalized",
                        action="store_true")
    if len(args) == 0:
        # No positional args: parse sys.argv (command-line invocation).
        pargs = parser.parse_args()
    else: # better know what you are doing!
        if isinstance(args[0],(Spectrum1D, tuple)):
            # Called from Python with a spectrum object.
            # NOTE(review): `app` is unbound here when kwargs['rerun'] is
            # truthy, yet it is returned below — confirm intent.
            if not kwargs['rerun']:
                app = QtGui.QApplication(sys.argv)
            # Debug trap left in deliberately (drops into the debugger).
            xdb.set_trace()
            gui = AbsKinGui(args[0], **kwargs)
            gui.exec_()
            #gui.show()
            #app.exec_()
            return gui, app
        else: # String parsing
            largs = [iargs for iargs in args]
            pargs = parser.parse_args(largs)
            xdb.set_trace() # Not setup for command line yet
    # Normalized?
    norm = True
    if pargs.un_norm:
        norm = False
    # z
    try:
        zsys = pargs.zsys
    except AttributeError:
        zsys=None
    # outfil
    try:
        outfil = pargs.outfil
    except AttributeError:
        outfil=None
    # Launch the GUI and block in the Qt event loop until it closes.
    app = QtGui.QApplication(sys.argv)
    gui = AbsKinGui(pargs.file, z=zsys, norm=norm, outfil=outfil)
    gui.show()
    app.exec_()
    return gui, app
# Allow running the GUI directly from the command line.
if __name__ == "__main__":
    main()
# python abskingui.py /Users/xavier/Dropbox/CASBAH/jxp_analysis/FBQS0751+2919/fbqs0751_nov2014bin.fits -zsys 0. -outfil /Users/xavier/Desktop/tmp.fits -unnorm
| {
"repo_name": "profxj/xastropy",
"path": "xastropy/xguis/abskingui.py",
"copies": "2",
"size": "8788",
"license": "bsd-3-clause",
"hash": -7628741023865456000,
"line_mean": 31.1904761905,
"line_max": 162,
"alpha_frac": 0.5975193446,
"autogenerated": false,
"ratio": 3.3878180416345414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.997177460472352,
"avg_score": 0.002712556302204164,
"num_lines": 273
} |
""" Absolute Duality Gap Inverse Optimization
The absolute duality gap method for inverse optimization minimizes the aggregate
duality gap between the primal and dual objective values for each observed
decision. The problem is formulated as follows
.. math::
\min_{\mathbf{c, y},\epsilon_1, \dots, \epsilon_Q} \quad & \sum_{q=1}^Q | \epsilon_q |
\\text{s.t.}\quad\quad & \mathbf{A'y = c}
& \mathbf{c'\hat{x}_q = b'y} + \epsilon_q, \quad \\forall q
& \| \mathbf{c} \|_1 = 1
& \mathbf{y \geq 0}
"""
import cvxpy as cvx
import numpy as np
#import pudb
from ..utils.invoutils import checkFeasibility, validateFOP
class AbsoluteDualityGap():
    """ Formulate an Absolute Duality Gap method of GMIO.

    Args:
        tol (int): Sets number of significant digits. Default is 8.
        verbose (bool): Sets displays. Default is False.
        force_feasible_method (bool): If set to True, then will enforce the hyperplane projection method regardless of feasible points. Default is False.
        normalize_c: Set to either 1 or np.inf. Decides the normalization constraint on c
        ban_constraints (list): A list of constraint indices to force to zero when solving. Default is none.

    Example:
        Suppose that the variables ``A`` and ``b`` are numpy matrices and ``points`` is
        a list of numpy arrays::

            model = AbsoluteDualityGap()
            model.FOP(A, b)
            model.solve(points)
            print (model.c)
    """

    def __init__(self, **kwargs):
        # _fop is flipped by FOP(); solve() asserts on it. _solved gates rho().
        self._fop = False
        self._verbose = False
        self._solved = False
        self.tol = 8
        # Default cvxpy solver; may be overridden via the 'solver' keyword
        # (see _initialize_kwargs).
        self.solver = cvx.ECOS_BB
        self.force_feasible_method = False
        self.ban_constraints = []
        self.normalize_c = 1
        # Apply keyword overrides (verbose, tol, solver, normalize_c, ...).
        self._kwargs = self._initialize_kwargs(kwargs)

    def FOP(self, A, b):
        """ Create a forward optimization problem.

        Args:
            A (matrix): numpy matrix of shape :math:`m \\times n`.
            b (matrix): numpy matrix of shape :math:`m \\times 1`.

        Currently, the forward problem is constructed by the user supplying a
        constraint matrix ``A`` and vector ``b``. The forward problem is

        .. math::

            \min_{\mathbf{x}} \quad&\mathbf{c'x}

            \\text{s.t} \quad&\mathbf{A x \geq b}
        """
        # validateFOP normalizes and sanity-checks the user-supplied data.
        self.A, self.b = validateFOP(A, b)
        self._fop = True

    def solve(self, points, **kwargs):
        """ Solves the inverse optimization problem.

        Args:
            points (list): list of numpy arrays, denoting the (optimal) observed points.

        Returns:
            error (float): the optimal value of the inverse optimization problem.

        First check if all of the points are feasible, in which case we can
        just project the points to each of the hyperplanes. Let :math:`\\bar{x}`
        denote the centroid of the points. Then, we just solve

        .. math::

            \min_{i \in \mathcal{M}} \left\{ \\frac{\mathbf{a_i'\\bar{x} - }b_i }{\| \mathbf{a_i} \|_1} \\right\}

        Let :math:`i^*` denote the optimal index. The optimal cost and dual
        variables are

        .. math::

            \mathbf{c^*} &= \mathbf{\\frac{a_{i^*}}{\|a_{i^*}\|}}

            \mathbf{y^*} &= \mathbf{\\frac{e_{i^*}}{\|a_{i^*}\|}}

        If not all of the points are feasible, then we need to solve an
        exponential number of optimization problems. Let :math:`\mathcal{C}^+, \mathcal{C}^- \subseteq \{ 1, \dots, n \}`
        be a partition of the index set of length ``n``. For each possible
        partition, we solve the following problem

        .. math::

            \min_{\mathbf{c, y}, \epsilon_1,\dots,\epsilon_Q} \quad & \sum_{q=1}^Q | \epsilon_q |

            \\text{s.t.} \quad & \mathbf{A'y = c}

            & \mathbf{c'\hat{x}_q = b'y} + \epsilon_q, \quad \\forall q

            & \sum_{i \in \mathcal{C}^+} c_i + \sum_{i \in \mathcal{C}^-} c_i = 1

            & c_i \geq 0, \quad i \in \mathcal{C}^+

            & c_i \leq 0, \quad i \in \mathcal{C}^-

            & \mathbf{y \geq 0}
        """
        self._kwargs = self._initialize_kwargs(kwargs)
        # Points are stored as n x 1 column vectors (numpy matrices).
        points = [np.mat(point).T for point in points]
        assert self._fop, 'No forward model given.'
        feasible = checkFeasibility(points, self.A, self.b, self.tol)
        if feasible or self.force_feasible_method:
            # Every point satisfies Ax >= b: project onto the hyperplanes.
            self.error = self._solveHyperplaneProjection(points)
        else:
            if self.normalize_c == 1:
                self.error = self._solveBruteForceNorm1(points)
            elif self.normalize_c == np.inf:
                self.error = self._solveBruteForceNormInf(points)
            else:
                # NOTE(review): an invalid normalize_c silently returns -1
                # instead of raising; _initialize_kwargs normally rejects it
                # before reaching this branch.
                return -1
        return self.error

    def _solveHyperplaneProjection(self, points):
        """Project the observed points onto each constraint hyperplane and
        pick the constraint with the smallest aggregate gap."""
        m, n = self.A.shape
        errors = np.zeros(m)
        for i in range(m):
            if i in self.ban_constraints:
                # Sentinel: effectively excludes banned constraints from argmin.
                errors[i] = 9999999
            else:
                # Normalize row i so gaps are comparable across constraints.
                ai = self.A[i] / np.linalg.norm(self.A[i].T, self.normalize_c)
                bi = self.b[i] / np.linalg.norm(self.A[i].T, self.normalize_c)
                # Aggregate duality gap over all points for constraint i.
                errors[i] = np.sum([ai * pt - bi for pt in points])
        minInd = np.argmin(errors)
        # Optimal cost vector is the normalized best constraint row.
        self.c = self.A[minInd] / np.linalg.norm(self.A[minInd].T,
                                                 self.normalize_c)
        self.c = self.c.tolist()[0]
        self.error = errors[minInd]
        # Dual solution is the scaled unit vector on the chosen constraint.
        self.dual = np.zeros(m)
        self.dual[minInd] = 1 / np.linalg.norm(self.A[minInd].T,
                                               self.normalize_c)
        self._solved = True
        return errors[minInd]

    def _baseBruteForceProblem(self, y, z, c, points):
        """Build the shared cvxpy objective and constraints for the brute
        force formulations: minimize the sum of absolute duality gaps.

        z[i] acts as |y'(A x_i - b)| via the pair of epigraph constraints.
        """
        obj = cvx.Minimize(sum(z))
        cons = []
        cons.append(y >= 0)
        # Dual feasibility: A'y = c.
        cons.append(self.A.T * y == c)
        for i in range(len(points)):
            # chi is the (signed) slack vector of point i.
            chi = self.A * points[i] - self.b
            cons.append(z[i] >= y.T * chi)
            cons.append(z[i] >= -1 * y.T * chi)
        # Force duals of banned constraints to zero.
        for i in self.ban_constraints:
            cons.append(y[i] == 0)
        return obj, cons

    def _solveBruteForceNorm1(self, points):
        """Solve the inverse problem under ||c||_1 = 1 by enumerating all
        2^n sign patterns of c (one convex program per pattern)."""
        m, n = self.A.shape
        nPoints = len(points)
        nFormulations = 2**n
        bestResult = np.inf
        for formulation in range(nFormulations):
            # Encode the sign pattern of c as an n-bit binary string.
            binFormulation = format(formulation, '0{}b'.format(n))
            cSign = [int(i) for i in binFormulation]
            cSign = np.mat(cSign)
            cSign[cSign == 0] = -1
            y = cvx.Variable(m)
            z = cvx.Variable(nPoints)
            c = cvx.Variable(n)
            obj, cons = self._baseBruteForceProblem(y, z, c, points)
            # add the normalization constraint: signed sum of c equals 1,
            # which linearizes ||c||_1 = 1 for this sign pattern.
            cons.append(cSign * c == 1)
            for i in range(n):
                if cSign[0, i] == 1:
                    cons.append(c[i] >= 0)
                else:
                    cons.append(c[i] <= 0)
            prob = cvx.Problem(obj, cons)
            result = prob.solve(solver=self.solver)
            if result < bestResult:
                bestResult = result
                self.c = c.value / np.linalg.norm(c.value, 1)
                self.dual = y.value / np.linalg.norm(c.value, 1)
        self._solved = True
        self.error = bestResult
        self.dual = self.dual.T.tolist()[0] # reconvert to just a list
        self.c = self.c.T.tolist()[0]
        return self.error

    def _solveBruteForceNormInf(self, points):
        """Solve the inverse problem under ||c||_inf = 1 by fixing each
        coordinate c_j to +1 and -1 in turn (2n convex programs)."""
        m, n = self.A.shape
        nPoints = len(points)
        bestResult = np.inf
        for j in range(n):
            # Case 1: c_j pinned to +1.
            y1 = cvx.Variable(m)
            z1 = cvx.Variable(nPoints)
            c1 = cvx.Variable(n)
            obj1, cons1 = self._baseBruteForceProblem(y1, z1, c1, points)
            # Add the normalization constraint
            cons1.append(c1 <= 1)
            cons1.append(c1 >= -1)
            cons1.append(c1[j] == 1)
            prob1 = cvx.Problem(obj1, cons1)
            result1 = prob1.solve(solver=self.solver)
            # Case 2: c_j pinned to -1.
            y2 = cvx.Variable(m)
            z2 = cvx.Variable(nPoints)
            c2 = cvx.Variable(n)
            obj2, cons2 = self._baseBruteForceProblem(y2, z2, c2, points)
            # Add the normalization constraint
            cons2.append(c2 <= 1)
            cons2.append(c2 >= -1)
            cons2.append(c2[j] == -1)
            prob2 = cvx.Problem(obj2, cons2)
            result2 = prob2.solve(solver=self.solver)
            optimalReform = np.argmin([result1, result2, bestResult])
            if optimalReform == 0:
                bestResult = result1
                self.c = c1.value / np.linalg.norm(c1.value, np.inf)
                # NOTE(review): the dual here is scaled by ||y||_inf, whereas
                # _solveBruteForceNorm1 scales it by ||c||_1 — confirm which
                # scaling is intended.
                self.dual = y1.value / np.linalg.norm(y1.value, np.inf)
            elif optimalReform == 1:
                bestResult = result2
                self.c = c2.value / np.linalg.norm(c2.value, np.inf)
                self.dual = y2.value / np.linalg.norm(y2.value, np.inf)
        self._solved = True
        self.error = bestResult
        self.dual = self.dual.T.tolist()[0] # reconvert to just a list
        self.c = self.c.T.tolist()[0]
        return self.error

    def rho(self, points):
        """ Solves the goodness of fit.

        Computes 1 minus the ratio of the achieved aggregate duality gap to
        the total gap summed over every constraint, so values closer to 1
        indicate a better fit. Requires a prior successful solve().
        """
        assert self._solved, 'you need to solve first.'
        m, n = self.A.shape
        # Numerator: total absolute duality gap under the fitted (c, dual).
        numer = [
            np.abs(np.dot(self.c, point) - np.dot(self.dual, self.b))
            for point in points
        ]
        numer = sum(numer)
        # Denominator: normalized gaps accumulated over all m constraints.
        denom = 0
        for i in range(m):
            denomTerm = [
                np.abs(np.dot(self.A[i], point) - self.b[i]) / np.linalg.norm(
                    self.A[i].T, self.normalize_c) for point in points
            ]
            denom += sum(denomTerm)
        rho = 1 - numer / denom
        # Result of matrix arithmetic is a 1x1 matrix; extract the scalar.
        return rho[0, 0]

    def _initialize_kwargs(self, kwargs):
        """Validate and apply the recognized keyword options; unknown keys
        are ignored. Returns the kwargs dict unchanged."""
        if 'verbose' in kwargs:
            assert isinstance(kwargs['verbose'],
                              bool), 'verbose needs to be True or False.'
            self._verbose = kwargs['verbose']
        if 'tol' in kwargs:
            assert isinstance(kwargs['tol'],
                              int), 'tolerance needs to be an integer.'
            self.tol = kwargs['tol']
        if 'force_feasible_method' in kwargs:
            assert isinstance(
                kwargs['force_feasible_method'],
                bool), 'force feasible method needs to be True or False.'
            self.force_feasible_method = kwargs['force_feasible_method']
        if 'ban_constraints' in kwargs:
            assert isinstance(kwargs['ban_constraints'],
                              list), 'ban constraints needs to be a list.'
            self.ban_constraints = kwargs['ban_constraints']
        if 'normalize_c' in kwargs:
            assert kwargs['normalize_c'] == 1 or kwargs['normalize_c'] == np.inf, 'normalize c with 1 or infinity norm.'
            self.normalize_c = kwargs['normalize_c']
        if 'solver' in kwargs:
            # Resolve the solver by name against the installed cvxpy solvers.
            if kwargs['solver'] in cvx.installed_solvers():
                self.solver = getattr(cvx, kwargs['solver'])
            else:
                print('you do not have this solver.')
        return kwargs
| {
"repo_name": "rafidrm/invo",
"path": "invo/LinearModels/AbsoluteDualityGap.py",
"copies": "1",
"size": "11291",
"license": "mit",
"hash": 7772254577655969000,
"line_mean": 34.61829653,
"line_max": 153,
"alpha_frac": 0.53635639,
"autogenerated": false,
"ratio": 3.5562204724409447,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45925768624409447,
"avg_score": null,
"num_lines": null
} |
# Media and static file settings for a Django project.

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
| {
"repo_name": "Charnelx/django-split-settings",
"path": "tests/settings/components/static.py",
"copies": "1",
"size": "1251",
"license": "bsd-3-clause",
"hash": 8489234388108962000,
"line_mean": 36.9090909091,
"line_max": 79,
"alpha_frac": 0.7330135891,
"autogenerated": false,
"ratio": 3.745508982035928,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4978522571135928,
"avg_score": null,
"num_lines": null
} |
# Absolute import needed to import ~/.config/spotipy/settings.py and not ourselves
from __future__ import absolute_import
from copy import copy
import getpass
import glib
import os
import sys
import json
from spotipy import SETTINGS_PATH, SETTINGS_FILE, SETTINGS_JSON_FILE
class SettingsProxy(object):
    """Layered settings container with three precedence levels, low to high:
    package defaults, the user's local settings module, and runtime values.

    Settings are module-level UPPERCASE names. Values are exposed as
    attributes via __getattr__; assignments to UPPERCASE attributes are
    stored in the runtime layer. Runtime values are persisted to
    SETTINGS_JSON_FILE by save_values().

    NOTE(review): this module uses Python 2-only constructs (tuple-unpacking
    lambda parameters, dict.iteritems).
    """

    def __init__(self, default_settings_module):
        """:param default_settings_module: module providing default settings."""
        self.default = self._get_settings_dict_from_module(
            default_settings_module)
        self.local = self._get_local_settings()
        self.runtime = {}
        # Load any previously persisted runtime values from disk.
        self.__read_values()

    def _get_local_settings(self):
        """Import the user's local settings module from SETTINGS_PATH, if
        present, and return its settings as a dict (else an empty dict)."""
        if not os.path.isfile(SETTINGS_FILE):
            return {}
        # Make the user settings directory importable before the import.
        sys.path.insert(0, SETTINGS_PATH)
        # pylint: disable = F0401
        import settings as local_settings_module
        # pylint: enable = F0401
        return self._get_settings_dict_from_module(local_settings_module)

    def _get_settings_dict_from_module(self, module):
        """Collect the module's UPPERCASE attributes into a {name: value} dict."""
        settings = filter(lambda (key, value): self._is_setting(key),
                          module.__dict__.iteritems())
        return dict(settings)

    def _is_setting(self, name):
        # A name is treated as a setting iff it is all uppercase.
        return name.isupper()

    @property
    def current(self):
        """The merged settings view: defaults overridden by local, then runtime."""
        current = copy(self.default)
        current.update(self.local)
        current.update(self.runtime)
        return current

    def set_values(self, val):
        """Merge the given {name: value} dict into the runtime layer."""
        self.runtime.update(val)

    def get_values(self):
        """Return the merged settings dict (see :attr:`current`)."""
        return self.current

    def __read_values(self):
        """Load persisted runtime values from SETTINGS_JSON_FILE.
        Any failure (missing file, bad JSON) is deliberately ignored —
        best-effort restore."""
        try:
            f = open(SETTINGS_JSON_FILE, 'r')
            json_obj = json.load(f)
            f.close()
            self.runtime.update(json_obj)
        except Exception as ex:
            pass

    def save_values(self):
        """Persist the merged settings (minus underscore-prefixed names)
        to SETTINGS_JSON_FILE as pretty-printed JSON."""
        x = self.get_values()
        d = {}
        for a in x.keys():
            # Skip names starting with an underscore (non-persistable).
            if a.find("_") != 0:
                d[a] = x[a]
        json_str = json.dumps(d, sort_keys=True, indent=4)
        f = open(SETTINGS_JSON_FILE, 'w')
        f.write(json_str)
        f.flush()
        f.close()

    def __getattr__(self, attr):
        """Resolve an UPPERCASE attribute from the merged settings.
        *_PATH / *_FILE values are expanded to absolute paths.
        Non-setting names silently resolve to None."""
        if not self._is_setting(attr):
            return
        if attr not in self.current:
            raise Exception(u'Setting "%s" is not set.' % attr)
        value = self.current[attr]
        # Falsy values (empty string, None, ...) are returned unmodified.
        if not value:
            return value
        if attr.endswith('_PATH') or attr.endswith('_FILE'):
            value = os.path.expanduser(value)
            value = os.path.abspath(value)
        return value

    def __setattr__(self, attr, value):
        # UPPERCASE assignments go to the runtime layer; everything else is
        # a normal instance attribute (default/local/runtime themselves).
        if self._is_setting(attr):
            self.runtime[attr] = value
        else:
            super(SettingsProxy, self).__setattr__(attr, value)
| {
"repo_name": "ZenHarbinger/spotipy",
"path": "spotipy/utils/settings.py",
"copies": "1",
"size": "2822",
"license": "apache-2.0",
"hash": 8366272326068057000,
"line_mean": 29.3440860215,
"line_max": 82,
"alpha_frac": 0.5737065911,
"autogenerated": false,
"ratio": 3.839455782312925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9850821215675368,
"avg_score": 0.012468231547511309,
"num_lines": 93
} |
# absolute_import prevents conflicts between project celery.py file
# and the celery package.
from __future__ import absolute_import
from datetime import datetime
import gzip
import os
from random import randint
from celery import shared_task
from django.conf import settings
from django.core.files import File
@shared_task
def timestamp():
    """Example celery task: append the current datetime to a demo log file."""
    log_path = os.path.join(settings.MEDIA_ROOT, 'stamped_log_file.txt')
    with open(log_path, 'a') as log:
        log.write(str(datetime.now()) + '\n')
@shared_task
def gzip_compress(file_in):
    """
    Example celery asynchronous file processing task, performs gzip.

    arguments:
        file_in: an UploadFile model instance with an ``uploadfile``
            FileField (input) and a ``processedfile`` FileField (output)
    """
    # Local import: only this task needs tempfile.
    import tempfile

    input_file = file_in.uploadfile
    gzip_filename = os.path.basename(input_file.path) + '.gz'
    # Use tempfile.mkstemp for a unique, race-free temporary path instead of
    # a hand-rolled random suffix (which could collide or be predicted).
    fd, tmp_gzip_path = tempfile.mkstemp(
        prefix='django_celery_fileprocess-', suffix='-' + gzip_filename)
    os.close(fd)
    try:
        # Create temporary output file, compressed with gzip. The context
        # manager closes the file; no explicit close() is needed.
        with gzip.open(tmp_gzip_path, 'wb') as gzip_out:
            gzip_out.writelines(input_file)
        # Reopen the temporary file as a Django File object and save it as
        # the processedfile FileField.
        with open(tmp_gzip_path, 'rb') as f:
            output_file = File(f)
            file_in.processedfile.save(gzip_filename, output_file)
    finally:
        # Clean up the temporary file even if compression or saving fails
        # (the original leaked it on any exception).
        os.remove(tmp_gzip_path)
| {
"repo_name": "madprime/django_celery_fileprocess_example",
"path": "file_process/tasks.py",
"copies": "1",
"size": "1592",
"license": "apache-2.0",
"hash": -1603496881789344000,
"line_mean": 31.4897959184,
"line_max": 75,
"alpha_frac": 0.6639447236,
"autogenerated": false,
"ratio": 3.836144578313253,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9877950878577639,
"avg_score": 0.02442768466712291,
"num_lines": 49
} |
# Absolute import (the default in a future Python release) resolves
# the collections import as the Python standard collections module
# rather than this module of the same name.
from __future__ import absolute_import
from copy import copy
from collections import (Iterable, Mapping, defaultdict)
import functools
import itertools
import six
def is_nonstring_iterable(value):
    """
    Checks whether *value* is iterable without being a string.

    :param value: the object to check
    :return: whether the given value is a non-string iterable object
    """
    if isinstance(value, six.string_types):
        return False
    return isinstance(value, Iterable)
def concat(*iterables):
    """
    Flattens the given iterables into one list, preserving order.

    :param iterables: the iterables to concatenate
    :return: the concatenated list
    :rtype: list
    """
    return [item for iterable in iterables for item in iterable]
def tuplize(iterable):
    """
    Recursively creates nested tuples from the given iterable object.
    Non-string iterable elements are themselves tuplized.

    :param iterable: the iterable to convert
    :return: the comparable tuple
    """
    converted = []
    for element in iterable:
        if is_nonstring_iterable(element):
            converted.append(tuplize(element))
        else:
            converted.append(element)
    return tuple(converted)
def to_series(items, conjunction='and'):
    """
    Formats the given items as a series string, joining all but the last
    item with commas and the last with the conjunction.

    Example:

    >>> to_series([1, 2, 3])
    '1, 2 and 3'

    :param items: the items to format in a series
    :param conjunction: the series conjunction
    :return: the items series
    :rtype: str
    """
    if not items:
        return ''
    last = str(items[-1])
    leading = ', '.join(str(item) for item in items[:-1])
    if not leading:
        return last
    return leading + ' ' + conjunction + ' ' + last
def nested_defaultdict(factory, levels=0):
    """
    Makes a defaultdict for the given factory and number of levels, e.g.::

        >> from qiutil.collections import nested_defaultdict as dd
        >> dd(list, 0)[1]
        []
        >> dd(dict, 2)[1][2][3]
        {}

    The default levels value 0 is synonymous with the standard Python
    collections defaultdict, so this function is a drop-in replacement
    for ``defaultdict``.

    :param factory: the 0th level defaultdict factory.
    :param levels: the number of levels
    """
    def make(depth):
        # At depth 0 the missing-key factory is the caller's factory;
        # otherwise each missing key produces another nested level.
        if depth:
            return defaultdict(lambda: make(depth - 1))
        return defaultdict(factory)

    return make(levels)
def update(target, *sources, **opts):
    """
    Updates the given target object from the given source objects, applied
    in order.

    The target may be a Mapping, list or set. A Mapping target requires
    Mapping sources; a list or set target requires non-string iterable
    sources (a TypeError is raised otherwise).

    For a Mapping target, the standard dictionary update is applied unless
    the *recursive* option is truthy, in which case nested dictionaries are
    merged recursively, e.g.::

        >> target = dict(a=dict(aa=1))
        >> update(target, dict(a=dict(aa=2, ab=3)), recursive=True)
        >> target
        {'a': {'aa': 2, 'ab': 3}}

    For a list or set target, source items not already in the target are
    added (list order and existing duplicates are preserved).

    :param target: the dictionary to update
    :param sources: the update source dictionaries
    :param opts: the following keyword options:
    :keyword recursive: if True, then apply the update recursively to
        nested dictionaries
    """
    # Fail fast if any source is incompatible with the target type.
    _validate_update_compatibility(target, *sources)
    # Build a single-argument update function appropriate for the target,
    # then fold each source through it in order.
    apply_source = _create_updater(target, **opts)
    for src in sources:
        apply_source(src)
def _create_updater(target, **opts):
    """
    Builds the single-source update function for the given target.

    :param target: the update target
    :param opts: the following keyword options:
    :keyword recursive: if True, then apply the update recursively to
        nested dictionaries
    :return: the function to apply to a *source* argument
    """
    # Non-mapping targets (list/set) always use the collection updater.
    if not isinstance(target, Mapping):
        return functools.partial(_update_collection, target)
    if opts.get('recursive'):
        return functools.partial(_update_dict_recursive, target)
    # Plain mapping: the standard Python dictionary update.
    return lambda source: target.update(source)
def _update_dict_recursive(target, source):
    """
    Merges *source* into *target*, recursing into values that are mappings
    on both sides. Merged/assigned values are shallow-copied so shared
    source objects are not aliased into the target.
    """
    for key, new_value in source.iteritems():
        existing = target[key] if key in target else None
        if isinstance(existing, Mapping) and isinstance(new_value, Mapping):
            # Both sides are mappings: replace the target value with a
            # shallow copy and merge into that copy recursively.
            merged = copy(existing)
            target[key] = merged
            _update_dict_recursive(merged, new_value)
        else:
            target[key] = copy(new_value)
def _validate_update_compatibility(target, *sources):
if isinstance(target, Mapping):
for source in sources:
if not isinstance(source, Mapping):
raise TypeError("Update source is incompatible with the"
" dictionary target: %s" % source)
elif isinstance(target, list) or isinstance(target, set):
for source in sources:
if not is_nonstring_iterable(source):
raise TypeError("Update source is incompatible with the"
" collection target: %s" % source)
else:
raise TypeError("Update target is type is not supported: %s" % target)
def _update_collection(target, source):
"""
Adds to the target those source items which are not
yet in the target, as described in :meth:`update`.
:param target: the list or set to update
:param source: the input non-string iterable
:raise TypeError: if the target is neither a list or set
"""
if isinstance(target, set):
target.update(source)
elif isinstance(target, list):
exclude = set(target)
diff = (item for item in source if item not in exclude)
target.extend(diff)
else:
raise TypeError("Update target type not supported")
class ImmutableDict(dict):
    """
    ImmutableDict is a dictionary that cannot be changed after creation.

    An ImmutableDict is *not* hashable and therefore cannot be used as a
    dictionary key or set member. See http://www.python.org/dev/peps/pep-0351
    for the rationale.

    All mutating operations raise NotImplementedError. The original only
    blocked ``__setitem__``, which left ``update``, ``pop`` etc. as
    mutation loopholes contradicting the documented contract.
    """

    def _immutable(self, *args, **kwargs):
        """
        :raise NotImplementedError: always
        """
        raise NotImplementedError("The dictionary is immutable: %s" % self)

    # Block every dict mutator, not just item assignment.
    __setitem__ = _immutable
    __delitem__ = _immutable
    clear = _immutable
    pop = _immutable
    popitem = _immutable
    setdefault = _immutable
    update = _immutable
# Module-level singleton: a safe shared default since it cannot be mutated.
EMPTY_DICT = ImmutableDict()
"""
An immutable empty dictionary.
This constant serves as an efficient method return default value.
"""
| {
"repo_name": "ohsu-qin/qiutil",
"path": "qiutil/collections.py",
"copies": "1",
"size": "8275",
"license": "bsd-2-clause",
"hash": -8779240159552352000,
"line_mean": 32.1,
"line_max": 122,
"alpha_frac": 0.64,
"autogenerated": false,
"ratio": 4.375991538868323,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020658490874065367,
"num_lines": 250
} |
# Absolute import (the default in a future Python release) resolves
# the collections import as the standard Python collections module
# rather than the staging collections module.
from __future__ import absolute_import
import os
import re
import glob
from bunch import Bunch
from collections import defaultdict
from ..helpers.logging import logger
import qixnat
import qidicom.hierarchy
from .. import staging
from ..helpers.constants import (SUBJECT_FMT, SESSION_FMT)
from . import image_collection
from .roi import iter_roi
from .staging_error import StagingError
def iter_stage(project, collection, *inputs, **opts):
    """
    Iterates over the scans discovered in the given input directories,
    yielding one {subject, session, scan, dicom, roi} object per scan
    that has DICOM files.

    The input directories conform to the
    :attr:`qipipe.staging.image_collection.Collection.patterns`
    ``subject`` regular expression. Each yielded object holds:

    - *subject*: the XNAT subject name formatted by :data:`SUBJECT_FMT`
    - *session*: the XNAT experiment name formatted by :data:`SESSION_FMT`
    - *scan*: the XNAT scan number
    - *dicom*: the DICOM directory
    - *roi*: the ROI directory

    :param project: the XNAT project name
    :param collection: the
        :attr:`qipipe.staging.image_collection.Collection.name`
    :param inputs: the source subject directories to stage
    :param opts: the following keyword option:
    :keyword scan: the scan number to stage
        (default stage all detected scans)
    :keyword skip_existing: flag indicating whether to ignore each
        existing session, or scan if the *scan* option is set
        (default True)
    :yield: the {subject, session, scan, dicom, roi} objects
    """
    # A collection name is mandatory.
    if not collection:
        raise StagingError('Staging is missing the image collection name')
    # {subject: {session: {scan: scan directories}}} for the new DICOM files.
    visit_dict = _collect_visits(project, collection, *inputs, **opts)
    log = logger(__name__)
    for subject, sessions in visit_dict.iteritems():
        for session, scans in sessions.iteritems():
            for scan_nbr, directories in scans.iteritems():
                # Scans without DICOM files are reported and skipped.
                if not directories.dicom:
                    log.info("Skipping %s %s scan %d since no DICOM files"
                             " were found for this scan." %
                             (subject, session, scan_nbr))
                    continue
                log.debug("Staging %s %s scan %d..." %
                          (subject, session, scan_nbr))
                yield Bunch(subject=subject, session=session, scan=scan_nbr,
                            **directories)
                log.info("Staged %s %s scan %d." %
                         (subject, session, scan_nbr))
def _collect_visits(project, collection, *inputs, **opts):
    """
    Collects the sessions found in the given input directories into a
    nested dictionary.

    :param project: the XNAT project name
    :param collection: the TCIA image collection name
    :param inputs: the source DICOM subject directories
    :param opts: the :meth:`iter_stage` options
    :return: the {subject: {session: {scan: {dicom, roi}}}}
        dictionary
    """
    collected = defaultdict(dict)
    # VisitIterator generates (subject, session, scan dictionary) tuples.
    for subject, session, scan_dict in VisitIterator(project, collection,
                                                     *inputs, **opts):
        collected[subject][session] = scan_dict
    return collected
class VisitIterator(object):
    """Scan DICOM generator class: walks the given session directories and
    generates (subject, session, {scan: directories}) tuples for scans that
    are new (or all scans, depending on the options)."""

    def __init__(self, project, collection, *session_dirs, **opts):
        """
        :param project: the XNAT project name
        :param collection: the image collection name
        :param session_dirs: the session directories over which
            to iterate
        :param opts: the :meth:`iter_stage` options
        """
        self.project = project
        """The :meth:`iter_stage` project name parameter."""
        self.collection = image_collection.with_name(collection)
        """The :meth:`iter_stage` collection name parameter."""
        self.session_dirs = session_dirs
        """The input directories."""
        self.scan = opts.get('scan')
        """The :meth:`iter_stage` scan number option."""
        self.skip_existing = opts.get('skip_existing', True)
        """The :meth:`iter_stage` *skip_existing* flag option."""
        self.logger = logger(__name__)

    def __iter__(self):
        """
        Returns the next (subject, session, scan_dict) tuple for the
        scans in the session directories, where:

        - *subject* is the subject name
        - *session* is the session name
        - *scan_dict* is the scan {number: {dicom, roi}}
          dictionary

        :return: the next (subject, session, scan_dict) tuple
        """
        # The visit subdirectory matcher.
        vpat = self.collection.patterns.session
        # The {scan number: {dicom, roi}} directory search patterns.
        all_scan_pats = self.collection.patterns.scan
        # The selected directory search patterns.
        if self.scan:
            # Filter on only the specified scan.
            if self.scan not in all_scan_pats:
                raise StagingError("The %s scan %d is not supported"
                                   " with an image collection DICOM"
                                   " pattern" %
                                   (self.collection.name, self.scan))
            scan_pats = {self.scan: all_scan_pats[self.scan]}
        else:
            # Detect all scans.
            scan_pats = all_scan_pats
        # Filter existing scans if the skip_existing flag and scan
        # number are set.
        filter_scan = self.skip_existing and self.scan
        # Skip all scans of an existing session if the skip_existing
        # flag is set and the scan number is not set.
        skip_existing_session = self.skip_existing and not self.scan
        # Iterate over the visits.
        # NOTE(review): this connection's handle is unused — the helper
        # methods below open their own connections; presumably this outer
        # connect keeps one shared session alive. Verify against qixnat.
        with qixnat.connect():
            # Generate the new (subject, session, {scan: directory})
            # tuples for each visit.
            for input_dir in self.session_dirs:
                sess_dir = os.path.abspath(input_dir)
                self.logger.debug("Discovering scans in %s..." % sess_dir)
                # The input directory is /path/to/<subject>/<visit>.
                sbj_dir, sess_basename = os.path.split(sess_dir)
                _, sbj_basename = os.path.split(sbj_dir)
                sbj_nbr = self._match_subject_number(sbj_basename)
                # Make the XNAT subject name.
                sbj = SUBJECT_FMT % (self.collection.name, sbj_nbr)
                # The visit (session) number.
                sess_nbr = self._match_session_number(sess_basename)
                # The XNAT session name.
                sess = SESSION_FMT % sess_nbr
                if skip_existing_session and not self._is_new_session(sbj, sess):
                    self.logger.debug("Skipping the existing %s %s session"
                                      " in %s." % (sbj, sess, sess_dir))
                    continue
                # The DICOM and ROI directories for each scan number.
                scan_dict = {}
                for scan, pats in scan_pats.iteritems():
                    if not filter_scan or self._is_new_scan(sbj, sess, scan):
                        scan_dirs = self._scan_directories(pats, sess_dir)
                        if scan_dirs:
                            scan_dict[scan] = scan_dirs
                if scan_dict:
                    scans = scan_dict.keys()
                    self.logger.info("Discovered %s %s scans %s in %s." %
                                     (sbj, sess, scans, sess_dir))
                    yield sbj, sess, scan_dict
                else:
                    self.logger.info("No %s %s scans were discovered"
                                     " in %s." % (sbj, sess, sess_dir))

    def _scan_directories(self, patterns, input_dir):
        """Match the scan's DICOM and ROI directories under *input_dir*
        using the given glob patterns. Returns a Bunch(dicom, roi) where
        *dicom* is None if no DICOM directory matched and *roi* is a
        (possibly empty) list."""
        # The DICOM directory pattern.
        dcm_pat = "%s/%s" % (input_dir, patterns.dicom)
        # The DICOM directory matches.
        dcm_dirs = glob.glob(dcm_pat)
        # If no DICOM directory, then the scan will be ignored.
        if dcm_dirs:
            self.logger.debug("Discovered DICOM directories %s." % dcm_dirs)
        else:
            dcm_dirs = None
            self.logger.debug("No directory matches the DICOM pattern %s." %
                              dcm_pat)
        # The ROI directory is optional.
        roi_dirs = []
        # The ROI glob pattern.
        if hasattr(patterns, 'roi'):
            # The ROI directory pattern.
            roi_pat = "%s/%s" % (input_dir, patterns.roi.glob)
            # The ROI directory matches.
            roi_dirs = glob.glob(roi_pat)
            if roi_dirs:
                self.logger.debug("Discovered %d ROI directories." %
                                  len(roi_dirs))
            else:
                self.logger.debug("No directory was found matching the"
                                  " ROI pattern %s." % roi_pat)
        return Bunch(dicom=dcm_dirs, roi=roi_dirs)

    def _match_subject_number(self, path):
        """
        :param path: the directory path
        :return: the subject number
        :raise StagingError: if the path does not match the collection subject
            pattern
        """
        match = self.collection.patterns.subject.match(path)
        if not match:
            raise StagingError(
                "The directory path %s does not match the subject pattern %s." %
                (path, self.collection.patterns.subject.pattern))
        return int(match.group(1))

    def _match_session_number(self, path):
        """
        :param path: the directory path
        :return: the session number
        :raise StagingError: if the path does not match the collection session
            pattern
        """
        match = self.collection.patterns.session.match(path)
        if not match:
            raise StagingError(
                "The directory path %s does not match the session pattern %s." %
                (path, self.collection.patterns.session.pattern))
        return int(match.group(1))

    def _is_new_session(self, subject, session):
        """Return whether the XNAT session does not yet exist (a found
        session is logged and reported as not new)."""
        with qixnat.connect() as xnat:
            sess = xnat.find_one(self.project, subject, session)
        if sess:
            logger(__name__).debug("Skipping %s %s since it has already been"
                                   " loaded to XNAT." % (subject, session))
        return not sess

    def _is_new_scan(self, subject, session, scan):
        """Return whether the XNAT scan does not yet exist (a found scan
        is logged and reported as not new)."""
        with qixnat.connect() as xnat:
            scan_obj = xnat.find_one(self.project, subject, session, scan=scan)
        if scan_obj:
            logger(__name__).debug("Skipping %s %s scan %d since it has"
                                   " already been loaded to XNAT." %
                                   (subject, session, scan))
        return not scan_obj
def _scan_dicom_generator(pattern, tag):
    """
    Lazily matches the visit DICOM files and buckets them by volume.

    :param pattern: the DICOM file glob pattern
    :param tag: the DICOM volume tag
    :yield: the {volume: [DICOM files]} dictionary
    """
    # iglob streams the matches; group_by partitions them on the tag value.
    yield qidicom.hierarchy.group_by(tag, *glob.iglob(pattern))
| {
"repo_name": "ohsu-qin/qipipe",
"path": "qipipe/staging/iterator.py",
"copies": "1",
"size": "11711",
"license": "bsd-2-clause",
"hash": -1979158368410731000,
"line_mean": 38.6983050847,
"line_max": 82,
"alpha_frac": 0.5798821621,
"autogenerated": false,
"ratio": 4.15136476426799,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.523124692636799,
"avg_score": null,
"num_lines": null
} |
# Absolute import (the default in a future Python release) resolves
# the logging import as the Python standard logging module rather
# than this module of the same name.
from __future__ import absolute_import
import os
import logging
import logging.config
import yaml
from . import collections as qicollections
# Logging configuration file locations, in increasing precedence order
# (see configure() below).
LOG_CFG_ENV_VAR = 'LOG_CONFIG'
"""The user-defined environment variable logging configuration file path."""

LOG_CFG_FILE = 'logging.yaml'
"""The optional current working directory logging configuration file name."""

BASE_DIR = os.path.abspath(os.path.dirname(__file__))
"""The ``qiutil`` package directory."""

DEF_LOG_CFG = os.path.join(BASE_DIR, 'conf', LOG_CFG_FILE)
"""The default logging configuration file path."""
class LogError(Exception):
    """Exception raised by the qiutil logging module."""
    pass
def logger(name):
    """
    This method is the preferred way to obtain a logger.

    Example:

    >>> from qiutil.logging import logger
    >>> logger(__name__).debug("Starting my application...")

    :Note: Python ``nosetests`` captures log messages and only
        reports them on failure.

    :param name: the caller's context ``__name__``
    :return: the Python Logger instance
    """
    # Configure on demand: the first call runs configure(), which is
    # presumed to set the ``logger.configured`` marker attribute — confirm
    # against the configure() implementation.
    if not hasattr(logger, 'configured'):
        configure(name)
    return logging.getLogger(name)
def configure(*names, **opts):
    """
    Configures logging. The logging configuration is obtained from
    the given keyword arguments and the YAML_ logging configuration files.
    The following logging configuration files are loaded in low-to-high
    precedence:

    - the ``qiutil`` module ``conf/logging.yaml`` file

    - the ``logging.yaml`` file in the current directory

    - the file specified by the ``LOG_CONFIG`` environment variable

    - the *config* keyword parameter

    The ``opts`` keyword arguments specify simple logging parameters that
    override the configuration file settings. The keyword arguments
    can include the *filename* and *level* short-cuts, which are handled
    as follows:

    - if the *filename* is None, then file logging is disabled. Otherwise,
      the file handler file name is set to the *filename* value.

    - The *level* is set for the logger. In addition, if the logger has a
      file handler, then that file handler level is set. Otherwise, the
      console handler level is set.

    The logging configuration file ``formatters``, ``handlers`` and
    ``loggers`` sections are updated incrementally. For example, the
    ``conf/logging.yaml`` source distribution file defines the ``default``
    formatter ``format`` and ``datefmt``. If the ``logging.yaml`` file in
    the current directory overrides the ``format`` but not the ``datefmt``,
    then the default ``datefmt`` is retained rather than unset. Thus, a custom
    logging configuration file need define only the settings which override
    the default configuration.

    By default, ``ERROR`` level messages are written to the console.
    If the log file is set, then the default logger writes ``INFO`` level
    messages to a rotating log file.

    If the file handler is enabled, then this :meth:`qiutil.logging.configure`
    method ensures that the log file parent directory exists.

    Examples:

    - Write to the log:

      >>> from qiutil.logging import logger
      >>> logger(__name__).debug("Started the application...")

      or, in a class instance:

      >>> from qiutil.logging import logger
      >>> class MyApp(object):
      ...     def __init__(self):
      ...         self._logger = logger(__name__)
      ...     def start(self):
      ...         self._logger.debug("Started the application...")

    - Write debug messages to the file log:

      >>> import qiutil
      >>> qiutil.logging.configure(level='DEBUG')

    - Set the log file:

      >>> import qiutil
      >>> qiutil.logging.configure(filename='log/myapp.log')

    - Define your own logging configuration:

      >>> import qiutil
      >>> qiutil.logging.configure('/path/to/my/conf/logging.yaml')

    - Simplify the console log message format by creating the following
      ``./logging.yaml`` customization::

          ---
          formatters:
            simple:
              format: '%(name)s - %(message)s'
          handlers:
            console:
              formatter: simple

    .. _YAML: http://www.yaml.org

    :param names: the logging contexts (default root)
    :param opts: the Python ``logging.conf`` options, as well as the
        following short-cuts:
    :keyword config: the custom configuration YAML file
    :keyword filename: the log file path
    :keyword level: the file handler log level
    """
    # Load the configuration files. Pop the config option so it is not
    # merged into the logging configuration dictionary below.
    cfg_file = opts.pop('config', None)
    config = _load_config(cfg_file)
    # Extract the logger options from the config options.
    logger_opts = {k: opts.pop(k) for k in ['filename', 'level'] if k in opts}
    # The filename option overrides the configuration files.
    fname = logger_opts.get('filename')
    if fname:
        # Reset the log file.
        config['handlers']['file']['filename'] = fname
    # Make the loggers dictionary, if necessary.
    if 'loggers' not in config:
        config['loggers'] = {}
    # Configure the loggers.
    for name in names:
        _configure_logger(name, config, **logger_opts)
    # Add the other options, if any.
    qicollections.update(config, opts, recursive=True)
    # Ensure that all log file parent directories exist.
    # Note: values() rather than the Python-2-only itervalues().
    for handler in config['handlers'].values():
        log_file = handler.get('filename')
        if log_file:
            # Make the log file parent directory, if necessary.
            log_dir = os.path.dirname(log_file)
            if log_dir and not os.path.exists(log_dir):
                os.makedirs(log_dir)
            # Make the log file path absolute for clarity.
            handler['filename'] = os.path.abspath(log_file)
    # Configure logging.
    logging.config.dictConfig(config)
    # Set the logger configured flag checked by logger().
    setattr(logger, 'configured', True)
def _configure_logger(name, config, **opts):
    """
    Configures the named logger within the given configuration dictionary.

    :param name: the logger name
    :param config: the logging configuration dictionary to update in place
    :param opts: the *filename* and *level* short-cut options described in
        :meth:`qiutil.logging.configure`
    """
    loggers = config['loggers']
    # Avoid shadowing the module-level logger() function.
    logger_cfg = loggers.get(name)
    if not logger_cfg:
        # Copy the root configuration.
        logger_cfg = loggers[name] = dict(propagate=False)
        logger_cfg.update(config['root'])
    # If file logging is set, then direct messages to the file.
    if opts.get('filename'):
        logger_cfg['handlers'] = ['file']
    # The log level is set in both the logger and the handler,
    # and the more restrictive level applies. Therefore, set
    # the log level in both places.
    level = opts.pop('level', None)
    if level:
        # Set the logger level.
        logger_cfg['level'] = level
        # Set the handler levels.
        for handler_key in logger_cfg['handlers']:
            handler = config['handlers'][handler_key]
            handler['level'] = level
    # Add the other options, if any.
    qicollections.update(config, opts, recursive=True)
    # Ensure that all log file parent directories exist.
    # Note: values() rather than the Python-2-only itervalues().
    for handler in config['handlers'].values():
        log_file = handler.get('filename')
        if log_file:
            # Make the log file parent directory, if necessary.
            log_dir = os.path.dirname(log_file)
            if log_dir and not os.path.exists(log_dir):
                os.makedirs(log_dir)
            # Make the log file path absolute for clarity.
            handler['filename'] = os.path.abspath(log_file)
def _load_config(cfg_file=None):
    """
    Builds the logging configuration dictionary from the default package
    configuration overlaid with each custom configuration file, as
    described in :meth:`qiutil.logging.configure`.

    :return: the logging configuration dictionary
    :raises LogError: if the configuration file argument is specified
        but does not exist
    """
    # Start from the base package configuration.
    base = _load_config_file(DEF_LOG_CFG)
    # Overlay each custom configuration in precedence order.
    overrides = (_load_config_file(f)
                 for f in _find_custom_config_files(cfg_file))
    qicollections.update(base, *overrides, recursive=True)
    return base
def _find_custom_config_files(cfg_file):
    """
    Finds the custom logging configuration files, as described in
    :meth:`qiutil.logging.configure`.

    :param cfg_file: the custom configuration file argument
    :return: the custom configuration file list, in low-to-high
        precedence order
    :raises LogError: if the configuration file argument is specified
        but does not exist
    """
    # The config files list.
    config_files = []
    # The environment variable log configuration file.
    env_cfg_file = os.getenv(LOG_CFG_ENV_VAR, None)
    if env_cfg_file and os.path.exists(env_cfg_file):
        config_files.append(env_cfg_file)
    # The current directory log configuration file.
    if os.path.exists(LOG_CFG_FILE):
        config_files.append(LOG_CFG_FILE)
    # The argument log configuration file. Unlike the optional files
    # above, an explicitly named file that is missing is an error.
    if cfg_file:
        if os.path.exists(cfg_file):
            config_files.append(cfg_file)
        else:
            raise LogError("Configuration file not found: %s" % cfg_file)
    return config_files
def _load_config_file(filename):
    """
    Loads the given logger configuration file.

    :param filename: the log configuration file path
    :return: the parsed configuration parameter dictionary
    """
    with open(filename) as fs:
        # safe_load parses plain data only; the bare yaml.load can
        # construct arbitrary Python objects from an untrusted file
        # and is deprecated without an explicit Loader.
        return yaml.safe_load(fs)
| {
"repo_name": "ohsu-qin/qiutil",
"path": "qiutil/logging.py",
"copies": "1",
"size": "9672",
"license": "bsd-2-clause",
"hash": 8595559075622866000,
"line_mean": 33.0563380282,
"line_max": 78,
"alpha_frac": 0.6420595533,
"autogenerated": false,
"ratio": 4.364620938628159,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.03232148573257728,
"num_lines": 284
} |
## Absolute location where all raw files are
RAWDATA_DIR = '/home/cmb-06/as/skchoudh/dna/Dec_12_2016_Penalva_Musashi1_U251/RNA-Seq'
## Output directory
OUT_DIR = '/home/cmb-06/as/skchoudh/rna/Dec_12_2016_Penalva_Musashi1_U251'
## Absolute location to 're-ribo/scripts' directory
SRC_DIR = '/home/cmb-panasas2/skchoudh/github_projects/re-ribo/scripts'
## Genome fasta location
GENOME_FASTA = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.fa'
## Chromosome sizes location
CHROM_SIZES = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.chrom.sizes'
## Path to STAR index (will be generated if does not exist)
STAR_INDEX = '/home/cmb-panasas2/skchoudh/genomes/hg38/star_annotated'
## Path to RSEM index (will be generated if does not exist)
RSEM_INDEX_PREFIX = '/home/cmb-panasas2/skchoudh/genomes/hg38/rsem_index/hg38'
## GTF path
GTF = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.annotation.without_rRNA_tRNA.gtf'
## GenePred bed downloaded from UCSC
## (this is used for inferring the type of experiment, i.e. stranded/non-stranded,
## and hence is not strictly required)
GENE_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v24.genes.bed'
## Path to bed file with start codon coordinates
START_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.start_codon.bed'
## Path to bed file with stop codon coordinates
STOP_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.stop_codon.bed'
## Path to bed file containing CDS coordinates
CDS_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.cds.bed'
## Path to bed file containing 5' UTR coordinates
UTR5_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR5.bed'
## Path to bed file containing 3' UTR coordinates
UTR3_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR3.bed'
## Name of python2 environment
## The following packages need to be installed in that environment:
## numpy scipy matplotlib seaborn pysam pybedtools htseq
## you can do: conda create -n python2 python=2 && source activate python2 && conda install numpy scipy matplotlib seaborn pysam pybedtools htseq
PYTHON2ENV = 'python2'
############################################Do Not Edit#############################################
## Fixed counting-tool settings (HTSeq and featureCounts flag values).
HTSEQ_STRANDED = 'yes'
FEATURECOUNTS_S = '-s 1'
FEATURECOUNTS_T = 'CDS'
HTSEQ_MODE = 'intersection-strict'
| {
"repo_name": "saketkc/ribo-seq-snakemake",
"path": "configs/Dec_12_2016_Penalva_Musashi1_U251.py",
"copies": "1",
"size": "2378",
"license": "bsd-3-clause",
"hash": -6263189815986704000,
"line_mean": 36.746031746,
"line_max": 145,
"alpha_frac": 0.7405382675,
"autogenerated": false,
"ratio": 2.6839729119638824,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3924511179463882,
"avg_score": null,
"num_lines": null
} |
## Absolute location where all raw files are
RAWDATA_DIR = '/home/cmb-06/as/skchoudh/dna/Dec_12_2017_Penalva_RPS5_RNAseq_and_Riboseq'
## Output directory
OUT_DIR = '/home/cmb-panasas2/skchoudh/rna/Dec_12_2017_Penalva_RPS5_RNAseq_and_Riboseq'
## Absolute location to 're-ribo/scripts' directory
SRC_DIR = '/home/cmb-panasas2/skchoudh/github_projects/re-ribo/scripts'
## Genome fasta location
GENOME_FASTA = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.fa'
## Chromosome sizes location
CHROM_SIZES = '/home/cmb-panasas2/skchoudh/genomes/hg38/fasta/hg38.chrom.sizes'
## Path to STAR index (will be generated if does not exist)
STAR_INDEX = '/home/cmb-panasas2/skchoudh/genomes/hg38/star_annotated'
## Path to RSEM index (will be generated if does not exist)
RSEM_INDEX_PREFIX = '/home/cmb-panasas2/skchoudh/genomes/hg38/rsem_index/hg38'
## GTF path
GTF = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.annotation.without_rRNA_tRNA.gtf'
## GenePred bed downloaded from UCSC
## (this is used for inferring the type of experiment, i.e. stranded/non-stranded,
## and hence is not strictly required)
GENE_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v24.genes.bed'
## Path to bed file with start codon coordinates
START_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.start_codon.bed'
## Path to bed file with stop codon coordinates
STOP_CODON_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.stop_codon.bed'
## Path to bed file containing CDS coordinates
CDS_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.cds.bed'
## Path to bed file containing 5' UTR coordinates
UTR5_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR5.bed'
## Path to bed file containing 3' UTR coordinates
UTR3_BED = '/home/cmb-panasas2/skchoudh/genomes/hg38/annotation/gencode.v25.gffutils.UTR3.bed'
## Name of python2 environment
## The following packages need to be installed in that environment:
## numpy scipy matplotlib seaborn pysam pybedtools htseq
## you can do: conda create -n python2 python=2 && source activate python2 && conda install numpy scipy matplotlib seaborn pysam pybedtools htseq
PYTHON2ENV = 'python2'
############################################Do Not Edit#############################################
## Fixed counting-tool settings (HTSeq and featureCounts flag values).
HTSEQ_STRANDED = 'yes'
FEATURECOUNTS_S = '-s 1'
FEATURECOUNTS_T = 'CDS'
HTSEQ_MODE = 'intersection-strict'
| {
"repo_name": "saketkc/ribo-seq-snakemake",
"path": "configs/Dec_12_2017_Penalva_RPS5.py",
"copies": "1",
"size": "2393",
"license": "bsd-3-clause",
"hash": -9127198814888901000,
"line_mean": 36.9841269841,
"line_max": 145,
"alpha_frac": 0.7417467614,
"autogenerated": false,
"ratio": 2.685746352413019,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3927493113813019,
"avg_score": null,
"num_lines": null
} |
"""absolute_massgov_eopss_url
Revision ID: a1b42c9006a7
Revises: 9b30b0fe231a
Create Date: 2017-06-26 00:02:45.998655
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm.session import Session
import os
import sys
# Make the migration's parent directory importable so that the Document
# model can be loaded below without installing the package.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from document import Document
from urllib import parse
# Revision identifiers, used by Alembic to order migrations.
revision = 'a1b42c9006a7'
down_revision = '9b30b0fe231a'
branch_labels = None
depends_on = None
def upgrade():
    """No-op: this revision's one-time data fix has already been applied."""
    pass
    # This migration was a one-time fix of data, and doesn't need to be run ever
    # again, and gets in the way of future modifications to the Document class.
    # It's left here for version number continuity, but is otherwise dead.
    # def ensure_absolute(url):
    #     root_url = "https://www.mass.gov/"
    #     if not url.startswith(root_url):
    #         return parse.urljoin(root_url, url)
    #     return url
    #
    # # Attach to the migration's session
    # session = Session(bind=op.get_bind())
    # docs = session.query(Document).filter(
    #     Document.site == Document.Site.MASSGOV_EOPSS.name).all()
    # for doc in docs:
    #     doc.url = ensure_absolute(doc.url)
    # session.add_all(docs)
    # session.commit()
def downgrade():
    """Rollback is intentionally a no-op, since upgrade() is a dead no-op."""
    return None
| {
"repo_name": "RagtagOpen/bidwire",
"path": "bidwire/alembic/versions/a1b42c9006a7_absolute_massgov_eopss_url.py",
"copies": "1",
"size": "1311",
"license": "mit",
"hash": -270874387347668770,
"line_mean": 26.3125,
"line_max": 80,
"alpha_frac": 0.6910755149,
"autogenerated": false,
"ratio": 3.1975609756097563,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9379711593390414,
"avg_score": 0.0017849794238683128,
"num_lines": 48
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.