| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3 to 1.05M | stringlengths 5 to 104 | stringlengths 4 to 251 | stringclasses 1 value | stringclasses 15 values | int64 3 to 1.05M |
'''
Test Cases for DocumentConverter Class for WordCloud Project
Daniel Klein
Computer-Based Honors Program
The University of Alabama
9.27.2013
'''
import unittest
import os, os.path
from src.core.python.SupremeCourtOpinionFileConverter import SupremeCourtOpinionFileConverter
##### Here are all the global variables used in these tests.
VALID_OPINION_FILE_LINES = ([
"""\
TITLE: UNITED STATES v. JOHNSON ET AL., DOING BUSINESS AS UNITED STATES\
DENTAL CO., ET AL.\
""",
"""CASE NUMBER: No. 43""",
"""US CITATION: 323 U.S. 273""",
"""SUPREME COURT CITATION: 65 S. Ct. 249""",
"""LAWYERS ED CITATION: 89 L. Ed. 236""",
"""LEXIS CITATION: 1944 U.S. LEXIS 1230""",
"""\
FULL CITATION: 323 U.S. 273; 65 S. Ct. 249; 89 L. Ed. 236; 1944 U.S. LEXIS 1230\
""",
"""DATES: November 8, 1944, Argued;December 18, 1944, Decided;""",
"""DISPOSITION: 53 F.Supp. 596, affirmed.""",
"""OPINION TYPE: concur""",
"""* * * * * * * *""",
"""MR. JUSTICE MURPHY, concurring.""",
"""I join in the opinion of the Court and believe that the judgment should be \
affirmed.""",
"""Congress has the constitutional power to fix venue at any place where a \
crime occurs. Our problem here is to determine, in the absence of a specific \
venue provision, where the crime outlawed by the Federal Denture Act occurred \
for purposes of venue.""",
"""The Act prohibits the use of the mails for the purpose of sending or \
bringing into any state certain prohibited articles. It is undisputed that \
when a defendant places a prohibited article in the mails in Illinois for \
the purpose of sending it into Delaware he has completed a statutory offense. \
Hence he is triable in Illinois. But to hold that the statutory crime also \
encompasses the receipt of the prohibited article in Delaware, justifying a \
trial at that point, requires an implication that I am unwilling to make in \
the absence of more explicit Congressional language.""",
"""Very often the difference between liberty and imprisonment in cases where \
the direct evidence offered by the government and the defendant is evenly \
balanced depends upon the presence of character witnesses. The defendant is \
more likely to obtain their presence in the district of his residence, which \
in this instance is usually the place where the prohibited article is mailed. \
The inconvenience, expense and loss of time involved in transplanting these \
witnesses to testify in trials far removed from their homes are often too \
great to warrant their use. Moreover, they are likely to lose much of their \
effectiveness before a distant jury that knows nothing of their reputations. \
Such factors make it difficult for me to conclude, where Congress has not \
said so specifically, that we should construe the Federal Denture Act as \
covering more than the first sufficient and punishable use of the mails \
insofar as the sender of a prohibited article is concerned. The principle of \
narrow construction of criminal statutes does not warrant interpreting the \
"use" of the mails to cover all possible uses in light of the foregoing \
considerations."""])
CASE_TITLE = """\
UNITED STATES v. JOHNSON ET AL., DOING BUSINESS AS UNITED STATES\
DENTAL CO., ET AL.\
"""
CASE_NUM = "No. 43"
CASE_US_CITE = "323 U.S. 273"
CASE_SUPREME_COURT_CITE = "65 S. Ct. 249"
CASE_LAWYERS_ED_CITE = "89 L. Ed. 236"
CASE_LEXIS_CITE = "1944 U.S. LEXIS 1230"
CASE_FULL_CITE = "323 U.S. 273; 65 S. Ct. 249; 89 L. Ed. 236; 1944 U.S. LEXIS 1230"
CASE_DATES = "November 8, 1944 (Argued) December 18, 1944 (Decided) " # THIS MIGHT CHANGE!!
CASE_DISPOSITION = "53 F.Supp. 596, affirmed."
OPINION_AUTHOR = "MURPHY"
OPINION_TYPE = "concur"
OPINION_TEXT = "\n".join(VALID_OPINION_FILE_LINES[11:])
TEST_FILE_PATH = os.path.join(os.path.abspath(os.curdir), "MURPHY_1944 U.S. LEXIS 1230.txt")
TEST_PICKLE_PATH = os.path.join(os.path.abspath(os.curdir), "pickled_test_doc")
#####
def create_test_file(file_lines):
with open(TEST_FILE_PATH, 'w') as test_file:
for line in file_lines:
test_file.write(line + "\n")
class DocumentConverterTest(unittest.TestCase):
def setUp(self):
'''
What do I need to run these tests?
- a test file.
'''
self.test_path = TEST_FILE_PATH
self.test_converter = SupremeCourtOpinionFileConverter(self.test_path, TEST_PICKLE_PATH)
def tearDown(self):
if os.path.exists(self.test_path):
os.remove(self.test_path)
if os.path.exists(TEST_PICKLE_PATH):
os.chmod(TEST_PICKLE_PATH, 0777)
os.remove(TEST_PICKLE_PATH)
del self.test_converter
def testNormalCase(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() normal case...")
# create a normal test file
create_test_file(VALID_OPINION_FILE_LINES)
converted_doc = self.test_converter.convert_file()
print("Word count: {0}".format(converted_doc.word_count))
# here assert a bunch of things about the resulting converted_doc
self.assertTrue(hasattr(converted_doc, 'output_filename'))
self.assertEqual(converted_doc.output_filename, TEST_PICKLE_PATH)
self.assertTrue(hasattr(converted_doc, 'doc_text'))
self.assertEqual(converted_doc.doc_text, OPINION_TEXT)
self.assertTrue(hasattr(converted_doc, 'doc_metadata'))
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_title'))
self.assertEqual(converted_doc.doc_metadata.case_title, CASE_TITLE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_author'))
self.assertEqual(converted_doc.doc_metadata.opinion_author, OPINION_AUTHOR)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_type'))
self.assertEqual(converted_doc.doc_metadata.opinion_type, OPINION_TYPE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_num'))
self.assertEqual(converted_doc.doc_metadata.case_num, CASE_NUM)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_us_cite'))
self.assertEqual(converted_doc.doc_metadata.case_us_cite, CASE_US_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_supreme_court_cite'))
self.assertEqual(converted_doc.doc_metadata.case_supreme_court_cite, CASE_SUPREME_COURT_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lawyers_ed_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lawyers_ed_cite, CASE_LAWYERS_ED_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lexis_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lexis_cite, CASE_LEXIS_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_full_cite'))
self.assertEqual(converted_doc.doc_metadata.case_full_cite, CASE_FULL_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_dates'))
self.assertEqual(converted_doc.doc_metadata.case_dates, CASE_DATES)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_disposition'))
self.assertEqual(converted_doc.doc_metadata.case_disposition, CASE_DISPOSITION)
def testNoMetadataInFile(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() "
"with no Metadata in the input file...")
# create a test file without any metadata fields in it
create_test_file(VALID_OPINION_FILE_LINES[10:])
converted_doc = self.test_converter.convert_file()
# here assert a bunch of things about the resulting converted_doc
self.assertTrue(hasattr(converted_doc, 'output_filename'))
self.assertEqual(converted_doc.output_filename, TEST_PICKLE_PATH)
self.assertTrue(hasattr(converted_doc, 'doc_text'))
self.assertEqual(converted_doc.doc_text, OPINION_TEXT)
self.assertTrue(hasattr(converted_doc, 'doc_metadata'))
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_title'))
self.assertEqual(converted_doc.doc_metadata.case_title, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_author'))
self.assertEqual(converted_doc.doc_metadata.opinion_author, OPINION_AUTHOR)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_type'))
self.assertEqual(converted_doc.doc_metadata.opinion_type, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_num'))
self.assertEqual(converted_doc.doc_metadata.case_num, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_us_cite'))
self.assertEqual(converted_doc.doc_metadata.case_us_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_supreme_court_cite'))
self.assertEqual(converted_doc.doc_metadata.case_supreme_court_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lawyers_ed_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lawyers_ed_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lexis_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lexis_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_full_cite'))
self.assertEqual(converted_doc.doc_metadata.case_full_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_dates'))
self.assertEqual(converted_doc.doc_metadata.case_dates, '')
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_disposition'))
self.assertEqual(converted_doc.doc_metadata.case_disposition, "")
#self.fail("DocumentConverterTest: I haven't written testNoMetadataInFile yet.")
def testNoBodyTextInFile(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() "
"with no body text in the input file...")
# create a test file with valid metadata but without any body text in it
create_test_file(VALID_OPINION_FILE_LINES[:11])
converted_doc = self.test_converter.convert_file()
# here assert a bunch of things about the resulting converted_doc
self.assertTrue(hasattr(converted_doc, 'output_filename'))
self.assertEqual(converted_doc.output_filename, TEST_PICKLE_PATH)
self.assertTrue(hasattr(converted_doc, 'doc_text'))
self.assertEqual(converted_doc.doc_text, "")
self.assertTrue(hasattr(converted_doc, 'doc_metadata'))
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_title'))
self.assertEqual(converted_doc.doc_metadata.case_title, CASE_TITLE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_author'))
self.assertEqual(converted_doc.doc_metadata.opinion_author, OPINION_AUTHOR)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_type'))
self.assertEqual(converted_doc.doc_metadata.opinion_type, OPINION_TYPE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_num'))
self.assertEqual(converted_doc.doc_metadata.case_num, CASE_NUM)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_us_cite'))
self.assertEqual(converted_doc.doc_metadata.case_us_cite, CASE_US_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_supreme_court_cite'))
self.assertEqual(converted_doc.doc_metadata.case_supreme_court_cite, CASE_SUPREME_COURT_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lawyers_ed_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lawyers_ed_cite, CASE_LAWYERS_ED_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lexis_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lexis_cite, CASE_LEXIS_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_full_cite'))
self.assertEqual(converted_doc.doc_metadata.case_full_cite, CASE_FULL_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_dates'))
self.assertEqual(converted_doc.doc_metadata.case_dates, CASE_DATES)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_disposition'))
self.assertEqual(converted_doc.doc_metadata.case_disposition, CASE_DISPOSITION)
#self.fail("DocumentConverterTest: I haven't written testNoBodyTextInFile yet.")
def testOutputFileNotWritable(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() "
"and save_converted_doc() with an unwritable output file...")
create_test_file(VALID_OPINION_FILE_LINES)
converted_doc = self.test_converter.convert_file()
# assert stuff about the created converted_doc
self.assertTrue(hasattr(converted_doc, 'output_filename'))
self.assertEqual(converted_doc.output_filename, TEST_PICKLE_PATH)
self.assertTrue(hasattr(converted_doc, 'doc_text'))
self.assertEqual(converted_doc.doc_text, OPINION_TEXT)
self.assertTrue(hasattr(converted_doc, 'doc_metadata'))
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_title'))
self.assertEqual(converted_doc.doc_metadata.case_title, CASE_TITLE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_author'))
self.assertEqual(converted_doc.doc_metadata.opinion_author, OPINION_AUTHOR)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_type'))
self.assertEqual(converted_doc.doc_metadata.opinion_type, OPINION_TYPE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_num'))
self.assertEqual(converted_doc.doc_metadata.case_num, CASE_NUM)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_us_cite'))
self.assertEqual(converted_doc.doc_metadata.case_us_cite, CASE_US_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_supreme_court_cite'))
self.assertEqual(converted_doc.doc_metadata.case_supreme_court_cite, CASE_SUPREME_COURT_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lawyers_ed_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lawyers_ed_cite, CASE_LAWYERS_ED_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lexis_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lexis_cite, CASE_LEXIS_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_full_cite'))
self.assertEqual(converted_doc.doc_metadata.case_full_cite, CASE_FULL_CITE)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_dates'))
self.assertEqual(converted_doc.doc_metadata.case_dates, CASE_DATES)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_disposition'))
self.assertEqual(converted_doc.doc_metadata.case_disposition, CASE_DISPOSITION)
# I need to change the permissions of the pickle_path (chmod 0444)
with open(converted_doc.output_filename, 'w') as dummy:
pass
os.chmod(converted_doc.output_filename, 0444)
self.assertRaises(IOError, self.test_converter.save_converted_doc)
#self.fail("DocumentConverterTest: I haven't written testOutputFileNotWritable yet.")
def testInputFileNonexistent(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() "
"with nonexistent input file...")
# skip the create_test_file call and just try to convert.
self.assertRaises(IOError, self.test_converter.convert_file)
#self.fail("DocumentConverterTest: I haven't written testInputFileNonexistent yet.")
def testEmptyInputFile(self):
print("DocumentConverterTest: testing DocumentConverter.convert_file() "
"with completely empty input file...")
# create a test file with nothing in it
create_test_file([])
converted_doc = self.test_converter.convert_file()
# here assert a bunch of things about the resulting converted_doc
self.assertTrue(hasattr(converted_doc, 'output_filename'))
self.assertEqual(converted_doc.output_filename, TEST_PICKLE_PATH)
self.assertTrue(hasattr(converted_doc, 'doc_text'))
self.assertEqual(converted_doc.doc_text, "")
self.assertTrue(hasattr(converted_doc, 'doc_metadata'))
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_title'))
self.assertEqual(converted_doc.doc_metadata.case_title, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_author'))
self.assertEqual(converted_doc.doc_metadata.opinion_author, OPINION_AUTHOR)
self.assertTrue(hasattr(converted_doc.doc_metadata, 'opinion_type'))
self.assertEqual(converted_doc.doc_metadata.opinion_type, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_num'))
self.assertEqual(converted_doc.doc_metadata.case_num, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_us_cite'))
self.assertEqual(converted_doc.doc_metadata.case_us_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_supreme_court_cite'))
self.assertEqual(converted_doc.doc_metadata.case_supreme_court_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lawyers_ed_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lawyers_ed_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_lexis_cite'))
self.assertEqual(converted_doc.doc_metadata.case_lexis_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_full_cite'))
self.assertEqual(converted_doc.doc_metadata.case_full_cite, "")
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_dates'))
self.assertEqual(converted_doc.doc_metadata.case_dates, '')
self.assertTrue(hasattr(converted_doc.doc_metadata, 'case_disposition'))
self.assertEqual(converted_doc.doc_metadata.case_disposition, "")
#self.fail("DocumentConverterTest: I haven't written testEmptyInputFile yet.")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| (code above) | dmarklein/WordCloud | test/unit/python/DocumentConverterTest.py | Python | apache-2.0 | 18,681 |
from ..fields.chart_field import ChartField
from ..fields.title_field import TitleField
from ..fields.series_field import SeriesField
from ..fields.series.series import Series
from ..fields.plot_options_field import PlotOptionsField
from ..fields.plot_options.pie_plot_options import PiePlotOptions
from highchart import HighChart
class PieChart(HighChart):
def __init__(self, title, data_label, data):
'''
:param title: e.g. 'Browser market shares at a specific website, 2014'
:param data_label: e.g. 'Browser shares'
:param data: e.g. [ ['Firefox', 45.0], ['IE', 26.8], ['Chrome', 12.8], ... ]
'''
self.title_field = TitleField(text=title)
self.series_field = SeriesField()
self.series_field.add_serie(Series(data_label, data))
self.plot_options_field = PlotOptionsField()
self.plot_options_field.add_plot_option(PiePlotOptions())
self.chart_field = ChartField()
self.chart_field.set_type('pie')
def to_javascript(self):
jsc = "{"
jsc += self.chart_field.to_javascript() + ", "
jsc += self.title_field.to_javascript() + ", "
jsc += self.plot_options_field.to_javascript() + ", "
jsc += self.series_field.to_javascript()
jsc += "}"
return jsc
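# Usage sketch (not part of the original file): a minimal, hypothetical example
# based on the constructor docstring above; it only exercises __init__() and
# to_javascript(), and assumes the field/series classes imported at the top are
# available.
if __name__ == "__main__":
    example_chart = PieChart(
        title='Browser market shares at a specific website, 2014',
        data_label='Browser shares',
        data=[['Firefox', 45.0], ['IE', 26.8], ['Chrome', 12.8]],
    )
    # Prints the Highcharts configuration object as a JavaScript string,
    # e.g. "{chart: ..., title: ..., plotOptions: ..., series: ...}".
    print(example_chart.to_javascript())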
| (code above) | jpmfribeiro/PyCharts | pycharts/charts/pie_chart.py | Python | mit | 1,329 |
from __future__ import division, print_function
import numpy as np
np.random.seed(1337)
from copy import deepcopy
from numpy import log, sqrt
from numpy.random import choice
from time import time
from keras.models import load_model
import keras
import numba as nb
from numba import jit
from heuristic_agent import *
size = 15
goal = 5
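# Board geometry: a 15 x 15 Renju/Gomoku board where `goal` (five) stones in a
# row win. possible_actions() lists the empty cells; the check_* helpers below
# test every length-5 window (row, column, diagonal, anti-diagonal) that passes
# through the stone just placed at (row, col).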
@jit(nopython=True, nogil=True)
def possible_actions(state):
actions = []
for row in range(size):
for col in range(size):
if not state[row, col]:
actions.append((row, col))
return actions
@jit(nopython=True, nogil=True)
def check_row(state, row, col):
if 4 <= col and\
state[row, col - 4] == state[row, col - 3] == state[row, col - 2] ==\
state[row, col - 1] == state[row, col]:
return True
if 3 <= col <= 13 and\
state[row, col - 3] == state[row, col - 2] == state[row, col - 1] ==\
state[row, col] == state[row, col + 1]:
return True
if 2 <= col <= 12 and\
state[row, col - 2] == state[row, col - 1] == state[row, col] ==\
state[row, col + 1] == state[row, col + 2]:
return True
if 1 <= col <= 11 and\
state[row, col - 1] == state[row, col] == state[row, col + 1] ==\
state[row, col + 2] == state[row, col + 3]:
return True
if col <= 10 and\
state[row, col] == state[row, col + 1] == state[row, col + 2] ==\
state[row, col + 3] == state[row, col + 4]:
return True
return False
@jit(nopython=True, nogil=True)
def check_col(state, row, col):
if 4 <= row and\
state[row - 4, col] == state[row - 3, col] == state[row - 2, col] ==\
state[row - 1, col] == state[row, col]:
return True
if 3 <= row <= 13 and\
state[row - 3, col] == state[row - 2, col] == state[row - 1, col] ==\
state[row, col] == state[row + 1, col]:
return True
if 2 <= row <= 12 and\
state[row - 2, col] == state[row - 1, col] == state[row, col] ==\
state[row + 1, col] == state[row + 2, col]:
return True
if 1 <= row <= 11 and\
state[row - 1, col] == state[row, col] == state[row + 1, col] ==\
state[row + 2, col] == state[row + 3, col]:
return True
if row <= 10 and\
state[row, col] == state[row + 1, col] == state[row + 2, col] ==\
state[row + 3, col] == state[row + 4, col]:
return True
return False
@jit(nopython=True, nogil=True)
def check_diag(state, row, col):
if 4 <= row and 4 <= col and\
state[row - 4, col - 4] == state[row - 3, col - 3] == state[row - 2, col - 2] ==\
state[row - 1, col - 1] == state[row, col]:
return True
if 3 <= row <= 13 and 3 <= col <= 13 and\
state[row - 3, col - 3] == state[row - 2, col - 2] == state[row - 1, col - 1] ==\
state[row, col] == state[row + 1, col + 1]:
return True
if 2 <= row <= 12 and 2 <= col <= 12 and\
state[row - 2, col - 2] == state[row - 1, col - 1] == state[row, col] ==\
state[row + 1, col + 1] == state[row + 2, col + 2]:
return True
if 1 <= row <= 11 and 1 <= col <= 11 and\
state[row - 1, col - 1] == state[row, col] == state[row + 1, col + 1] ==\
state[row + 2, col + 2] == state[row + 3, col + 3]:
return True
if row <= 10 and col <= 10 and\
state[row, col] == state[row + 1, col + 1] == state[row + 2, col + 2] ==\
state[row + 3, col + 3] == state[row + 4, col + 4]:
return True
return False
@jit(nopython=True, nogil=True)
def check_anti_diag(state, row, col):
if 4 <= row and col <= 10 and\
state[row - 4, col + 4] == state[row - 3, col + 3] == state[row - 2, col + 2] ==\
state[row - 1, col + 1] == state[row, col]:
return True
if 3 <= row <= 13 and 1 <= col <= 11 and\
state[row - 3, col + 3] == state[row - 2, col + 2] == state[row - 1, col + 1] ==\
state[row, col] == state[row + 1, col - 1]:
return True
if 2 <= row <= 12 and 2 <= col <= 12 and\
state[row - 2, col + 2] == state[row - 1, col + 1] == state[row, col] ==\
state[row + 1, col - 1] == state[row + 2, col - 2]:
return True
if 1 <= row <= 11 and 3 <= col <= 13 and\
state[row - 1, col + 1] == state[row, col] == state[row + 1, col - 1] ==\
state[row + 2, col - 2] == state[row + 3, col - 3]:
return True
if row <= 10 and 4 <= col and\
state[row, col] == state[row + 1, col - 1] == state[row + 2, col - 2] ==\
state[row + 3, col - 3] == state[row + 4, col - 4]:
return True
return False
@jit(nopython=True, nogil=True)
def next_state(state, action, stone):
state[action] = stone
return state
class Board(object):
def __init__(self):
self.state = np.zeros(shape=(size, size), dtype=np.int8)
def win(self, state, action):
row, col = action
return check_row(state, row, col) or check_col(state, row, col) or\
check_diag(state, row, col) or check_anti_diag(state, row, col)
def tie(self, state):
return len(possible_actions(state)) == 0
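# Q_Trainer performs a simple self-play hill climb over the heuristic players'
# parameter vectors: each O-player parameter is increased by the current step,
# a test game is played and the winner's parameters are copied to the loser;
# the parameter is then decreased again (when large enough) and the comparison
# repeated, with the step shrinking every epoch.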
class Q_Trainer(object):
def __init__(self, board):
self.board = board
self.state = np.zeros(shape=(size, size), dtype=np.int8)
self.turn = 0
def train(self, X_player, O_player, epoch):
self.X_player = X_player
self.O_player = O_player
self.X_player.learning = self.O_player.learning = True
for test in range(epoch, 0, -1):
for p in range(len(self.X_player.params)):
self.O_player.params[p] += test
self.test_game()
winner = self.test_game()
if winner > 0:
self.O_player.params = self.X_player.params[:]
else:
self.X_player.params = self.O_player.params[:]
if self.O_player.params[p] > test:
self.O_player.params[p] -= test
self.test_game()
winner = self.test_game()
if winner > 0:
self.O_player.params = self.X_player.params[:]
else:
self.X_player.params = self.O_player.params[:]
self.X_player.learning = self.O_player.learning = False
def test_game(self):
self.state = np.zeros(shape=(size, size), dtype=np.int8)
self.turn = 1
for i in range(size * size):
actions = possible_actions(self.state)
if self.turn > 0:
action, danger = self.X_player.make_action(self.state)
else:
action, danger = self.O_player.make_action(self.state)
self.state[action] = self.turn
if self.board.win(self.state, action):
return self.turn
self.turn *= -1
return 0
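# enhash() flattens a board state into a string key used for the tree's
# children dictionaries: 1 (X) stays '1', -1 (O) is re-encoded as '2', and
# empty cells stay '0'; e.g. an empty 15 x 15 board hashes to 225 zeros.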
def enhash(state):
hash_state = state.copy()
np.place(hash_state, hash_state < 0, [2])
hash_value = ''.join(''.join(str(elem) for elem in row) for row in hash_state)
return hash_value
class Node(object):
def __init__(self, parent, p, C, action):
self.parent = parent
self.children = {}
self.visits = 0
self.value = 0
self.C = C
self.p = p
self.visited = False
self.ucb_count = 0
self.policy_count = 0
self.action = action
def expand(self, actions_states_probs):
for action, S, p in actions_states_probs:
if S not in self.children:
self.children[S] = Node(self, p, self.C, action)
def is_root(self):
return self.parent is None
def is_leaf(self):
return self.children == {}
def all_visited(self):
visited = 0
for S in self.children:
if self.children[S].visited:
visited += 1
return visited == len(self.children)
def policy_choice(self):
raw = []
states = []
for S in self.children:
states.append(S)
raw.append(self.children[S].p)
self.policy_count += 1
probs = [float(p) / sum(raw) for p in raw]
pred = np.random.choice(len(states), p=probs)
S = states[pred]
self.children[S].visited = True
return S, self.children[S]
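# UCB_value() and LCB_value() below implement the standard UCB1-style bounds:
#   mean value +/- C * sqrt(ln(total sibling visits) / visits),
# where (visits or 1) guards against division by zero for unvisited nodes.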
def UCB_value(self, log_total):
return (self.value / (self.visits or 1)) +\
self.C * sqrt(log_total / (self.visits or 1))
def UCB_choice(self):
self.ucb_count += 1
log_total = log(sum(self.children[S].visits for S in self.children) or 1)
return max(iter(self.children.items()), key=lambda x: x[1].UCB_value(log_total))
def LCB_value(self, log_total):
return (self.value / (self.visits or 1)) -\
self.C * sqrt(log_total / (self.visits or 1))
def LCB_choice(self):
log_total = log(sum(self.children[S].visits for S in self.children) or 1)
return max(iter(self.children.items()), key=lambda x: x[1].LCB_value(log_total))
def update(self, reward):
self.visits += 1
self.value += reward
def update_path(self, reward):
if not self.is_root():
self.parent.update_path(reward)
self.update(reward)
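# prepare_tensor() encodes a board state as a 1 x 15 x 15 x 4 float32 tensor
# for the Keras playout model: channel 0 marks occupied cells, channel 1 the
# stones of the player to move, channel 2 the opponent's stones, and channel 3
# the empty cells.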
@jit(nopython=True, nogil=True)
def prepare_tensor(state, stone):
board = np.zeros(shape=(size, size), dtype=np.float32)
X_stones = np.zeros(shape=(size, size), dtype=np.float32)
O_stones = np.zeros(shape=(size, size), dtype=np.float32)
empty = np.zeros(shape=(size, size), dtype=np.float32)
code = np.zeros(shape=(1, size, size, 4), dtype=np.float32)
for row in range(size):
for col in range(size):
if state[row, col] > 0:
board[row, col] = 1
X_stones[row, col] = 1
elif state[row, col] < 0:
board[row, col] = 1
O_stones[row, col] = 1
else:
empty[row, col] = 1
code[:, :, :, 0] = board
if stone > 0:
code[:, :, :, 1] = X_stones
code[:, :, :, 2] = O_stones
else:
code[:, :, :, 1] = O_stones
code[:, :, :, 2] = X_stones
code[:, :, :, 3] = empty
return code
def get_actions_states_probs(actions, probs, state, stone):
actions_states_probs = []
for cell in range(size * size):
action = (cell // size, cell % size)
if action in actions and probs[cell] > 0.05:
new_state = next_state(state, action, stone)
S = enhash(new_state)
actions_states_probs.append((action, S, probs[cell]))
return actions_states_probs
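# MonteCarlo ties the pieces together: run_simulation() walks the tree using
# policy-network priors (policy_choice) until all children have been visited
# and UCB takes over, lets the heuristic X/O players override risky moves,
# plays the position out with rollout(), and backs the result up along the
# path with update_path(). get_action() repeats simulations until time_limit
# expires and then picks the LCB-best child, unless the heuristic flags danger
# and its "safe" move is used instead.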
class MonteCarlo(object):
def __init__(self, board, **kwargs):
self.type = 'tree'
self.board = board
self.C = kwargs.get('C', 1.4)
self.root = Node(None, 1.0, self.C, None)
self.time_limit = kwargs.get('time_limit', 10)
self.playout_model = load_model(kwargs.get('playout_model'))
self.stone = kwargs.get('stone', 0)
self.last_action = None
def prepare_Q(self):
print('Preparing Q for player', self.stone)
X = Q_learning(1)
O = Q_learning(-1)
q_trainer = Q_Trainer(self.board)
q_trainer.train(X, O, size)
X.learning = False
O.learning = False
if self.stone > 0:
self.Q = O
else:
self.Q = X
self.X = X
self.O = O
print(X.params, O.params)
print('Ready!')
def policy(self, state, mode):
code = prepare_tensor(state, self.stone)
probs = self.playout_model.predict(code, verbose=0)[0]
return probs
def uproot(self):
if self.last_S in self.root.children:
self.root = self.root.children[self.last_S]
self.root.parent = None
else:
self.root = Node(None, 1.0, self.C, None)
def run_simulation(self, in_state):
state = in_state.copy()
node = self.root
player = self.stone
for k in range(goal):
if player == self.stone:
if node.is_leaf():
probs = self.policy(state, 'playout')
actions_states_probs = get_actions_states_probs(possible_actions(state), probs, state, self.stone)
node.expand(actions_states_probs)
if node.all_visited():
S, next_node = node.UCB_choice()
action = next_node.action
else:
S, next_node = node.policy_choice()
action = next_node.action
if player > 0:
safe_action, danger = self.X.make_action(state)
else:
safe_action, danger = self.O.make_action(state)
if player == -self.stone:
action = safe_action
elif danger:
action = safe_action
S = enhash(next_state(state, action, self.stone))
if S not in node.children:
node.children[S] = Node(node, 1.0, self.C, action)
next_node = node.children[S]
state[action] = player
if player == self.stone:
node = next_node
if self.board.win(state, action) or self.board.tie(state):
break
player = -player
reward = self.rollout(state, action, player)
if player == -self.stone:
reward = -reward
node.update_path(reward)
def rollout(self, state, action, player):
if self.board.win(state, action):
return 1
if self.board.tie(state):
return 0
vic = False
while True:
if player > 0:
action, danger = self.X.make_action(state)
else:
action, danger = self.O.make_action(state)
state[action] = player
if self.board.win(state, action):
vic = True
break
if self.board.tie(state):
break
player = -player
if vic:
if player == self.stone:
return 1
else:
return -1
return 0
def get_action(self, state):
games = 0
begin = time()
while time() - begin < self.time_limit:
current_state = state.copy()
self.run_simulation(current_state)
games += 1
if self.stone > 0:
safe_action, danger = self.X.make_action(state)
else:
safe_action, danger = self.O.make_action(state)
if danger:
action = safe_action
else:
S = self.root.LCB_choice()[0]
action = self.root.children[S].action
return action
def make_action(self, state):
action = self.get_action(state)
row, col = action
self.last_S = enhash(next_state(state, action, self.stone))
self.uproot()
return action
class Two_Trees(object):
def __init__(self):
board = Board()
self.X_Tree = MonteCarlo(board, stone=1,
C=1.4,
playout_model='Neo-Heuristic.h5',
time_limit=9.5
)
self.X_Tree.prepare_Q()
self.O_Tree = MonteCarlo(board, stone=-1,
C=1.4,
playout_model='Neo-Heuristic.h5',
time_limit=9.5
)
self.O_Tree.prepare_Q()
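# str_coords() converts a (row, col) action into conventional board notation:
# columns are letters with 'i' skipped (as on Go/Renju boards) and rows are
# 1-based, so (0, 0) -> "a1" and (0, 8) -> "j1".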
def str_coords(self, action):
row, col = action
if col >= ord('i') - ord('a'):
col += 1
S = chr(col + ord('a'))
S += str(row + 1)
return S
def make_move(self, state, stone):
if stone > 0:
action = self.X_Tree.make_action(state)
else:
action = self.O_Tree.make_action(state)
return self.str_coords(action)
| (code above) | EterniusVGM/Renju | renju/MCTS.py | Python | mit | 16,077 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CreateBackup
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-metastore
# [START metastore_v1alpha_generated_DataprocMetastore_CreateBackup_sync]
from google.cloud import metastore_v1alpha
def sample_create_backup():
# Create a client
client = metastore_v1alpha.DataprocMetastoreClient()
# Initialize request argument(s)
request = metastore_v1alpha.CreateBackupRequest(
parent="parent_value",
backup_id="backup_id_value",
)
# Make the request
operation = client.create_backup(request=request)
print("Waiting for operation to complete...")
response = operation.result()
# Handle the response
print(response)
# [END metastore_v1alpha_generated_DataprocMetastore_CreateBackup_sync]
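# Usage note (not part of the generated sample): a hypothetical entry point
# for running it directly; this assumes valid Application Default Credentials
# and a real `parent` resource path instead of the "parent_value" placeholder.
if __name__ == "__main__":
    sample_create_backup()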
| (code above) | googleapis/python-dataproc-metastore | samples/generated_samples/metastore_v1alpha_generated_dataproc_metastore_create_backup_sync.py | Python | apache-2.0 | 1,611 |
from collections import namedtuple
import sublime
from sublime_plugin import WindowCommand
from ..git_command import GitCommand
from ...common import util
MenuOption = namedtuple("MenuOption", ["requires_action", "menu_text", "filename", "is_untracked"])
CLEAN_WORKING_DIR = "Nothing to commit, working directory clean."
ADD_ALL_UNSTAGED_FILES = " ? All unstaged files"
ADD_ALL_FILES = " + All files"
STAGED = "--- {} files are staged for commit. ---"
COMMIT = " git: quick commit"
AMEND = " git: amend from stage"
FIXUP = " git: fixup from stage"
class GsQuickStageCommand(WindowCommand, GitCommand):
"""
Display a quick panel of unstaged files in the current git repository,
allowing the user to select one or more files for staging.
Display filenames with one of the following indicators:
* [M] modified
* [A] added
* [D] deleted
* [R] renamed/moved
* [C] copied
* [U] updated but unmerged
* [?] untracked
"""
def run(self):
sublime.set_timeout_async(self.run_async)
def run_async(self):
menu_options = self.get_menu_options()
menu_entries = [f.menu_text for f in menu_options]
def on_selection(id):
if id == -1:
return
selection = menu_options[id]
if not selection.requires_action:
return
elif selection.menu_text == COMMIT:
self.window.run_command("gs_quick_commit")
return
elif selection.menu_text == AMEND:
self.window.run_command("gs_amend")
return
elif selection.menu_text == FIXUP:
self.window.run_command("gs_fixup_from_stage")
return
elif selection.menu_text == ADD_ALL_UNSTAGED_FILES:
self.git("add", "--update", ".")
scope_of_action = "all unstaged files"
elif selection.menu_text == ADD_ALL_FILES:
self.git("add", "--all")
scope_of_action = "all files"
elif selection.is_untracked:
self.git("add", "--", selection.filename)
scope_of_action = "`{}`".format(selection.filename)
else:
self.git("add", "--update", "--", selection.filename)
scope_of_action = "`{}`".format(selection.filename)
self.window.status_message("Successfully added `{}`.".format(
scope_of_action))
util.view.refresh_gitsavvy(self.window.active_view())
sublime.set_timeout_async(self.run_async, 0)
self.window.show_quick_panel(
menu_entries,
on_selection,
flags=sublime.MONOSPACE_FONT
)
def get_menu_options(self):
"""
Determine the git status of the current working directory, and return
a list of menu options for each file that is shown.
"""
status_entries = self.get_status()
if not status_entries:
return [MenuOption(False, CLEAN_WORKING_DIR, None, None)]
menu_options = []
staged_count = 0
unstaged_count = 0
(staged_entries,
unstaged_entries,
untracked_entries,
conflict_entries) = self.sort_status_entries(status_entries)
staged_count = len(staged_entries)
unstaged_count = len(unstaged_entries)
# untracked_count = len(untracked_entries)
# conflict_count = len(conflict_entries)
for entry in unstaged_entries:
filename = (entry.path if not entry.index_status == "R"
else entry.path + " <- " + entry.path_alt)
menu_text = "[{0}] {1}".format(entry.working_status, filename)
menu_options.append(MenuOption(True, menu_text, filename, False))
for entry in untracked_entries:
menu_text = "[{0}] {1}".format(entry.working_status, entry.path)
menu_options.append(MenuOption(True, menu_text, entry.path, True))
for entry in conflict_entries:
menu_text = "[{0}] {1}".format(entry.working_status, entry.path)
menu_options.append(MenuOption(True, menu_text, entry.path, False))
if unstaged_count > 0:
menu_options.append(MenuOption(True, ADD_ALL_UNSTAGED_FILES, None, None))
menu_options.append(MenuOption(True, ADD_ALL_FILES, None, None))
if staged_count > 0:
menu_options.append(MenuOption(False, STAGED.format(staged_count), None, None))
menu_options.append(MenuOption(True, COMMIT, None, None))
menu_options.append(MenuOption(True, AMEND, None, None))
menu_options.append(MenuOption(True, FIXUP, None, None))
return menu_options
| (code above) | divmain/GitSavvy | core/commands/quick_stage.py | Python | mit | 4,843 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import configargparse
import os
import math
import json
import logging
import random
import time
import socket
import struct
import zipfile
import requests
from uuid import uuid4
from s2sphere import CellId, LatLng
from . import config
log = logging.getLogger(__name__)
def parse_unicode(bytestring):
decoded_string = bytestring.decode(sys.getfilesystemencoding())
return decoded_string
def memoize(function):
memo = {}
def wrapper(*args):
if args in memo:
return memo[args]
else:
rv = function(*args)
memo[args] = rv
return rv
return wrapper
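# get_args() below is wrapped with @memoize, so argument parsing (and any CSV
# loading) happens once per process; subsequent calls return the cached result.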
@memoize
def get_args():
# Pre-check to see if the -cf or --config flag is used on the command line.
# If not, we'll use the env var or default value. This prevents layering of
# config files as well as a missing config.ini.
defaultconfigfiles = []
if '-cf' not in sys.argv and '--config' not in sys.argv:
defaultconfigfiles = [os.getenv('POGOMAP_CONFIG', os.path.join(
os.path.dirname(__file__), '../config/config.ini'))]
parser = configargparse.ArgParser(
default_config_files=defaultconfigfiles,
auto_env_var_prefix='POGOMAP_')
parser.add_argument('-cf', '--config',
is_config_file=True, help='Set configuration file')
parser.add_argument('-a', '--auth-service', type=str.lower,
action='append', default=[],
help=('Auth Services, either one for all accounts ' +
'or one per account: ptc or google. Defaults ' +
'all to ptc.'))
parser.add_argument('-u', '--username', action='append', default=[],
help='Usernames, one per account.')
parser.add_argument('-p', '--password', action='append', default=[],
help=('Passwords, either single one for all ' +
'accounts or one per account.'))
parser.add_argument('-w', '--workers', type=int,
help=('Number of search worker threads to start. ' +
'Defaults to the number of accounts specified.'))
parser.add_argument('-asi', '--account-search-interval', type=int,
default=0,
help=('Seconds for accounts to search before ' +
'switching to a new account. 0 to disable.'))
parser.add_argument('-ari', '--account-rest-interval', type=int,
default=7200,
help=('Seconds for accounts to rest when they fail ' +
'or are switched out.'))
parser.add_argument('-ac', '--accountcsv',
help=('Load accounts from CSV file containing ' +
'"auth_service,username,passwd" lines.'))
parser.add_argument('-hlvl', '--high-lvl-accounts',
help=('Load high level accounts from CSV file '
+ ' containing '
+ '"auth_service,username,passwd"'
+ ' lines.'))
parser.add_argument('-bh', '--beehive',
help=('Use beehive configuration for multiple ' +
'accounts, one account per hex. Make sure ' +
'to keep -st under 5, and -w under the total ' +
'amount of accounts available.'),
action='store_true', default=False)
parser.add_argument('-wph', '--workers-per-hive',
help=('Only referenced when using --beehive. Sets ' +
'number of workers per hive. Default value ' +
'is 1.'),
type=int, default=1)
parser.add_argument('-l', '--location', type=parse_unicode,
help='Location, can be an address or coordinates.')
# Default based on the average elevation of cities around the world.
# Source: https://www.wikiwand.com/en/List_of_cities_by_elevation
parser.add_argument('-alt', '--altitude',
help='Default altitude in meters.',
type=int, default=507)
parser.add_argument('-altv', '--altitude-variance',
help='Variance for --altitude in meters',
type=int, default=1)
parser.add_argument('-uac', '--use-altitude-cache',
help=('Query the Elevation API for each step,' +
' rather than only once, and store results in' +
' the database.'),
action='store_true', default=False)
parser.add_argument('-nj', '--no-jitter',
help=("Don't apply random -9m to +9m jitter to " +
"location."),
action='store_true', default=False)
parser.add_argument('-al', '--access-logs',
help=("Write web logs to access.log."),
action='store_true', default=False)
parser.add_argument('-st', '--step-limit', help='Steps.', type=int,
default=12)
parser.add_argument('-sd', '--scan-delay',
help='Time delay between requests in scan threads.',
type=float, default=10)
parser.add_argument('--spawn-delay',
help=('Number of seconds after spawn time to wait ' +
'before scanning to be sure the Pokemon ' +
'is there.'),
type=float, default=10)
parser.add_argument('-enc', '--encounter',
help='Start an encounter to gather IVs and moves.',
action='store_true', default=False)
parser.add_argument('-cs', '--captcha-solving',
help='Enables captcha solving.',
action='store_true', default=False)
parser.add_argument('-ck', '--captcha-key',
help='2Captcha API key.')
parser.add_argument('-cds', '--captcha-dsk',
help='Pokemon Go captcha data-sitekey.',
default="6LeeTScTAAAAADqvhqVMhPpr_vB9D364Ia-1dSgK")
parser.add_argument('-mcd', '--manual-captcha-domain',
help='Domain to where captcha tokens will be sent.',
default="http://127.0.0.1:5000")
parser.add_argument('-mcr', '--manual-captcha-refresh',
help='Time available before captcha page refreshes.',
type=int, default=30)
parser.add_argument('-mct', '--manual-captcha-timeout',
help='Maximum time captchas will wait for manual ' +
'captcha solving. On timeout, if enabled, 2Captcha ' +
'will be used to solve captcha. Default is 0.',
type=int, default=0)
parser.add_argument('-ed', '--encounter-delay',
help=('Time delay between encounter pokemon ' +
'in scan threads.'),
type=float, default=1)
parser.add_argument('-encwf', '--enc-whitelist-file',
default='', help='File containing a list of '
'Pokemon IDs to encounter for'
' IV/CP scanning.')
parser.add_argument('-nostore', '--no-api-store',
help=("Don't store the API objects used by the high"
+ ' level accounts in memory. This will increase'
+ ' the number of logins per account, but '
+ ' decreases memory usage.'),
action='store_true', default=False)
webhook_list = parser.add_mutually_exclusive_group()
webhook_list.add_argument('-wwht', '--webhook-whitelist',
action='append', default=[],
help=('List of Pokemon to send to '
'webhooks. Specified as Pokemon ID.'))
webhook_list.add_argument('-wblk', '--webhook-blacklist',
action='append', default=[],
help=('List of Pokemon NOT to send to '
'webhooks. Specified as Pokemon ID.'))
webhook_list.add_argument('-wwhtf', '--webhook-whitelist-file',
default='', help='File containing a list of '
'Pokemon IDs to be sent to '
'webhooks.')
webhook_list.add_argument('-wblkf', '--webhook-blacklist-file',
default='', help='File containing a list of '
'Pokemon IDs NOT to be sent to '
'webhooks.')
parser.add_argument('-ld', '--login-delay',
help='Time delay between each login attempt.',
type=float, default=6)
parser.add_argument('-lr', '--login-retries',
help=('Number of times to retry the login before ' +
'refreshing a thread.'),
type=int, default=3)
parser.add_argument('-mf', '--max-failures',
help=('Maximum number of failures to parse ' +
'locations before an account will go into a ' +
'sleep for -ari/--account-rest-interval ' +
'seconds.'),
type=int, default=5)
parser.add_argument('-me', '--max-empty',
help=('Maximum number of empty scans before an ' +
'account will go into a sleep for ' +
'-ari/--account-rest-interval seconds. ' +
'Reasonable to use with proxies.'),
type=int, default=0)
parser.add_argument('-bsr', '--bad-scan-retry',
help=('Number of bad scans before giving up on a ' +
'step. Default 2, 0 to disable.'),
type=int, default=2)
parser.add_argument('-msl', '--min-seconds-left',
help=('Time that must be left on a spawn before ' +
'considering it too late and skipping it. ' +
'For example 600 would skip anything with ' +
'< 10 minutes remaining. Default 0.'),
type=int, default=0)
parser.add_argument('-dc', '--display-in-console',
help='Display Found Pokemon in Console.',
action='store_true', default=False)
parser.add_argument('-H', '--host', help='Set web server listening host.',
default='127.0.0.1')
parser.add_argument('-P', '--port', type=int,
help='Set web server listening port.', default=5000)
parser.add_argument('-L', '--locale',
help=('Locale for Pokemon names (default: {}, check ' +
'{} for more).').format(config['LOCALE'],
config['LOCALES_DIR']),
default='en')
parser.add_argument('-c', '--china',
help='Coordinates transformer for China.',
action='store_true')
parser.add_argument('-m', '--mock', type=str,
help=('Mock mode - point to a fpgo endpoint instead ' +
'of using the real PogoApi, ec: ' +
'http://127.0.0.1:9090'),
default='')
parser.add_argument('-ns', '--no-server',
help=('No-Server Mode. Starts the searcher but not ' +
'the Webserver.'),
action='store_true', default=False)
parser.add_argument('-os', '--only-server',
help=('Server-Only Mode. Starts only the Webserver ' +
'without the searcher.'),
action='store_true', default=False)
parser.add_argument('-sc', '--search-control',
help='Enables search control.',
action='store_true', dest='search_control',
default=False)
parser.add_argument('-nfl', '--no-fixed-location',
help='Disables a fixed map location and shows the ' +
'search bar for use in shared maps.',
action='store_false', dest='fixed_location',
default=True)
parser.add_argument('-k', '--gmaps-key',
help='Google Maps Javascript API Key.',
required=True)
parser.add_argument('--skip-empty',
help=('Enables skipping of empty cells in normal ' +
'scans - requires previously populated ' +
'database (not to be used with -ss)'),
action='store_true', default=False)
parser.add_argument('-C', '--cors', help='Enable CORS on web server.',
action='store_true', default=False)
parser.add_argument('-D', '--db', help='Database filename for SQLite.',
default='pogom.db')
parser.add_argument('-cd', '--clear-db',
help=('Deletes the existing database before ' +
'starting the Webserver.'),
action='store_true', default=False)
parser.add_argument('-np', '--no-pokemon',
help=('Disables Pokemon from the map (including ' +
'parsing them into local db.)'),
action='store_true', default=False)
parser.add_argument('-ng', '--no-gyms',
help=('Disables Gyms from the map (including ' +
'parsing them into local db).'),
action='store_true', default=False)
parser.add_argument('-nk', '--no-pokestops',
help=('Disables PokeStops from the map (including ' +
'parsing them into local db).'),
action='store_true', default=False)
parser.add_argument('-ss', '--spawnpoint-scanning',
help=('Use spawnpoint scanning (instead of hex ' +
'grid). Scans in a circle based on step_limit ' +
'when on DB.'),
nargs='?', const='nofile', default=False)
parser.add_argument('-speed', '--speed-scan',
help=('Use speed scanning to identify spawn points ' +
'and then scan closest spawns.'),
action='store_true', default=False)
parser.add_argument('-kph', '--kph',
help=('Set a maximum speed in km/hour for scanner ' +
'movement.'),
type=int, default=35)
parser.add_argument('-hkph', '--hlvl-kph',
help=('Set a maximum speed in km/hour for scanner ' +
'movement, for high-level (L30) accounts.'),
type=int, default=25)
parser.add_argument('-ldur', '--lure-duration',
help=('Change duration for lures set on pokestops. ' +
'This is useful for events that extend lure ' +
'duration.'), type=int, default=30)
parser.add_argument('--dump-spawnpoints',
help=('Dump the spawnpoints from the db to json ' +
'(only for use with -ss).'),
action='store_true', default=False)
parser.add_argument('-pd', '--purge-data',
help=('Clear Pokemon from database this many hours ' +
'after they disappear (0 to disable).'),
type=int, default=0)
parser.add_argument('-px', '--proxy',
help='Proxy url (e.g. socks5://127.0.0.1:9050)',
action='append')
parser.add_argument('-pxsc', '--proxy-skip-check',
help='Disable checking of proxies before start.',
action='store_true', default=False)
parser.add_argument('-pxt', '--proxy-timeout',
help='Timeout settings for proxy checker in seconds.',
type=int, default=5)
parser.add_argument('-pxd', '--proxy-display',
help=('Display info on which proxy being used ' +
'(index or full). To be used with -ps.'),
type=str, default='index')
parser.add_argument('-pxf', '--proxy-file',
help=('Load proxy list from text file (one proxy ' +
'per line), overrides -px/--proxy.'))
parser.add_argument('-pxr', '--proxy-refresh',
help=('Period of proxy file reloading, in seconds. ' +
'Works only with -pxf/--proxy-file. ' +
'(0 to disable).'),
type=int, default=0)
parser.add_argument('-pxo', '--proxy-rotation',
help=('Enable proxy rotation with account changing ' +
'for search threads (none/round/random).'),
type=str, default='none')
parser.add_argument('--db-type',
help='Type of database to be used (default: sqlite).',
default='sqlite')
parser.add_argument('--db-name', help='Name of the database to be used.')
parser.add_argument('--db-user', help='Username for the database.')
parser.add_argument('--db-pass', help='Password for the database.')
parser.add_argument('--db-host', help='IP or hostname for the database.')
parser.add_argument(
'--db-port', help='Port for the database.', type=int, default=3306)
parser.add_argument('--db-max_connections',
help='Max connections (per thread) for the database.',
type=int, default=5)
parser.add_argument('--db-threads',
help=('Number of db threads; increase if the db ' +
'queue falls behind.'),
type=int, default=1)
parser.add_argument('-wh', '--webhook',
help='Define URL(s) to POST webhook information to.',
default=None, dest='webhooks', action='append')
parser.add_argument('-gi', '--gym-info',
help=('Get all details about gyms (causes an ' +
'additional API hit for every gym).'),
action='store_true', default=False)
parser.add_argument('--disable-clean', help='Disable clean db loop.',
action='store_true', default=False)
parser.add_argument('--webhook-updates-only',
help='Only send updates (Pokemon & lured pokestops).',
action='store_true', default=False)
parser.add_argument('--wh-threads',
help=('Number of webhook threads; increase if the ' +
'webhook queue falls behind.'),
type=int, default=1)
parser.add_argument('-whc', '--wh-concurrency',
help=('Async requests pool size.'), type=int,
default=25)
parser.add_argument('-whr', '--wh-retries',
help=('Number of times to retry sending webhook ' +
'data on failure.'),
type=int, default=3)
parser.add_argument('-wht', '--wh-timeout',
help='Timeout (in seconds) for webhook requests.',
type=float, default=1.0)
parser.add_argument('-whbf', '--wh-backoff-factor',
help=('Factor (in seconds) by which the delay ' +
'until next retry will increase.'),
type=float, default=0.25)
parser.add_argument('-whlfu', '--wh-lfu-size',
help='Webhook LFU cache max size.', type=int,
default=2500)
parser.add_argument('-whsu', '--webhook-scheduler-updates',
help=('Send webhook updates with scheduler status ' +
'(use with -wh).'),
action='store_true', default=True)
parser.add_argument('--ssl-certificate',
help='Path to SSL certificate file.')
parser.add_argument('--ssl-privatekey',
help='Path to SSL private key file.')
parser.add_argument('-ps', '--print-status',
help=('Show a status screen instead of log ' +
'messages. Can switch between status and ' +
'logs by pressing enter. Optionally specify ' +
'"logs" to startup in logging mode.'),
nargs='?', const='status', default=False,
metavar='logs')
parser.add_argument('-slt', '--stats-log-timer',
help='In log view, list per hr stats every X seconds',
type=int, default=0)
parser.add_argument('-sn', '--status-name', default=None,
help=('Enable status page database update using ' +
'STATUS_NAME as main worker name.'))
parser.add_argument('-spp', '--status-page-password', default=None,
help='Set the status page password.')
parser.add_argument('-hk', '--hash-key', default=None, action='append',
help='Key for hash server')
parser.add_argument('-tut', '--complete-tutorial', action='store_true',
help=("Complete ToS and tutorial steps on accounts " +
"if they haven't already."),
default=False)
parser.add_argument('-novc', '--no-version-check', action='store_true',
help='Disable API version check.',
default=False)
parser.add_argument('-vci', '--version-check-interval', type=int,
help='Interval to check API version in seconds ' +
'(Default: in [60, 300]).',
default=random.randint(60, 300))
parser.add_argument('-el', '--encrypt-lib',
help=('Path to encrypt lib to be used instead of ' +
'the shipped ones.'))
parser.add_argument('-odt', '--on-demand_timeout',
help=('Pause searching while web UI is inactive ' +
'for this timeout (in seconds).'),
type=int, default=0)
parser.add_argument('--disable-blacklist',
help=('Disable the global anti-scraper IP blacklist.'),
action='store_true', default=False)
parser.add_argument('-tp', '--trusted-proxies', default=[],
action='append',
help=('Enables the use of X-FORWARDED-FOR headers ' +
'to identify the IP of clients connecting ' +
'through these trusted proxies.'))
verbosity = parser.add_mutually_exclusive_group()
verbosity.add_argument('-v', '--verbose',
help=('Show debug messages from RocketMap ' +
'and pgoapi. Optionally specify file ' +
'to log to.'),
nargs='?', const='nofile', default=False,
metavar='filename.log')
verbosity.add_argument('-vv', '--very-verbose',
help=('Like verbose, but show debug messages ' +
'from all modules as well. Optionally ' +
'specify file to log to.'),
nargs='?', const='nofile', default=False,
metavar='filename.log')
parser.set_defaults(DEBUG=False)
args = parser.parse_args()
if args.only_server:
if args.location is None:
parser.print_usage()
print(sys.argv[0] +
": error: arguments -l/--location is required.")
sys.exit(1)
else:
# If using a CSV file, add the data where needed into the username,
# password and auth_service arguments.
# CSV file should have lines like "ptc,username,password",
# "username,password" or "username".
if args.accountcsv is not None:
# Giving num_fields something it would usually not get.
num_fields = -1
with open(args.accountcsv, 'r') as f:
for num, line in enumerate(f, 1):
fields = []
# First time around populate num_fields with current field
# count.
if num_fields < 0:
num_fields = line.count(',') + 1
csv_input = []
csv_input.append('')
csv_input.append('<username>')
csv_input.append('<username>,<password>')
csv_input.append('<ptc/google>,<username>,<password>')
# If the number of fields is different, this is not a valid CSV.
if num_fields != line.count(',') + 1:
print(sys.argv[0] +
": Error parsing CSV file on line " + str(num) +
". Your file started with the following " +
"input, '" + csv_input[num_fields] +
"' but now you gave us '" +
csv_input[line.count(',') + 1] + "'.")
sys.exit(1)
field_error = ''
line = line.strip()
# Ignore blank lines and comment lines.
if len(line) == 0 or line.startswith('#'):
continue
# If number of fields is more than 1 split the line into
# fields and strip them.
if num_fields > 1:
fields = line.split(",")
fields = map(str.strip, fields)
# If the number of fields is one then assume this is
# "username". As requested.
if num_fields == 1:
# Empty lines are already ignored.
args.username.append(line)
# If the number of fields is two then assume this is
# "username,password". As requested.
if num_fields == 2:
# If field length is not longer than 0 something is
# wrong!
if len(fields[0]) > 0:
args.username.append(fields[0])
else:
field_error = 'username'
# If field length is not longer than 0 something is
# wrong!
if len(fields[1]) > 0:
args.password.append(fields[1])
else:
field_error = 'password'
# If the number of fields is three then assume this is
# "ptc,username,password". As requested.
if num_fields == 3:
# If field 0 is not ptc or google something is wrong!
if (fields[0].lower() == 'ptc' or
fields[0].lower() == 'google'):
args.auth_service.append(fields[0])
else:
field_error = 'method'
# If field length is not longer than 0 something is
# wrong!
if len(fields[1]) > 0:
args.username.append(fields[1])
else:
field_error = 'username'
# If field length is not longer than 0 something is
# wrong!
if len(fields[2]) > 0:
args.password.append(fields[2])
else:
field_error = 'password'
if num_fields > 3:
print(('Too many fields in accounts file: the maximum ' +
'supported is 3 fields. ' +
'Found {} fields').format(num_fields))
sys.exit(1)
# If something is wrong display error.
if field_error != '':
type_error = 'empty!'
if field_error == 'method':
type_error = (
'not ptc or google instead we got \'' +
fields[0] + '\'!')
print(sys.argv[0] +
": Error parsing CSV file on line " + str(num) +
". We found " + str(num_fields) + " fields, " +
"so your input should have looked like '" +
csv_input[num_fields] + "'\nBut you gave us '" +
line + "', your " + field_error +
" was " + type_error)
sys.exit(1)
errors = []
num_auths = len(args.auth_service)
num_usernames = 0
num_passwords = 0
if len(args.username) == 0:
errors.append(
'Missing `username` either as -u/--username, csv file ' +
'using -ac, or in config.')
else:
num_usernames = len(args.username)
if args.location is None:
errors.append(
'Missing `location` either as -l/--location or in config.')
if len(args.password) == 0:
errors.append(
'Missing `password` either as -p/--password, csv file, ' +
'or in config.')
else:
num_passwords = len(args.password)
if args.step_limit is None:
errors.append(
'Missing `step_limit` either as -st/--step-limit or ' +
'in config.')
if num_auths == 0:
args.auth_service = ['ptc']
num_auths = len(args.auth_service)
if num_usernames > 1:
if num_passwords > 1 and num_usernames != num_passwords:
errors.append((
'The number of provided passwords ({}) must match the ' +
'username count ({})').format(num_passwords,
num_usernames))
if num_auths > 1 and num_usernames != num_auths:
errors.append((
'The number of provided auth ({}) must match the ' +
'username count ({}).').format(num_auths, num_usernames))
if len(errors) > 0:
parser.print_usage()
print(sys.argv[0] + ": errors: \n - " + "\n - ".join(errors))
sys.exit(1)
# Fill the pass/auth if set to a single value.
if num_passwords == 1:
args.password = [args.password[0]] * num_usernames
if num_auths == 1:
args.auth_service = [args.auth_service[0]] * num_usernames
# Make the accounts list.
args.accounts = []
for i, username in enumerate(args.username):
args.accounts.append({'username': username,
'password': args.password[i],
'auth_service': args.auth_service[i]})
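# For illustration (hypothetical values): two usernames with one shared
# password and no explicit auth service leave args.accounts looking
# roughly like:
#   [{'username': 'user_a', 'password': 'pw', 'auth_service': 'ptc'},
#    {'username': 'user_b', 'password': 'pw', 'auth_service': 'ptc'}]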
# Prepare the L30 accounts for the account sets.
args.accounts_L30 = []
if args.high_lvl_accounts:
# Use a context manager so the file is closed automatically.
with open(args.high_lvl_accounts, 'r') as accs:
for line in accs:
# Make sure it's not an empty line.
if not line.strip():
continue
line = line.split(',')
# We need "service, user, pass".
if len(line) < 3:
raise Exception('L30 account is missing a'
+ ' field. Each line requires: '
+ '"service,user,pass".')
# Let's remove trailing whitespace.
service = line[0].strip()
username = line[1].strip()
password = line[2].strip()
hlvl_account = {
'auth_service': service,
'username': username,
'password': password,
'captcha': False
}
args.accounts_L30.append(hlvl_account)
# Prepare the IV/CP scanning filters.
args.enc_whitelist = []
# IV/CP scanning.
if args.enc_whitelist_file:
with open(args.enc_whitelist_file) as f:
args.enc_whitelist = frozenset([int(l.strip()) for l in f])
# Make max workers equal number of accounts if unspecified, and disable
# account switching.
if args.workers is None:
args.workers = len(args.accounts)
args.account_search_interval = None
# Disable search interval if 0 specified.
if args.account_search_interval == 0:
args.account_search_interval = None
# Make sure we don't have an empty account list after adding command
# line and CSV accounts.
if len(args.accounts) == 0:
print(sys.argv[0] +
": Error: no accounts specified. Use -a, -u, and -p or " +
"--accountcsv to add accounts.")
sys.exit(1)
if args.webhook_whitelist_file:
with open(args.webhook_whitelist_file) as f:
args.webhook_whitelist = frozenset(
[int(p_id.strip()) for p_id in f])
elif args.webhook_blacklist_file:
with open(args.webhook_blacklist_file) as f:
args.webhook_blacklist = frozenset(
[int(p_id.strip()) for p_id in f])
else:
args.webhook_blacklist = frozenset(
[int(i) for i in args.webhook_blacklist])
args.webhook_whitelist = frozenset(
[int(i) for i in args.webhook_whitelist])
# Decide which scanning mode to use.
if args.spawnpoint_scanning:
args.scheduler = 'SpawnScan'
elif args.skip_empty:
args.scheduler = 'HexSearchSpawnpoint'
elif args.speed_scan:
args.scheduler = 'SpeedScan'
else:
args.scheduler = 'HexSearch'
# Disable webhook scheduler updates if webhooks are disabled
if args.webhooks is None:
args.webhook_scheduler_updates = False
return args
def now():
# Current time as a Unix timestamp in whole seconds.
return int(time.time())
# Gets the seconds past the hour.
def cur_sec():
return (60 * time.gmtime().tm_min) + time.gmtime().tm_sec
# Gets the total seconds past the hour for a given date.
def date_secs(d):
return d.minute * 60 + d.second
# Checks to see if test is between start and end accounting for hour
# wraparound.
def clock_between(start, test, end):
return ((start <= test <= end and start < end) or
(not (end <= test <= start) and start > end))
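# Illustrative values (seconds past the hour), assuming nothing beyond the
# function above:
#   clock_between(100, 200, 300) -> True (test sits inside a normal window)
#   clock_between(3500, 100, 300) -> True (the window wraps past the hour)
#   clock_between(3500, 3400, 300) -> False (test falls before the window)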
# Return the s2sphere cellid token from a location.
def cellid(loc):
return CellId.from_lat_lng(LatLng.from_degrees(loc[0], loc[1])).to_token()
# Return equirectangular approximation distance in km.
def equi_rect_distance(loc1, loc2):
R = 6371 # Radius of the earth in km.
lat1 = math.radians(loc1[0])
lat2 = math.radians(loc2[0])
x = (math.radians(loc2[1]) - math.radians(loc1[1])
) * math.cos(0.5 * (lat2 + lat1))
y = lat2 - lat1
return R * math.sqrt(x * x + y * y)
# Return True if distance between two locs is less than distance in km.
def in_radius(loc1, loc2, distance):
return equi_rect_distance(loc1, loc2) < distance
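# Rough sanity check for the approximation above: two points 0.01 degrees
# of latitude apart are about 1.11 km apart, so (illustrative values)
# in_radius((40.0, -3.0), (40.01, -3.0), 2) is True while
# in_radius((40.0, -3.0), (40.01, -3.0), 1) is False.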
def i8ln(word):
if config['LOCALE'] == "en":
return word
if not hasattr(i8ln, 'dictionary'):
file_path = os.path.join(
config['ROOT_PATH'],
config['LOCALES_DIR'],
'{}.min.json'.format(config['LOCALE']))
if os.path.isfile(file_path):
with open(file_path, 'r') as f:
i8ln.dictionary = json.loads(f.read())
else:
log.warning(
'Skipping translations - unable to find locale file: %s',
file_path)
return word
if word in i8ln.dictionary:
return i8ln.dictionary[word]
else:
log.debug('Unable to find translation for "%s" in locale %s!',
word, config['LOCALE'])
return word
def get_pokemon_data(pokemon_id):
if not hasattr(get_pokemon_data, 'pokemon'):
file_path = os.path.join(
config['ROOT_PATH'],
config['DATA_DIR'],
'pokemon.min.json')
with open(file_path, 'r') as f:
get_pokemon_data.pokemon = json.loads(f.read())
return get_pokemon_data.pokemon[str(pokemon_id)]
def get_pokemon_id(pokemon_name):
if not hasattr(get_pokemon_id, 'ids'):
if not hasattr(get_pokemon_data, 'pokemon'):
# initialize from file
get_pokemon_data(1)
get_pokemon_id.ids = {}
for pokemon_id, data in get_pokemon_data.pokemon.iteritems():
get_pokemon_id.ids[data['name']] = int(pokemon_id)
return get_pokemon_id.ids.get(pokemon_name, -1)
def get_pokemon_name(pokemon_id):
return i8ln(get_pokemon_data(pokemon_id)['name'])
def get_pokemon_rarity(pokemon_id):
return i8ln(get_pokemon_data(pokemon_id)['rarity'])
def get_pokemon_types(pokemon_id):
pokemon_types = get_pokemon_data(pokemon_id)['types']
return map(lambda x: {"type": i8ln(x['type']), "color": x['color']},
pokemon_types)
def get_moves_data(move_id):
if not hasattr(get_moves_data, 'moves'):
file_path = os.path.join(
config['ROOT_PATH'],
config['DATA_DIR'],
'moves.min.json')
with open(file_path, 'r') as f:
get_moves_data.moves = json.loads(f.read())
return get_moves_data.moves[str(move_id)]
def get_move_name(move_id):
return i8ln(get_moves_data(move_id)['name'])
def get_move_damage(move_id):
return i8ln(get_moves_data(move_id)['damage'])
def get_move_energy(move_id):
return i8ln(get_moves_data(move_id)['energy'])
def get_move_type(move_id):
move_type = get_moves_data(move_id)['type']
return {"type": i8ln(move_type), "type_en": move_type}
def dottedQuadToNum(ip):
return struct.unpack("!L", socket.inet_aton(ip))[0]
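# For example, dottedQuadToNum('127.0.0.1') returns 2130706433 (0x7F000001).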
def get_blacklist():
try:
url = 'https://blist.devkat.org/blacklist.json'
blacklist = requests.get(url, timeout=5).json()
log.debug('Entries in blacklist: %s.', len(blacklist))
return blacklist
except (requests.exceptions.RequestException, IndexError, KeyError):
log.error('Unable to retrieve blacklist, setting to empty.')
return []
# Generate random device info.
# Original by Noctem.
IPHONES = {'iPhone5,1': 'N41AP',
'iPhone5,2': 'N42AP',
'iPhone5,3': 'N48AP',
'iPhone5,4': 'N49AP',
'iPhone6,1': 'N51AP',
'iPhone6,2': 'N53AP',
'iPhone7,1': 'N56AP',
'iPhone7,2': 'N61AP',
'iPhone8,1': 'N71AP',
'iPhone8,2': 'N66AP',
'iPhone8,4': 'N69AP',
'iPhone9,1': 'D10AP',
'iPhone9,2': 'D11AP',
'iPhone9,3': 'D101AP',
'iPhone9,4': 'D111AP'}
def generate_device_info():
device_info = {'device_brand': 'Apple', 'device_model': 'iPhone',
'hardware_manufacturer': 'Apple',
'firmware_brand': 'iPhone OS'}
devices = tuple(IPHONES.keys())
ios8 = ('8.0', '8.0.1', '8.0.2', '8.1', '8.1.1',
'8.1.2', '8.1.3', '8.2', '8.3', '8.4', '8.4.1')
ios9 = ('9.0', '9.0.1', '9.0.2', '9.1', '9.2', '9.2.1',
'9.3', '9.3.1', '9.3.2', '9.3.3', '9.3.4', '9.3.5')
ios10 = ('10.0', '10.0.1', '10.0.2', '10.0.3', '10.1', '10.1.1')
device_info['device_model_boot'] = random.choice(devices)
device_info['hardware_model'] = IPHONES[device_info['device_model_boot']]
device_info['device_id'] = uuid4().hex
if device_info['hardware_model'] in ('iPhone9,1', 'iPhone9,2',
'iPhone9,3', 'iPhone9,4'):
device_info['firmware_type'] = random.choice(ios10)
elif device_info['hardware_model'] in ('iPhone8,1', 'iPhone8,2',
'iPhone8,4'):
device_info['firmware_type'] = random.choice(ios9 + ios10)
else:
device_info['firmware_type'] = random.choice(ios8 + ios9 + ios10)
return device_info
def extract_sprites(root_path):
zip_path = os.path.join(
root_path,
'static01.zip')
extract_path = os.path.join(
root_path,
'static')
log.debug('Extracting sprites from "%s" to "%s"', zip_path, extract_path)
zip = zipfile.ZipFile(zip_path, 'r')
zip.extractall(extract_path)
zip.close()
def clear_dict_response(response, keep_inventory=False):
if 'platform_returns' in response:
del response['platform_returns']
if 'responses' not in response:
return response
if 'GET_INVENTORY' in response['responses'] and not keep_inventory:
del response['responses']['GET_INVENTORY']
if 'GET_HATCHED_EGGS' in response['responses']:
del response['responses']['GET_HATCHED_EGGS']
if 'CHECK_AWARDED_BADGES' in response['responses']:
del response['responses']['CHECK_AWARDED_BADGES']
if 'DOWNLOAD_SETTINGS' in response['responses']:
del response['responses']['DOWNLOAD_SETTINGS']
if 'GET_BUDDY_WALKED' in response['responses']:
del response['responses']['GET_BUDDY_WALKED']
return response
def calc_pokemon_level(cp_multiplier):
if cp_multiplier < 0.734:
pokemon_level = (58.35178527 * cp_multiplier * cp_multiplier -
2.838007664 * cp_multiplier + 0.8539209906)
else:
pokemon_level = 171.0112688 * cp_multiplier - 95.20425243
pokemon_level = int((round(pokemon_level) * 2) / 2)
return pokemon_level
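# Sanity check against commonly cited CP multipliers: a cp_multiplier of
# roughly 0.7317 maps to level 30 and roughly 0.7903 to level 40; the two
# fitted curves above meet near cp_multiplier 0.734.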
| pgandev/RocketMap | pogom/utils.py | Python | agpl-3.0 | 43,916 |
"""Define patches used for androidtv tests."""
from unittest.mock import mock_open, patch
KEY_PYTHON = "python"
KEY_SERVER = "server"
ADB_DEVICE_TCP_ASYNC_FAKE = "AdbDeviceTcpAsyncFake"
DEVICE_ASYNC_FAKE = "DeviceAsyncFake"
class AdbDeviceTcpAsyncFake:
"""A fake of the `adb_shell.adb_device_async.AdbDeviceTcpAsync` class."""
def __init__(self, *args, **kwargs):
"""Initialize a fake `adb_shell.adb_device_async.AdbDeviceTcpAsync` instance."""
self.available = False
async def close(self):
"""Close the socket connection."""
self.available = False
async def connect(self, *args, **kwargs):
"""Try to connect to a device."""
raise NotImplementedError
async def shell(self, cmd, *args, **kwargs):
"""Send an ADB shell command."""
return None
class ClientAsyncFakeSuccess:
"""A fake of the `ClientAsync` class when the connection and shell commands succeed."""
def __init__(self, host="127.0.0.1", port=5037):
"""Initialize a `ClientAsyncFakeSuccess` instance."""
self._devices = []
async def device(self, serial):
"""Mock the `ClientAsync.device` method when the device is connected via ADB."""
device = DeviceAsyncFake(serial)
self._devices.append(device)
return device
class ClientAsyncFakeFail:
"""A fake of the `ClientAsync` class when the connection and shell commands fail."""
def __init__(self, host="127.0.0.1", port=5037):
"""Initialize a `ClientAsyncFakeFail` instance."""
self._devices = []
async def device(self, serial):
"""Mock the `ClientAsync.device` method when the device is not connected via ADB."""
self._devices = []
return None
class DeviceAsyncFake:
"""A fake of the `DeviceAsync` class."""
def __init__(self, host):
"""Initialize a `DeviceAsyncFake` instance."""
self.host = host
async def shell(self, cmd):
"""Send an ADB shell command."""
raise NotImplementedError
def patch_connect(success):
"""Mock the `adb_shell.adb_device_async.AdbDeviceTcpAsync` and `ClientAsync` classes."""
async def connect_success_python(self, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.connect` method when it succeeds."""
self.available = True
async def connect_fail_python(self, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.connect` method when it fails."""
raise OSError
if success:
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.connect",
connect_success_python,
),
KEY_SERVER: patch(
"androidtv.adb_manager.adb_manager_async.ClientAsync",
ClientAsyncFakeSuccess,
),
}
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.connect", connect_fail_python
),
KEY_SERVER: patch(
"androidtv.adb_manager.adb_manager_async.ClientAsync", ClientAsyncFakeFail
),
}
def patch_shell(response=None, error=False):
"""Mock the `AdbDeviceTcpAsyncFake.shell` and `DeviceAsyncFake.shell` methods."""
async def shell_success(self, cmd, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.shell` and `DeviceAsyncFake.shell` methods when they are successful."""
self.shell_cmd = cmd
return response
async def shell_fail_python(self, cmd, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.shell` method when it fails."""
self.shell_cmd = cmd
raise ValueError
async def shell_fail_server(self, cmd):
"""Mock the `DeviceAsyncFake.shell` method when it fails."""
self.shell_cmd = cmd
raise ConnectionResetError
if not error:
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.shell", shell_success
),
KEY_SERVER: patch(f"{__name__}.{DEVICE_ASYNC_FAKE}.shell", shell_success),
}
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.shell", shell_fail_python
),
KEY_SERVER: patch(f"{__name__}.{DEVICE_ASYNC_FAKE}.shell", shell_fail_server),
}
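# A minimal usage sketch (hypothetical test body; the omitted setup and
# assertions are placeholders, not part of this module):
#
#   with patch_connect(True)[KEY_PYTHON], patch_shell("1")[KEY_PYTHON]:
#       ...  # set up the androidtv entity and assert on its state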
PATCH_ADB_DEVICE_TCP = patch(
"androidtv.adb_manager.adb_manager_async.AdbDeviceTcpAsync", AdbDeviceTcpAsyncFake
)
PATCH_ANDROIDTV_OPEN = patch(
"homeassistant.components.androidtv.media_player.open", mock_open()
)
PATCH_KEYGEN = patch("homeassistant.components.androidtv.media_player.keygen")
PATCH_SIGNER = patch(
"homeassistant.components.androidtv.media_player.ADBPythonSync.load_adbkey",
return_value="signer for testing",
)
def isfile(filepath):
"""Mock `os.path.isfile`."""
return filepath.endswith("adbkey")
PATCH_ISFILE = patch("os.path.isfile", isfile)
PATCH_ACCESS = patch("os.access", return_value=True)
def patch_firetv_update(state, current_app, running_apps, hdmi_input):
"""Patch the `FireTV.update()` method."""
return patch(
"androidtv.firetv.firetv_async.FireTVAsync.update",
return_value=(state, current_app, running_apps, hdmi_input),
)
def patch_androidtv_update(
state, current_app, running_apps, device, is_volume_muted, volume_level, hdmi_input
):
"""Patch the `AndroidTV.update()` method."""
return patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.update",
return_value=(
state,
current_app,
running_apps,
device,
is_volume_muted,
volume_level,
hdmi_input,
),
)
PATCH_LAUNCH_APP = patch("androidtv.basetv.basetv_async.BaseTVAsync.launch_app")
PATCH_STOP_APP = patch("androidtv.basetv.basetv_async.BaseTVAsync.stop_app")
# Cause the update to raise an unexpected type of exception
PATCH_ANDROIDTV_UPDATE_EXCEPTION = patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.update",
side_effect=ZeroDivisionError,
)
| jawilson/home-assistant | tests/components/androidtv/patchers.py | Python | apache-2.0 | 6,081 |
#!/usr/bin/env python
from distutils.core import setup
import fedex
LONG_DESCRIPTION = open('README.rst').read()
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
KEYWORDS = 'fedex soap suds wrapper'
setup(name='fedex',
version=fedex.VERSION,
description='Fedex Web Services API wrapper.',
long_description=LONG_DESCRIPTION,
author='Gregory Taylor',
author_email='gtaylor@gc-taylor.com',
url='https://github.com/gtaylor/python-fedex',
download_url='http://pypi.python.org/pypi/fedex/',
packages=['fedex', 'fedex.services', 'fedex.printers'],
package_dir={'fedex': 'fedex'},
package_data={'fedex': ['wsdl/*.wsdl', 'wsdl/test_server_wsdl/*.wsdl']},
platforms=['Platform Independent'],
license='BSD',
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
requires=['suds'],
install_requires=['suds'],
)
| obr/python-fedex | setup.py | Python | bsd-3-clause | 1,212 |
# This is the Twisted Get Poetry Now! client, version 1.0.
# NOTE: This should not be used as the basis for production code.
# It uses low-level Twisted APIs as a learning exercise.
import datetime, errno, optparse, socket
from twisted.internet import main
def parse_args():
usage = """usage: %prog [options] [hostname]:port ...
This is the Get Poetry Now! client, Twisted version 1.0.
Run it like this:
python get-poetry.py port1 port2 port3 ...
If you are in the base directory of the twisted-intro package,
you could run it like this:
python twisted-client-1/get-poetry.py 10001 10002 10003
to grab poetry from servers on ports 10001, 10002, and 10003.
Of course, there need to be servers listening on those ports
for that to work.
"""
parser = optparse.OptionParser(usage)
_, addresses = parser.parse_args()
if not addresses:
print parser.format_help()
parser.exit()
def parse_address(addr):
if ':' not in addr:
host = '127.0.0.1'
port = addr
else:
host, port = addr.split(':', 1)
if not port.isdigit():
parser.error('Ports must be integers.')
return host, int(port)
return map(parse_address, addresses)
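# For example (illustrative): the argument "10001" becomes
# ('127.0.0.1', 10001) and "example.com:10002" becomes
# ('example.com', 10002); a non-numeric port aborts with a parser error.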
class PoetrySocket(object):
poem = ''
def __init__(self, task_num, address):
self.task_num = task_num
self.address = address
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(address)
self.sock.setblocking(0)
# tell the Twisted reactor to monitor this socket for reading
from twisted.internet import reactor
reactor.addReader(self)
def fileno(self):
try:
return self.sock.fileno()
except socket.error:
return -1
def connectionLost(self, reason):
self.sock.close()
# stop monitoring this socket
from twisted.internet import reactor
reactor.removeReader(self)
# see if there are any poetry sockets left
for reader in reactor.getReaders():
if isinstance(reader, PoetrySocket):
return
reactor.stop() # no more poetry
def doRead(self):
bytes = ''
while True:
try:
bytesread = self.sock.recv(1024)
if not bytesread:
break
else:
bytes += bytesread
except socket.error, e:
if e.args[0] == errno.EWOULDBLOCK:
break
return main.CONNECTION_LOST
if not bytes:
print 'Task %d finished' % self.task_num
return main.CONNECTION_DONE
else:
msg = 'Task %d: got %d bytes of poetry from %s'
print msg % (self.task_num, len(bytes), self.format_addr())
self.poem += bytes
def logPrefix(self):
return 'poetry'
def format_addr(self):
host, port = self.address
return '%s:%s' % (host or '127.0.0.1', port)
def poetry_main():
addresses = parse_args()
start = datetime.datetime.now()
sockets = [PoetrySocket(i + 1, addr) for i, addr in enumerate(addresses)]
print socket
from twisted.internet import reactor
reactor.run()
elapsed = datetime.datetime.now() - start
for i, sock in enumerate(sockets):
print 'Task %d: %d bytes of poetry' % (i + 1, len(sock.poem))
print 'Got %d poems in %s' % (len(addresses), elapsed)
if __name__ == '__main__':
poetry_main()
| GavinCando/twisted_test | twisted-client-1/get-poetry.py | Python | mit | 3,576 |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta 4
# Copyright 2015 tvalacarta@gmail.com
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#
# Distributed under the terms of GNU General Public License v3 (GPLv3)
# http://www.gnu.org/licenses/gpl-3.0.html
# ------------------------------------------------------------
# This file is part of pelisalacarta 4.
#
# pelisalacarta 4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pelisalacarta 4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pelisalacarta 4. If not, see <http://www.gnu.org/licenses/>.
# ------------------------------------------------------------
# MCT - Mini Torrent Client for pelisalacarta
#------------------------------------------------------------
import os
import re
import shutil
import tempfile
import urllib
import urllib2
try:
from python_libtorrent import get_libtorrent
lt = get_libtorrent()
except Exception, e:
import libtorrent as lt
import xbmc
import xbmcgui
from core import config
from core import scrapertools
from core import filetools
def play(url, xlistitem, is_view=None, subtitle=""):
# -- Needed for some websites --------------------------------
if not url.endswith(".torrent") and not url.startswith("magnet"):
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
t_file = scrapertools.get_header_from_response(url, header_to_get="location")
if len(t_file) > 0:
url = t_file
# -- Create two download folders for the files ---------------
save_path_videos = os.path.join( config.get_setting("downloadpath") , "torrent-videos" )
save_path_torrents = os.path.join( config.get_setting("downloadpath") , "torrent-torrents" )
if not os.path.exists( save_path_torrents ): os.mkdir(save_path_torrents)
# -- Use a torrent file from the web, a magnet link or disk --
if not os.path.isfile(url) and not url.startswith("magnet"):
# -- http - create the torrent file ----------------------
data = url_get(url)
# -- The torrent name is the one contained in the -------
# -- downloaded data. -
re_name = urllib.unquote( scrapertools.get_match(data,':name\d+:(.*?)\d+:') )
#torrent_file = os.path.join(save_path_torrents, re_name+'.torrent')
torrent_file = filetools.join(save_path_torrents, unicode(re_name, "utf-8", errors="replace")+'.torrent')
f = open(torrent_file,'wb')
f.write(data)
f.close()
elif os.path.isfile(url):
# -- file - use a torrent file from the local disk ------
torrent_file = url
else:
# -- magnet ---------------------------------------------
torrent_file = url
# -----------------------------------------------------------
# -- MCT - Mini Torrent Client -------------------------------
ses = lt.session()
print "### Init session ########"
print lt.version
print "#########################"
ses.add_dht_router("router.bittorrent.com",6881)
ses.add_dht_router("router.utorrent.com",6881)
ses.add_dht_router("router.bitcomet.com",554)
ses.add_dht_router("dht.transmissionbt.com",6881)
trackers = [
"http://exodus.desync.com:6969/announce",
"udp://tracker.publicbt.com:80/announce",
"udp://tracker.openbittorrent.com:80/announce",
"http://tracker.torrentbay.to:6969/announce",
"http://fr33dom.h33t.com:3310/announce",
"http://tracker.pow7.com/announce",
"udp://tracker.ccc.de:80/announce",
"http://tracker.bittorrent.am:80/announce",
"http://denis.stalker.h3q.com:6969/announce",
"udp://tracker.prq.to:80/announce",
"udp://tracker.istole.it:80/announce",
"udp://open.demonii.com:1337",
"http://9.rarbg.com:2710/announce",
"http://announce.torrentsmd.com:6969/announce",
"http://bt.careland.com.cn:6969/announce",
"http://explodie.org:6969/announce",
"http://mgtracker.org:2710/announce",
"http://tracker.best-torrents.net:6969/announce",
"http://tracker.tfile.me/announce",
"http://tracker.torrenty.org:6969/announce",
"http://tracker1.wasabii.com.tw:6969/announce",
"udp://9.rarbg.com:2710/announce",
"udp://9.rarbg.me:2710/announce",
"udp://coppersurfer.tk:6969/announce",
"udp://tracker.btzoo.eu:80/announce",
"http://www.spanishtracker.com:2710/announce",
"http://www.todotorrents.com:2710/announce",
]
video_file = ""
# -- magnet2torrent -----------------------------------------
if torrent_file.startswith("magnet"):
tempdir = tempfile.mkdtemp()
params = {
'save_path': tempdir,
'trackers':trackers,
'storage_mode': lt.storage_mode_t.storage_mode_allocate,
'paused': False,
'auto_managed': True,
'duplicate_is_error': True
}
h = lt.add_magnet_uri(ses, torrent_file, params)
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
while not h.has_metadata():
message, porcent, msg_file, s, download = getProgress(h, "Creando torrent desde magnet")
dp.update(porcent, message, msg_file)
if s.state == 1: download = 1
if dp.iscanceled():
dp.close()
remove_files( download, torrent_file, video_file, ses, h )
return
dp.close()
info = h.get_torrent_info()
data = lt.bencode( lt.create_torrent(info).generate() )
#torrent_file = os.path.join(save_path_torrents, info.name() + ".torrent")
torrent_file = os.path.join(save_path_torrents, unicode(info.name(), "utf-8", errors="replace") + ".torrent")
f = open(torrent_file,'wb')
f.write(data)
f.close()
ses.remove_torrent(h)
shutil.rmtree(tempdir)
# -----------------------------------------------------------
# -- Torrent files -------------------------------------------
e = lt.bdecode(open(torrent_file, 'rb').read())
info = lt.torrent_info(e)
# -- The largest file (or one of the largest) is assumed to -
# -- be the video, or at least the video used as a reference -
# -- for the file type -
print "##### Archivos ## %s ##" % len(info.files())
_index_file, _video_file, _size_file = get_video_file(info)
_video_file_ext = os.path.splitext( _video_file )[1]
if _video_file_ext == ".avi" or _video_file_ext == ".mp4":
print "##### storage_mode_t.storage_mode_allocate ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_allocate } )
else:
print "##### storage_mode: none ("+_video_file_ext+") #####"
h = ses.add_torrent( { 'ti':info, 'save_path': save_path_videos, 'trackers':trackers, 'storage_mode':lt.storage_mode_t.storage_mode_sparse } )
# -----------------------------------------------------------
# -- Sequential download - piece 1, piece 2, ... -------------
h.set_sequential_download(True)
h.force_reannounce()
h.force_dht_announce()
# -- Prioritize/select the file ------------------------------
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1:
_index = _index_file
video_file = _video_file
video_size = _size_file
# -- Initialize variables for the automatic 'pause' when ---
# -- the video gets close to an incomplete piece -
is_greater_num_pieces = False
is_greater_num_pieces_plus = False
is_greater_num_pieces_pause = False
#porcent4first_pieces = int( video_size / 1073741824 )
porcent4first_pieces = int( video_size * 0.000000005 )
if porcent4first_pieces < 10: porcent4first_pieces = 10
if porcent4first_pieces > 100: porcent4first_pieces = 100
#num_pieces_to_resume = int( video_size / 1610612736 )
num_pieces_to_resume = int( video_size * 0.0000000025 )
if num_pieces_to_resume < 5: num_pieces_to_resume = 5
if num_pieces_to_resume > 25: num_pieces_to_resume = 25
print "##### porcent4first_pieces ## %s ##" % porcent4first_pieces
print "##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume
# -- Prioritize or select the pieces of the file to be ------
# -- played, using 'file_priorities' -
piece_set = set_priority_pieces(h, _index, video_file, video_size)
# -- Create the progress dialog for the first loop ----------
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
_pieces_info = {}
# -- Two nested loops ----------------------------------------
# -- Download - first loop -
while not h.is_seed():
s = h.status()
xbmc.sleep(100)
# -- Get the progress data ------------------------------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
# -- If the state is 'checking', a download exists ------
# -- 'download' is used to know whether any data has -
# -- been downloaded, for the 'remove_files' dialog -
if s.state == 1: download = 1
# -- Player - play --------------------------------------
# -- Check whether the pieces needed to start the ------
# -- video have been completed -
first_pieces = True
_p = ""
_c = 0
for i in range( piece_set[0], piece_set[porcent4first_pieces] ):
_p+= "[%s:%s]" % ( i, h.have_piece(i) )
first_pieces&= h.have_piece(i)
if h.have_piece(i): _c+= 1
_pieces_info = {'current': 0, 'continuous': "%s/%s" % (_c,porcent4first_pieces), 'have': h.status().num_pieces, 'len': len(piece_set)}
_p = "##### first_pieces [%s/%s][%s]: " % ( _c, porcent4first_pieces, len(piece_set) ) + _p
print _p
# -- -------------------------------------------------- -
if is_view != "Ok" and first_pieces:
print "##### porcent [%.2f%%]" % (s.progress * 100)
is_view = "Ok"
dp.close()
# -- Player - play the video -------------------------
playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
playlist.clear()
#ren_video_file = os.path.join( save_path_videos, video_file ).replace('\\','\\\\')
ren_video_file = os.path.join( save_path_videos, video_file )
playlist.add( ren_video_file, xlistitem )
#playlist.add( os.path.join( save_path_videos, video_file ), xlistitem )
#playlist.add( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20'), xlistitem )
player = play_video( xbmc.PLAYER_CORE_AUTO )
player.play(playlist)
'''
# -- Player - Ver el vídeo --------------------------
player = play_video()
#player.play( os.path.join( save_path_videos, video_file ) )
player.play( "http://192.168.0.200/mctplay/" + video_file.replace(' ','%20') )
'''
#player.play( os.path.join( save_path_videos, video_file ) )
# -- Cancellation counter for the automatic 'pause' -
# -- window -
is_greater_num_pieces_canceled = 0
continuous_pieces = 0
porcent_time = 0.00
current_piece = 0
# -- Prevent Kodi from resuming a file that was ------
# -- played previously and then deleted, so that it --
# -- does not attempt playback at a piece that has ---
# -- not been completed yet and trigger the ----------
# -- automatic 'pause' -
not_resume = True
# -- Subtitles flag
_sub = False
# -- Second loop - Player - event handling ----------
while player.isPlaying():
xbmc.sleep(100)
# -- Add subtitles
if subtitle!="" and not _sub:
_sub = True
player.setSubtitles(subtitle)
# -- Prevent Kodi from resuming right at the ----
# -- start of the download of a known file -
if not_resume:
player.seekTime(0)
not_resume = False
#xbmc.sleep(1000)
# -- Automatic 'pause' control -
continuous_pieces = count_completed_continuous_pieces(h, piece_set)
if xbmc.Player().isPlaying():
# -- Video progress percentage --------------
porcent_time = player.getTime() / player.getTotalTime() * 100
# -- Piece currently being played -----------
current_piece = int( porcent_time / 100 * len(piece_set) )
# -- Control flags --------------------------
is_greater_num_pieces = (current_piece > continuous_pieces - num_pieces_to_resume)
is_greater_num_pieces_plus = (current_piece + porcent4first_pieces > continuous_pieces)
is_greater_num_pieces_finished = (current_piece + porcent4first_pieces >= len(piece_set))
# -- Trigger automatic 'pause' --------------
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
is_greater_num_pieces_pause = True
player.pause()
# -- Log ------------------------------------
_TotalTime = player.getTotalTime()
_Time = player.getTime()
_print_log = "\n##### Player ##################################"
_print_log+= "\nTamaño del vídeo: %s" % video_size
_print_log+= "\nTotal piezas: %s" % len(piece_set)
_print_log+= "\nPiezas contiguas: %s" % continuous_pieces
_print_log+= "\n-----------------------------------------------"
_print_log+= "\nVídeo-Total segundos: %s" % _TotalTime
_print_log+= "\nVídeo-Progreso segundos: %s" % _Time
_print_log+= "\nVídeo-Progreso porcentaje: %.2f%%" % porcent_time
_print_log+= "\n-----------------------------------------------"
_print_log+= "\ncurrent_piece: %s" % current_piece
_print_log+= "\nis_greater_num_pieces: %s" % is_greater_num_pieces
_print_log+= "\nis_greater_num_pieces_plus: %s" % is_greater_num_pieces_plus
_print_log+= "\nis_greater_num_pieces_pause: %s" % is_greater_num_pieces_pause
_print_log+= "\nis_greater_num_pieces_finished: %s" % is_greater_num_pieces_finished
_print_log+= "\nPieza que se está visionando: %.2f" % ( porcent_time / 100 * len(piece_set) )
_print_log+= "\nOffset que se está visionando: %.2f" % ( porcent_time / 100 * video_size )
if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
_print_log+= "\n+++++++++++++++++++++++++++++++++++++++++++++++"
_print_log+= "\nPausa con:"
_print_log+= "\n current_piece = %s" % current_piece
_print_log+= "\n continuous_pieces = %s" % continuous_pieces
_print_log+= "\n###############################################"
print _print_log
# -------------------------------------------
_pieces_info = {'current': current_piece, 'continuous': continuous_pieces, 'have': h.status().num_pieces, 'len': len(piece_set)}
# -- Close the progress dialog ------------------
if player.resumed:
dp.close()
# -- Show the progress dialog -------------------
if player.paused:
# -- Create the dialog if it does not exist -
if not player.statusDialogoProgress:
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
player.setDialogoProgress()
# -- Status dialogs during playback ---------
if not h.is_seed():
# -- Get the progress data --------------
message, porcent, msg_file, s, download = getProgress(h, video_file, _pf=_pieces_info)
dp.update(porcent, message, msg_file)
else:
dp.update(100, "Descarga completa: " + video_file)
# -- Progress dialog cancelled during playback -
# -- Continue -
if dp.iscanceled():
dp.close()
player.pause()
# -- Progress dialog cancelled during playback, -
# -- in the automatic 'pause' window. -
# -- Stop if the counter reaches 3 -
if dp.iscanceled() and is_greater_num_pieces_pause:
is_greater_num_pieces_canceled+= 1
if is_greater_num_pieces_canceled == 3:
player.stop()
# -- Disable automatic 'pause' and ----------
# -- reset the cancellation counter -
if not dp.iscanceled() and not is_greater_num_pieces_plus and is_greater_num_pieces_pause:
dp.close()
player.pause()
is_greater_num_pieces_pause = False
is_greater_num_pieces_canceled = 0
# -- The user cancelled playback ------------
# -- Finish -
if player.ended:
# -- Delete-files dialog ----------------
remove_files( download, torrent_file, video_file, ses, h )
return
# -- Kodi - playback was closed --------------------------
# -- Continue | Finish -
if is_view == "Ok" and not xbmc.Player().isPlaying():
if info.num_files() == 1:
# -- Continue-or-finish dialog ------------------
d = xbmcgui.Dialog()
ok = d.yesno('pelisalacarta-MCT', 'XBMC-Kodi Cerró el vídeo.', '¿Continuar con la sesión?')
else: ok = False
# -- YES --------------------------------------------
if ok:
# -- Continue: -----------------------------------
is_view=None
else:
# -- Finish: --------------------------------------
# -- Check whether the video belongs to a list ----
# -- of files -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Delete-files dialog --------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- File list. Options dialog --------------
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
# -- Show progress before playback starts ----------------
if is_view != "Ok" :
dp.update(porcent, message, msg_file)
# -- Progress dialog cancelled before playback -----------
# -- Finish -
if dp.iscanceled():
dp.close()
# -- Check whether the video belongs to a list of ---
# -- files -
_index, video_file, video_size = get_video_files_sizes( info )
if _index == -1 or info.num_files() == 1:
# -- Delete-files dialog ------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
else:
# -- File list. Options dialog ------------------
piece_set = set_priority_pieces(h, _index, video_file, video_size)
is_view=None
dp = xbmcgui.DialogProgress()
dp.create('pelisalacarta-MCT')
# -- Kodi - Error? - We should never get here ----------------
if is_view == "Ok" and not xbmc.Player().isPlaying():
dp.close()
# -- Delete-files dialog --------------------------------
remove_files( download, torrent_file, video_file, ses, h )
return
# -- Download progress -------------------------------------------
def getProgress(h, video_file, _pf={}):
if len(_pf) > 0:
#_pf_msg = "[%s] [%s] [%s] [%s][CR]" % (_pf['current'], _pf['continuous'], _pf['have'], _pf['len'])
_pf_msg = "[%s] [%s] [%s] [%s]" % (_pf['current'], _pf['continuous'], _pf['have'], _pf['len'])
else: _pf_msg = ""
s = h.status()
state_str = ['queued', 'checking', 'downloading metadata', \
'downloading', 'finished', 'seeding', 'allocating', 'checking fastresume']
message = '%.2f%% d:%.1f kb/s u:%.1f kb/s p:%d s:%d %s' % \
(s.progress * 100, s.download_rate / 1000, s.upload_rate / 1000, \
s.num_peers, s.num_seeds, state_str[s.state])
porcent = int( s.progress * 100 )
download = ( s.progress * 100 )
if "/" in video_file: video_file = video_file.split("/")[1]
#msg_file = "..../"+video_file + " - %.2f MB" % (s.total_wanted/1048576.0)
#msg_file = video_file + " - %.2f MB" % (s.total_wanted/1048576.0)
msg_file = video_file
#msg_file = "[%s] "%len(msg_file)+_pf_msg+msg_file
if len(msg_file) > 50:
msg_file = msg_file.replace( video_file, os.path.splitext(video_file)[0][:40] + "... " + os.path.splitext(video_file)[1] )
msg_file = msg_file + "[CR]" + "%.2f MB" % (s.total_wanted/1048576.0) + " - " + _pf_msg
return (message, porcent, msg_file, s, download)
# -- play_video class - event handling ---------------------------
class play_video(xbmc.Player):
def __init__( self, *args, **kwargs ):
self.paused = False
self.resumed = True
self.statusDialogoProgress = False
self.ended = False
def onPlayBackPaused(self):
self.paused = True
self.resumed = False
def onPlayBackResumed(self):
self.paused = False
self.resumed = True
self.statusDialogoProgress = False
def is_paused(self):
return self.paused
def setDialogoProgress(self):
self.statusDialogoProgress = True
def is_started(self):
self.ended = False
def is_ended(self):
self.ended = True
# -- Get the name of a video file from the metadata --------------
# -- The largest file (or one of the largest) is assumed to be ---
# -- the video, or the video used as a reference for the file ----
# -- type -
def get_video_file( info ):
size_file = 0
for i, f in enumerate(info.files()):
if f.size > size_file:
video_file = f.path.replace("\\","/")
size_file = f.size
index_file = i
return index_file, video_file, size_file
# -- Selection list for the video to prioritize ------------------
def get_video_files_sizes( info ):
opciones = []
vfile_name = {}
vfile_size = {}
for i, f in enumerate( info.files() ):
#_title = f.path
#try: _title = f.path.encode('iso-8859-1')
#except: _title = f.path.decode('utf-8')
#_title = f.path.encode('iso-8859-1')
_title = unicode(f.path, "iso-8859-1", errors="replace")
_title = unicode(f.path, "utf-8", errors="replace")
_title = re.sub(r'(.*? )- Temporada (\d+) Completa(.*?)',
r'\1T\2\3',
_title)
_title = re.sub(r'\s\([^\)]+\)|\s\-',
'',
_title)
info.rename_file( i, _title )
for i, f in enumerate( info.files() ):
_index = int(i)
_title = f.path.replace("\\","/")
_size = f.size
_offset = f.offset
_file_name = os.path.splitext( _title )[0]
if "/" in _file_name: _file_name = _file_name.split('/')[1]
_file_ext = os.path.splitext( _title )[1]
_caption = str(i) + \
" - " + \
_file_name + _file_ext + \
" - %.2f MB" % (_size / 1048576.0)
vfile_name[i] = _title
vfile_size[i] = _size
opciones.append(_caption)
if len(opciones) > 1:
d = xbmcgui.Dialog()
seleccion = d.select("pelisalacarta-MCT: Lista de vídeos", opciones)
else: seleccion = 0
if seleccion == -1:
vfile_name[seleccion] = ""
vfile_size[seleccion] = 0
return seleccion, vfile_name[seleccion], vfile_size[seleccion]
# -- Ask whether the downloaded data should be deleted -----------
def remove_files( download, torrent_file, video_file, ses, h ):
dialog_view = False
torrent = False
if os.path.isfile( torrent_file ):
dialog_view = True
torrent = True
if download > 0:
dialog_view = True
if "/" in video_file: video_file = video_file.split("/")[0]
if dialog_view:
d = xbmcgui.Dialog()
ok = d.yesno('pelisalacarta-MCT', 'Borrar las descargas del video', video_file)
# -- YES ------------------------------------------------
if ok:
# -- Delete the torrent file ------------------------
if torrent:
os.remove( torrent_file )
# -- Delete folder/files and session - video --------
ses.remove_torrent( h, 1 )
print "### End session #########"
else:
# -- Remove the session -----------------------------
ses.remove_torrent( h )
print "### End session #########"
else:
# -- Remove the session ---------------------------------
ses.remove_torrent( h )
print "### End session #########"
return
# -- Download the data from the web to create the torrent --------
# -- To slim down the mct.py script this function could be -------
# -- imported from the torrent.py connector -
def url_get(url, params={}, headers={}):
from contextlib import closing
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:20.0) Gecko/20100101 Firefox/20.0"
if params:
import urllib
url = "%s?%s" % (url, urllib.urlencode(params))
req = urllib2.Request(url)
req.add_header("User-Agent", USER_AGENT)
for k, v in headers.items():
req.add_header(k, v)
try:
with closing(urllib2.urlopen(req)) as response:
data = response.read()
if response.headers.get("Content-Encoding", "") == "gzip":
import zlib
return zlib.decompressobj(16 + zlib.MAX_WBITS).decompress(data)
return data
except urllib2.HTTPError:
return None
# -- Helper for logging have_piece during testing ----------------
def print_have_piece_set(h, piece_set):
c = 0
_print = "\n"
for i, _set in enumerate(piece_set):
if h.have_piece(_set): _print+= "[%s]" % str(_set).zfill(5)
else: _print+= "[XXXXX]"
c+= 1
if c == 20:
c = 0
_print+= "\n"
print _print
# -- Count the video's completed contiguous pieces ---------------
def count_completed_continuous_pieces(h, piece_set):
not_zero = 0
for i, _set in enumerate(piece_set):
if not h.have_piece(_set): break
else: not_zero = 1
return i + not_zero
# -- Prioritize or select the pieces of the file to be played ----
# -- using 'file_priorities', setting the desired file to 1 and --
# -- the rest of the files to 0, while storing the indices of ----
# -- the file's pieces in a list -
def set_priority_pieces(h, _index, video_file, video_size):
for i, _set in enumerate(h.file_priorities()):
if i != _index: h.file_priority(i,0)
else: h.file_priority(i,1)
piece_set = []
for i, _set in enumerate(h.piece_priorities()):
if _set == 1: piece_set.append(i)
return piece_set
| MoRgUiJu/morguiju.repo | plugin.video.pelisalacarta/platformcode/mct.py | Python | gpl-2.0 | 29,761 |
# -*- coding: utf-8 -*-
import re
from datetime import datetime
from django.apps import apps
from django.db.models import Q
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.http import QueryDict
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from rest_framework import viewsets, exceptions, status, mixins, filters
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework_filters import backends
from .. import models, serializers
from ..filters import (
ComputerFilter, StoreFilter, PropertyFilter,
ProjectFilter, AttributeSetFilter, AttributeFilter, PackageFilter,
DeploymentFilter, ErrorFilter, FaultDefinitionFilter,
FaultFilter, NotificationFilter, MigrationFilter,
NodeFilter, SynchronizationFilter, StatusLogFilter,
DeviceFilter, DriverFilter, ScheduleDelayFilter,
)
from ..tasks import create_repository_metadata
class MigasViewSet(viewsets.ViewSet):
@action(methods=['get'], detail=True)
def relations(self, request, pk=None):
app = self.queryset.model._meta.app_label
model = self.queryset.model._meta.model_name
try:
response = apps.get_model(app, model).objects.get(pk=pk).relations(request)
return Response(response, status=status.HTTP_200_OK)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
@action(methods=['get'], detail=True)
def badge(self, request, pk=None):
app = self.queryset.model._meta.app_label
model = self.queryset.model._meta.model_name
try:
response = apps.get_model(app, model).objects.get(pk=pk).badge()
return Response(response, status=status.HTTP_200_OK)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
class AttributeSetViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.AttributeSet.objects.all()
serializer_class = serializers.AttributeSetSerializer
filter_class = AttributeSetFilter
ordering_fields = '__all__'
ordering = ('name',)
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.AttributeSetWriteSerializer
return serializers.AttributeSetSerializer
class AttributeViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Attribute.objects.all()
serializer_class = serializers.AttributeSerializer
filter_class = AttributeFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend, filters.SearchFilter)
search_fields = ['value', 'description']
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(id__in=user.get_attributes()).distinct()
return qs
def get_serializer_class(self):
if self.action == 'update' or self.action == 'partial_update':
return serializers.AttributeWriteSerializer
return serializers.AttributeSerializer
@action(methods=['get', 'put', 'patch'], detail=True, url_path='logical-devices')
def logical_devices(self, request, pk=None):
"""
GET
returns: [
{
"id": 112,
"device": {
"id": 6,
"name": "19940"
},
"feature": {
"id": 2,
"name": "Color"
},
"name": ""
},
{
"id": 7,
"device": {
"id": 6,
"name": "19940"
},
"feature": {
"id": 1,
"name": "BN"
},
"name": ""
}
]
PUT, PATCH
input: [id1, id2, idN]
returns: status code 201
"""
attribute = get_object_or_404(models.Attribute, pk=pk)
logical_devices = attribute.devicelogical_set.all()
if request.method == 'GET':
serializer = serializers.LogicalSerializer(
logical_devices,
many=True
)
return Response(serializer.data, status=status.HTTP_200_OK)
if request.method == 'PATCH': # append cid attribute to logical devices
for device_id in request.data:
device = get_object_or_404(models.DeviceLogical, pk=device_id)
if device not in logical_devices:
device.attributes.add(pk)
return Response(status=status.HTTP_201_CREATED)
if request.method == 'PUT': # replace cid attribute in logical devices
for device in logical_devices:
if device in logical_devices:
device.attributes.remove(pk)
for device_id in request.data:
device = get_object_or_404(models.DeviceLogical, pk=device_id)
device.attributes.add(pk)
return Response(status=status.HTTP_201_CREATED)
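# Illustrative request for the logical-devices action above (the URL prefix
# depends on the project's router configuration and is only a guess here):
#
#   PATCH /api/v1/token/attributes/<pk>/logical-devices/
#   Content-Type: application/json
#
#   [112, 7]
#
# appends logical devices 112 and 7 to the attribute and returns HTTP 201.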
class ComputerViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Computer.objects.all()
serializer_class = serializers.ComputerSerializer
filter_class = ComputerFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering = (settings.MIGASFREE_COMPUTER_SEARCH_FIELDS[0],)
def get_serializer_class(self):
if self.action == 'update' or self.action == 'partial_update':
return serializers.ComputerWriteSerializer
return serializers.ComputerSerializer
def partial_update(self, request, *args, **kwargs):
if isinstance(request.data, QueryDict):
data = dict(request.data.lists())
else:
data = request.data
devices = data.get(
'assigned_logical_devices_to_cid[]',
data.get('assigned_logical_devices_to_cid', None)
)
if devices:
computer = get_object_or_404(models.Computer, pk=kwargs['pk'])
try:
assigned_logical_devices_to_cid = list(map(int, devices))
except ValueError:
assigned_logical_devices_to_cid = []
for item in assigned_logical_devices_to_cid:
logical_device = models.DeviceLogical.objects.get(pk=item)
model = models.DeviceModel.objects.get(device=logical_device.device)
if not models.DeviceDriver.objects.filter(
feature=logical_device.feature,
model=model,
project=computer.project
):
return Response(
_('Error in feature %s for assign computer %s.'
' There is no driver defined for project %s in model %s.') % (
logical_device.feature,
computer,
computer.project,
"<a href='{}'>{}</a>".format(
reverse(
'admin:server_devicemodel_change',
args=(model.pk,)
),
model
)
),
status=status.HTTP_400_BAD_REQUEST,
content_type='text/plain'
)
computer.update_logical_devices(assigned_logical_devices_to_cid)
return super(ComputerViewSet, self).partial_update(
request,
*args,
**kwargs
)
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(id__in=user.get_computers())
return qs
@action(methods=['get'], detail=True, url_name='devices')
def devices(self, request, pk=None):
computer = get_object_or_404(models.Computer, pk=pk)
serializer = serializers.ComputerDevicesSerializer(computer, context={'request': request})
return Response(
serializer.data,
status=status.HTTP_200_OK
)
@action(methods=['get'], detail=True, url_path='software/inventory', url_name='software_inventory')
def software_inventory(self, request, pk=None):
"""
Returns installed packages in a computer
"""
computer = get_object_or_404(models.Computer, pk=pk)
data = []
if computer.software_inventory:
data = re.sub(r'^\+', '', computer.software_inventory, flags=re.MULTILINE)
data = re.sub(r'^-', '', data, flags=re.MULTILINE)
data = data.rstrip().split('\n')
return Response(
data,
status=status.HTTP_200_OK
)
@action(methods=['get'], detail=True, url_path='software/history', url_name='software_history')
def software_history(self, request, pk=None):
"""
Returns software history of a computer
"""
computer = get_object_or_404(models.Computer, pk=pk)
return Response(
computer.software_history,
status=status.HTTP_200_OK
)
@action(methods=['post'], detail=True)
def status(self, request, pk=None):
"""
Input: {
'status': 'available' | 'reserved' | 'unsubscribed' | 'unknown'
| 'intended'
}
Changes computer status
"""
computer = get_object_or_404(models.Computer, pk=pk)
ret = computer.change_status(request.data.get('status'))
if not ret:
raise exceptions.ParseError(
_('Status must have one of the values: %s') % (
dict(models.Computer.STATUS_CHOICES).keys()
)
)
serializer = serializers.ComputerSerializer(computer, context={'request': request})
return Response(
serializer.data,
status=status.HTTP_200_OK
)
@action(methods=['post'], detail=True)
def replacement(self, request, pk=None):
"""
Input: {
'target': id
}
Exchanges tags and status
"""
source = get_object_or_404(models.Computer, pk=pk)
target = get_object_or_404(
models.Computer, id=request.data.get('target')
)
models.Computer.replacement(source, target)
return Response(status=status.HTTP_200_OK)
@action(methods=['get'], detail=True)
def sync(self, request, pk=None):
"""
:returns
{
"date": "Y-m-d H:M:s",
"user": {
"id": x,
"name": "xxx",
"fullname": "xxxxx"
},
"attributes": [
{
"id": x,
"value": "xxx",
"description": "xxxxx",
"total_computers"; xx,
"property_att": {
"id": x,
"prefix": "xxx"
}
},
...
]
}
"""
computer = get_object_or_404(models.Computer, pk=pk)
serializer = serializers.ComputerSyncSerializer(computer, context={'request': request})
return Response(serializer.data, status=status.HTTP_200_OK)
@action(methods=['get'], detail=True)
def situation(self, request, pk=None):
"""
:param request
date
:param pk: computer id
:return:
{
"platform": {
"id": x,
"name": "xxx"
},
"project": {
"id": x,
"name": "xxx"
},
"status": "xxx"
}
"""
user = request.user.userprofile
computer = get_object_or_404(models.Computer, pk=pk)
date = request.GET.get('date', datetime.now())
migration = models.Migration.situation(computer.id, date, user)
status_log = models.StatusLog.situation(computer.id, date, user)
response = {}
if migration:
serializer = serializers.PlatformSerializer(migration.project.platform, context={'request': request})
response['platform'] = serializer.data
serializer = serializers.ProjectInfoSerializer(migration.project, context={'request': request})
response['project'] = serializer.data
if status_log:
response['status'] = status_log.status
else:
if isinstance(date, str):
date = datetime.strptime(date, '%Y-%m-%d')
if date >= computer.created_at:
response['status'] = settings.MIGASFREE_DEFAULT_COMPUTER_STATUS
return Response(response, status=status.HTTP_200_OK)
class ErrorViewSet(
mixins.ListModelMixin, mixins.RetrieveModelMixin,
mixins.UpdateModelMixin, mixins.DestroyModelMixin,
viewsets.GenericViewSet, MigasViewSet
):
queryset = models.Error.objects.all()
serializer_class = serializers.ErrorSerializer
filter_class = ErrorFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend, filters.SearchFilter)
search_fields = ['created_at', 'description']
ordering_fields = '__all__'
ordering = ('-created_at',)
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(
project_id__in=user.get_projects(),
computer_id__in=user.get_computers()
)
return qs
def get_serializer_class(self):
if self.action == 'update' or self.action == 'partial_update':
return serializers.ErrorWriteSerializer
return serializers.ErrorSerializer
class FaultDefinitionViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.FaultDefinition.objects.all()
serializer_class = serializers.FaultDefinitionSerializer
filter_class = FaultDefinitionFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('name',)
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.FaultDefinitionWriteSerializer
return serializers.FaultDefinitionSerializer
class FaultViewSet(
mixins.ListModelMixin, mixins.RetrieveModelMixin,
mixins.UpdateModelMixin, mixins.DestroyModelMixin,
viewsets.GenericViewSet, MigasViewSet
):
queryset = models.Fault.objects.all()
serializer_class = serializers.FaultSerializer
filter_class = FaultFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend, filters.SearchFilter)
search_fields = ['created_at', 'result']
ordering_fields = '__all__'
ordering = ('-created_at',)
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(
project_id__in=user.get_projects(),
computer_id__in=user.get_computers()
)
return qs
def get_serializer_class(self):
if self.action == 'update' or self.action == 'partial_update':
return serializers.FaultWriteSerializer
return serializers.FaultSerializer
class HardwareComputerViewSet(viewsets.ViewSet):
queryset = models.HwNode.objects.all() # FIXME
@action(methods=['get'], detail=True)
def hardware(self, request, pk=None):
computer = get_object_or_404(models.Computer, pk=pk)
nodes = models.HwNode.objects.filter(computer=computer).order_by('id')
serializer = serializers.NodeSerializer(nodes, many=True)
return Response(
serializer.data,
status=status.HTTP_200_OK
)
class HardwareViewSet(
mixins.ListModelMixin, mixins.RetrieveModelMixin,
viewsets.GenericViewSet, MigasViewSet
):
queryset = models.HwNode.objects.all()
serializer_class = serializers.NodeSerializer
filter_class = NodeFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('id',)
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(computer_id__in=user.get_computers())
return qs
class MigrationViewSet(
mixins.ListModelMixin, mixins.RetrieveModelMixin,
mixins.DestroyModelMixin, viewsets.GenericViewSet, MigasViewSet
):
queryset = models.Migration.objects.all()
serializer_class = serializers.MigrationSerializer
filter_class = MigrationFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('-created_at',)
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(
project_id__in=user.get_projects(),
computer_id__in=user.get_computers()
)
return qs
class NotificationViewSet(
mixins.ListModelMixin, mixins.RetrieveModelMixin,
mixins.UpdateModelMixin, mixins.DestroyModelMixin,
viewsets.GenericViewSet, MigasViewSet
):
queryset = models.Notification.objects.all()
serializer_class = serializers.NotificationSerializer
filter_class = NotificationFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend, filters.SearchFilter)
search_fields = ['message']
ordering_fields = '__all__'
ordering = ('-created_at',)
def get_serializer_class(self):
if self.action == 'update' or self.action == 'partial_update':
return serializers.NotificationWriteSerializer
return serializers.NotificationSerializer
class PackageViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Package.objects.all()
serializer_class = serializers.PackageSerializer
filter_class = PackageFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('name', 'project__name')
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(project__in=user.get_projects())
return qs
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.PackageWriteSerializer
return serializers.PackageSerializer
@action(methods=['get'], detail=False)
def orphan(self, request):
"""
Returns packages that are not in any deployment
"""
serializer = serializers.PackageSerializer(
models.Package.objects.filter(deployment__id=None),
many=True
)
return Response(
serializer.data,
status=status.HTTP_200_OK
)
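# Illustrative note (not part of the original file): with PackageViewSet
# registered on a DRF router under a hypothetical "packages" prefix, the
# extra action above maps to something like:
#
#   GET /packages/orphan/
#
# and returns the serialized packages that belong to no deployment.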
class PlatformViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Platform.objects.all()
serializer_class = serializers.PlatformSerializer
ordering_fields = '__all__'
ordering = ('name',)
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(project__in=user.get_projects()).distinct()
return qs
class PmsViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Pms.objects.all()
serializer_class = serializers.PmsSerializer
ordering_fields = '__all__'
ordering = ('name',)
class PropertyViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Property.objects.all()
serializer_class = serializers.PropertySerializer
filter_class = PropertyFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend, filters.SearchFilter)
search_fields = ['name', 'language', 'code']
ordering_fields = '__all__'
ordering = ('prefix', 'name')
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.PropertyWriteSerializer
return serializers.PropertySerializer
class InternalSourceViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.InternalSource.objects.all()
serializer_class = serializers.InternalSourceSerializer
filter_class = DeploymentFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('-start_date', 'name')
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset.filter(source=models.Deployment.SOURCE_INTERNAL)
if not user.is_view_all():
qs = qs.filter(project__in=user.get_projects())
if user.domain_preference:
qs = qs.filter(domain=user.domain_preference)
return qs
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.InternalSourceWriteSerializer
return serializers.InternalSourceSerializer
@action(methods=['get'], detail=True)
def metadata(self, request, pk=None):
"""
Creates repository metadata
"""
get_object_or_404(models.InternalSource, pk=pk)
ret = create_repository_metadata(pk)
return Response(
{'detail': ret},
status=status.HTTP_200_OK
)
class ExternalSourceViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.ExternalSource.objects.all()
serializer_class = serializers.ExternalSourceSerializer
filter_class = DeploymentFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('-start_date', 'name')
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset.filter(source=models.Deployment.SOURCE_EXTERNAL)
if not user.is_view_all():
qs = qs.filter(project__in=user.get_projects())
if user.domain_preference:
qs = qs.filter(domain=user.domain_preference)
return qs
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.ExternalSourceWriteSerializer
return serializers.ExternalSourceSerializer
class ScheduleDelayViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.ScheduleDelay.objects.all()
serializer_class = serializers.ScheduleDelaySerializer
filter_class = ScheduleDelayFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('delay',)
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.ScheduleDelayWriteSerializer
return serializers.ScheduleDelaySerializer
class ScheduleViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Schedule.objects.all()
serializer_class = serializers.ScheduleSerializer
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('name',)
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.ScheduleWriteSerializer
return serializers.ScheduleSerializer
class StatusLogViewSet(
mixins.ListModelMixin, mixins.RetrieveModelMixin,
mixins.DestroyModelMixin, viewsets.GenericViewSet, MigasViewSet
):
queryset = models.StatusLog.objects.all()
serializer_class = serializers.StatusLogSerializer
filter_class = StatusLogFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend, filters.SearchFilter)
search_fields = ['status']
ordering_fields = '__all__'
ordering = ('-created_at',)
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(computer_id__in=user.get_computers())
return qs
class StoreViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Store.objects.all()
serializer_class = serializers.StoreSerializer
filter_class = StoreFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend, filters.SearchFilter)
search_fields = ['name']
ordering_fields = '__all__'
ordering = ('name', 'project__name')
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(project__in=user.get_projects())
return qs
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.StoreWriteSerializer
return serializers.StoreSerializer
class SynchronizationViewSet(
mixins.ListModelMixin, mixins.RetrieveModelMixin,
mixins.DestroyModelMixin, viewsets.GenericViewSet, MigasViewSet
):
queryset = models.Synchronization.objects.all()
serializer_class = serializers.SynchronizationSerializer
filter_class = SynchronizationFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend, filters.SearchFilter)
search_fields = ['user__name', 'user__fullname']
ordering_fields = '__all__'
ordering = ('-created_at',)
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(
project_id__in=user.get_projects(),
computer_id__in=user.get_computers()
)
return qs
class UserViewSet(
mixins.ListModelMixin, mixins.RetrieveModelMixin,
mixins.DestroyModelMixin, viewsets.GenericViewSet, MigasViewSet
):
queryset = models.User.objects.all()
serializer_class = serializers.UserSerializer
ordering_fields = '__all__'
ordering = ('name',)
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(computer__in=user.get_computers())
return qs
class ProjectViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Project.objects.all()
serializer_class = serializers.ProjectSerializer
filter_class = ProjectFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('name',)
def get_queryset(self):
user = self.request.user.userprofile
qs = self.queryset
if not user.is_view_all():
qs = qs.filter(id__in=user.get_projects())
return qs
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.ProjectWriteSerializer
return serializers.ProjectSerializer
class DomainViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Domain.objects.all()
serializer_class = serializers.DomainSerializer
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('name',)
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.DomainWriteSerializer
return serializers.DomainSerializer
class ScopeViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Scope.objects.all()
serializer_class = serializers.ScopeSerializer
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('name',)
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.ScopeWriteSerializer
return serializers.ScopeSerializer
class ConnectionViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.DeviceConnection.objects.all()
serializer_class = serializers.ConnectionSerializer
ordering_fields = '__all__'
ordering = ('id',)
class DeviceViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.Device.objects.all()
serializer_class = serializers.DeviceSerializer
filter_class = DeviceFilter
filter_backends = (filters.OrderingFilter, backends.DjangoFilterBackend)
ordering_fields = '__all__'
ordering = ('name',)
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.DeviceWriteSerializer
return serializers.DeviceSerializer
@action(methods=['get'], detail=False)
def available(self, request):
"""
:param request:
cid (computer Id) int,
q string (name or data contains...),
page int
:return: DeviceSerializer set
"""
computer = get_object_or_404(models.Computer, pk=request.GET.get('cid', 0))
query = request.GET.get('q', '')
results = models.Device.objects.filter(
available_for_attributes__in=computer.sync_attributes.values_list('id', flat=True)
).order_by('name', 'model__name').distinct()
if query:
results = results.filter(Q(name__icontains=query) | Q(data__icontains=query))
page = self.paginate_queryset(results)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(results, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
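# Illustrative note (not part of the original file): the "available" action
# above reads its parameters from the query string, e.g. (URL prefix assumed):
#
#   GET /devices/available/?cid=42&q=hp
#
# It returns the devices whose available_for_attributes intersect the given
# computer's sync_attributes, filtered by name/data and paginated when a
# paginator is configured.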
class DriverViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.DeviceDriver.objects.all()
serializer_class = serializers.DriverSerializer
filter_class = DriverFilter
ordering_fields = '__all__'
ordering = ('name',)
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.DriverWriteSerializer
return serializers.DriverSerializer
class FeatureViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.DeviceFeature.objects.all()
serializer_class = serializers.FeatureSerializer
ordering_fields = '__all__'
ordering = ('name',)
class LogicalViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.DeviceLogical.objects.all()
serializer_class = serializers.LogicalSerializer
ordering_fields = '__all__'
ordering = ('device__name',)
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.LogicalWriteSerializer
return serializers.LogicalSerializer
@action(methods=['get'], detail=False)
def available(self, request):
"""
:param request:
cid (computer Id) int,
q string (name or data contains...),
did (device Id) int,
page int
:return: DeviceLogicalSerializer set
"""
computer = get_object_or_404(models.Computer, pk=request.GET.get('cid', 0))
query = request.GET.get('q', '')
device = request.GET.get('did', 0)
results = models.DeviceLogical.objects.filter(
device__available_for_attributes__in=computer.sync_attributes.values_list('id', flat=True)
).order_by('device__name', 'feature__name').distinct()
if query:
results = results.filter(Q(device__name__icontains=query) | Q(device__data__icontains=query))
if device:
results = results.filter(device__id=device)
page = self.paginate_queryset(results)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(results, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
class ManufacturerViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.DeviceManufacturer.objects.all()
serializer_class = serializers.ManufacturerSerializer
ordering_fields = '__all__'
ordering = ('name',)
class ModelViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.DeviceModel.objects.all()
serializer_class = serializers.ModelSerializer
ordering_fields = '__all__'
ordering = ('name',)
def get_serializer_class(self):
if self.action == 'create' or self.action == 'update' \
or self.action == 'partial_update':
return serializers.ModelWriteSerializer
return serializers.ModelSerializer
class TypeViewSet(viewsets.ModelViewSet, MigasViewSet):
queryset = models.DeviceType.objects.all()
serializer_class = serializers.TypeSerializer
ordering_fields = '__all__'
ordering = ('name',)
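# Wiring sketch (an assumption, not part of the original file): these viewsets
# are normally exposed through a DRF router in a urls module, e.g.:
#
#   from rest_framework import routers
#
#   router = routers.DefaultRouter()
#   router.register(r'packages', PackageViewSet)   # prefixes are hypothetical
#   router.register(r'faults', FaultViewSet)
#   router.register(r'devices', DeviceViewSet)
#   urlpatterns = router.urls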
| migasfree/migasfree | migasfree/server/views/token.py | Python | gpl-3.0 | 34,742 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import datetime
import sys
from oslo_config import cfg
from oslo_db import api as oslo_db_api
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
from oslo_utils import timeutils
import osprofiler.sqlalchemy
import six
import sqlalchemy
from sqlalchemy import and_
from sqlalchemy import func
from sqlalchemy import orm
from sqlalchemy.orm import aliased as orm_aliased
from sqlalchemy.orm import session as orm_session
from heat.common import crypt
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.db.sqlalchemy import filters as db_filters
from heat.db.sqlalchemy import migration
from heat.db.sqlalchemy import models
from heat.db.sqlalchemy import utils as db_utils
from heat.engine import environment as heat_environment
from heat.rpc import api as rpc_api
CONF = cfg.CONF
CONF.import_opt('hidden_stack_tags', 'heat.common.config')
CONF.import_opt('max_events_per_stack', 'heat.common.config')
CONF.import_group('profiler', 'heat.common.config')
_facade = None
LOG = logging.getLogger(__name__)
def get_facade():
global _facade
if not _facade:
_facade = db_session.EngineFacade.from_config(CONF)
if CONF.profiler.enabled:
if CONF.profiler.trace_sqlalchemy:
osprofiler.sqlalchemy.add_tracing(sqlalchemy,
_facade.get_engine(),
"db")
return _facade
def get_engine():
return get_facade().get_engine()
def get_session():
return get_facade().get_session()
def get_backend():
"""The backend is this module itself."""
return sys.modules[__name__]
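# Usage sketch (not part of the original module): the helpers above give the
# rest of this file its engine/session access, e.g.:
#
#   engine = get_engine()     # lazily builds the EngineFacade from CONF
#   session = get_session()   # fresh session from the same facade
#
# osprofiler tracing is only attached when both CONF.profiler.enabled and
# CONF.profiler.trace_sqlalchemy are set.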
def update_and_save(context, obj, values):
with context.session.begin(subtransactions=True):
for k, v in six.iteritems(values):
setattr(obj, k, v)
def delete_softly(context, obj):
"""Mark this object as deleted."""
update_and_save(context, obj, {'deleted_at': timeutils.utcnow()})
def soft_delete_aware_query(context, *args, **kwargs):
"""Stack query helper that accounts for context's `show_deleted` field.
:param show_deleted: if True, overrides context's show_deleted field.
"""
query = context.session.query(*args)
show_deleted = kwargs.get('show_deleted') or context.show_deleted
if not show_deleted:
query = query.filter_by(deleted_at=None)
return query
def raw_template_get(context, template_id):
result = context.session.query(models.RawTemplate).get(template_id)
if not result:
raise exception.NotFound(_('raw template with id %s not found') %
template_id)
return result
def raw_template_create(context, values):
raw_template_ref = models.RawTemplate()
raw_template_ref.update(values)
raw_template_ref.save(context.session)
return raw_template_ref
def raw_template_update(context, template_id, values):
raw_template_ref = raw_template_get(context, template_id)
# get only the changed values
values = dict((k, v) for k, v in values.items()
if getattr(raw_template_ref, k) != v)
if values:
update_and_save(context, raw_template_ref, values)
return raw_template_ref
def raw_template_delete(context, template_id):
raw_template = raw_template_get(context, template_id)
raw_tmpl_files_id = raw_template.files_id
session = context.session
with session.begin(subtransactions=True):
session.delete(raw_template)
if raw_tmpl_files_id is None:
return
# If no other raw_template is referencing the same raw_template_files,
# delete that too
if session.query(models.RawTemplate).filter_by(
files_id=raw_tmpl_files_id).first() is None:
raw_tmpl_files = raw_template_files_get(context, raw_tmpl_files_id)
session.delete(raw_tmpl_files)
def raw_template_files_create(context, values):
session = context.session
raw_templ_files_ref = models.RawTemplateFiles()
raw_templ_files_ref.update(values)
with session.begin():
raw_templ_files_ref.save(session)
return raw_templ_files_ref
def raw_template_files_get(context, files_id):
session = context.session if context else get_session()
result = session.query(models.RawTemplateFiles).get(files_id)
if not result:
raise exception.NotFound(
_("raw_template_files with files_id %d not found") %
files_id)
return result
def resource_get(context, resource_id, refresh=False):
result = context.session.query(models.Resource).get(resource_id)
if not result:
raise exception.NotFound(_("resource with id %s not found") %
resource_id)
if refresh:
context.session.refresh(result)
# ensure data is loaded (lazy or otherwise)
result.data
return result
def resource_get_by_name_and_stack(context, resource_name, stack_id):
result = context.session.query(
models.Resource
).filter_by(
name=resource_name
).filter_by(
stack_id=stack_id
).options(orm.joinedload("data")).first()
return result
def resource_get_by_physical_resource_id(context, physical_resource_id):
results = (context.session.query(models.Resource)
.filter_by(physical_resource_id=physical_resource_id)
.all())
for result in results:
if context is None or context.tenant_id in (
result.stack.tenant, result.stack.stack_user_project_id):
return result
return None
def resource_get_all(context):
results = context.session.query(models.Resource).all()
if not results:
raise exception.NotFound(_('no resources were found'))
return results
def resource_purge_deleted(context, stack_id):
filters = {'stack_id': stack_id, 'action': 'DELETE', 'status': 'COMPLETE'}
query = context.session.query(models.Resource.id)
result = query.filter_by(**filters)
result.delete()
def resource_update(context, resource_id, values, atomic_key,
expected_engine_id=None):
session = context.session
with session.begin(subtransactions=True):
if atomic_key is None:
values['atomic_key'] = 1
else:
values['atomic_key'] = atomic_key + 1
rows_updated = session.query(models.Resource).filter_by(
id=resource_id, engine_id=expected_engine_id,
atomic_key=atomic_key).update(values)
return bool(rows_updated)
def resource_update_and_save(context, resource_id, values):
resource = context.session.query(models.Resource).get(resource_id)
update_and_save(context, resource, values)
def resource_delete(context, resource_id):
session = context.session
with session.begin(subtransactions=True):
resource = session.query(models.Resource).get(resource_id)
if resource:
session.delete(resource)
def resource_data_get_all(context, resource_id, data=None):
"""Looks up resource_data by resource.id.
If data is encrypted, this method will decrypt the results.
"""
if data is None:
data = (context.session.query(models.ResourceData)
.filter_by(resource_id=resource_id)).all()
if not data:
raise exception.NotFound(_('no resource data found'))
ret = {}
for res in data:
if res.redact:
ret[res.key] = crypt.decrypt(res.decrypt_method, res.value)
else:
ret[res.key] = res.value
return ret
def resource_data_get(context, resource_id, key):
"""Lookup value of resource's data by key.
Decrypts resource data if necessary.
"""
result = resource_data_get_by_key(context,
resource_id,
key)
if result.redact:
return crypt.decrypt(result.decrypt_method, result.value)
return result.value
def stack_tags_set(context, stack_id, tags):
session = context.session
with session.begin():
stack_tags_delete(context, stack_id)
result = []
for tag in tags:
stack_tag = models.StackTag()
stack_tag.tag = tag
stack_tag.stack_id = stack_id
stack_tag.save(session=session)
result.append(stack_tag)
return result or None
def stack_tags_delete(context, stack_id):
session = context.session
with session.begin(subtransactions=True):
result = stack_tags_get(context, stack_id)
if result:
for tag in result:
session.delete(tag)
def stack_tags_get(context, stack_id):
result = (context.session.query(models.StackTag)
.filter_by(stack_id=stack_id)
.all())
return result or None
def resource_data_get_by_key(context, resource_id, key):
"""Looks up resource_data by resource_id and key.
Does not decrypt resource_data.
"""
result = (context.session.query(models.ResourceData)
.filter_by(resource_id=resource_id)
.filter_by(key=key).first())
if not result:
raise exception.NotFound(_('No resource data found'))
return result
def resource_data_set(context, resource_id, key, value, redact=False):
"""Save resource's key/value pair to database."""
if redact:
method, value = crypt.encrypt(value)
else:
method = ''
try:
current = resource_data_get_by_key(context, resource_id, key)
except exception.NotFound:
current = models.ResourceData()
current.key = key
current.resource_id = resource_id
current.redact = redact
current.value = value
current.decrypt_method = method
current.save(session=context.session)
return current
def resource_exchange_stacks(context, resource_id1, resource_id2):
query = context.session.query(models.Resource)
session = query.session
session.begin()
res1 = query.get(resource_id1)
res2 = query.get(resource_id2)
res1.stack, res2.stack = res2.stack, res1.stack
session.commit()
def resource_data_delete(context, resource_id, key):
result = resource_data_get_by_key(context, resource_id, key)
session = context.session
with session.begin(subtransactions=True):
session.delete(result)
def resource_create(context, values):
resource_ref = models.Resource()
resource_ref.update(values)
resource_ref.save(context.session)
return resource_ref
def resource_get_all_by_stack(context, stack_id, filters=None):
query = context.session.query(
models.Resource
).filter_by(
stack_id=stack_id
).options(orm.joinedload("data"))
query = db_filters.exact_filter(query, models.Resource, filters)
results = query.all()
return dict((res.name, res) for res in results)
def resource_get_all_active_by_stack(context, stack_id):
filters = {'stack_id': stack_id, 'action': 'DELETE', 'status': 'COMPLETE'}
subquery = context.session.query(models.Resource.id).filter_by(**filters)
results = context.session.query(models.Resource).filter_by(
stack_id=stack_id).filter(
models.Resource.id.notin_(subquery.as_scalar())
).options(orm.joinedload("data")).all()
return dict((res.id, res) for res in results)
def resource_get_all_by_root_stack(context, stack_id, filters=None):
query = context.session.query(
models.Resource
).filter_by(
root_stack_id=stack_id
).options(orm.joinedload("data"))
query = db_filters.exact_filter(query, models.Resource, filters)
results = query.all()
return dict((res.id, res) for res in results)
def stack_get_by_name_and_owner_id(context, stack_name, owner_id):
query = soft_delete_aware_query(
context, models.Stack
).options(orm.joinedload("raw_template")).filter(sqlalchemy.or_(
models.Stack.tenant == context.tenant_id,
models.Stack.stack_user_project_id == context.tenant_id)
).filter_by(name=stack_name).filter_by(owner_id=owner_id)
return query.first()
def stack_get_by_name(context, stack_name):
query = soft_delete_aware_query(
context, models.Stack
).options(orm.joinedload("raw_template")).filter(sqlalchemy.or_(
models.Stack.tenant == context.tenant_id,
models.Stack.stack_user_project_id == context.tenant_id)
).filter_by(name=stack_name)
return query.first()
def stack_get(context, stack_id, show_deleted=False):
query = context.session.query(models.Stack).options(
orm.joinedload("raw_template"))
result = query.get(stack_id)
deleted_ok = show_deleted or context.show_deleted
if result is None or result.deleted_at is not None and not deleted_ok:
return None
# One exception to normal project scoping is users created by the
# stacks in the stack_user_project_id (in the heat stack user domain)
if (result is not None
and context is not None and not context.is_admin
and context.tenant_id not in (result.tenant,
result.stack_user_project_id)):
return None
return result
def stack_get_status(context, stack_id):
query = context.session.query(models.Stack)
query = query.options(
orm.load_only("action", "status", "status_reason", "updated_at"))
result = query.filter_by(id=stack_id).first()
if result is None:
raise exception.NotFound(_('Stack with id %s not found') % stack_id)
return (result.action, result.status, result.status_reason,
result.updated_at)
def stack_get_all_by_owner_id(context, owner_id):
results = soft_delete_aware_query(
context, models.Stack).filter_by(owner_id=owner_id).all()
return results
def stack_get_all_by_root_owner_id(context, owner_id):
for stack in stack_get_all_by_owner_id(context, owner_id):
yield stack
for ch_st in stack_get_all_by_root_owner_id(context, stack.id):
yield ch_st
def _get_sort_keys(sort_keys, mapping):
"""Returns an array containing only whitelisted keys
:param sort_keys: an array of strings
:param mapping: a mapping from keys to DB column names
:returns: filtered list of sort keys
"""
if isinstance(sort_keys, six.string_types):
sort_keys = [sort_keys]
return [mapping[key] for key in sort_keys or [] if key in mapping]
def _paginate_query(context, query, model, limit=None, sort_keys=None,
marker=None, sort_dir=None):
default_sort_keys = ['created_at']
if not sort_keys:
sort_keys = default_sort_keys
if not sort_dir:
sort_dir = 'desc'
# This assures the order of the stacks will always be the same
# even for sort_key values that are not unique in the database
sort_keys = sort_keys + ['id']
model_marker = None
if marker:
model_marker = context.session.query(model).get(marker)
try:
query = utils.paginate_query(query, model, limit, sort_keys,
model_marker, sort_dir)
except utils.InvalidSortKey as exc:
err_msg = encodeutils.exception_to_unicode(exc)
raise exception.Invalid(reason=err_msg)
return query
def _query_stack_get_all(context, show_deleted=False,
show_nested=False, show_hidden=False, tags=None,
tags_any=None, not_tags=None, not_tags_any=None):
if show_nested:
query = soft_delete_aware_query(
context, models.Stack, show_deleted=show_deleted
).filter_by(backup=False)
else:
query = soft_delete_aware_query(
context, models.Stack, show_deleted=show_deleted
).filter_by(owner_id=None)
if not context.is_admin:
query = query.filter_by(tenant=context.tenant_id)
query = query.options(orm.subqueryload("tags"))
if tags:
for tag in tags:
tag_alias = orm_aliased(models.StackTag)
query = query.join(tag_alias, models.Stack.tags)
query = query.filter(tag_alias.tag == tag)
if tags_any:
query = query.filter(
models.Stack.tags.any(
models.StackTag.tag.in_(tags_any)))
if not_tags:
subquery = soft_delete_aware_query(
context, models.Stack, show_deleted=show_deleted
)
for tag in not_tags:
tag_alias = orm_aliased(models.StackTag)
subquery = subquery.join(tag_alias, models.Stack.tags)
subquery = subquery.filter(tag_alias.tag == tag)
not_stack_ids = [s.id for s in subquery.all()]
query = query.filter(models.Stack.id.notin_(not_stack_ids))
if not_tags_any:
query = query.filter(
~models.Stack.tags.any(
models.StackTag.tag.in_(not_tags_any)))
if not show_hidden and cfg.CONF.hidden_stack_tags:
query = query.filter(
~models.Stack.tags.any(
models.StackTag.tag.in_(cfg.CONF.hidden_stack_tags)))
return query
def stack_get_all(context, limit=None, sort_keys=None, marker=None,
sort_dir=None, filters=None,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
query = _query_stack_get_all(context,
show_deleted=show_deleted,
show_nested=show_nested,
show_hidden=show_hidden, tags=tags,
tags_any=tags_any, not_tags=not_tags,
not_tags_any=not_tags_any)
query = query.options(orm.joinedload("raw_template"))
return _filter_and_page_query(context, query, limit, sort_keys,
marker, sort_dir, filters).all()
def _filter_and_page_query(context, query, limit=None, sort_keys=None,
marker=None, sort_dir=None, filters=None):
if filters is None:
filters = {}
sort_key_map = {rpc_api.STACK_NAME: models.Stack.name.key,
rpc_api.STACK_STATUS: models.Stack.status.key,
rpc_api.STACK_CREATION_TIME: models.Stack.created_at.key,
rpc_api.STACK_UPDATED_TIME: models.Stack.updated_at.key}
whitelisted_sort_keys = _get_sort_keys(sort_keys, sort_key_map)
query = db_filters.exact_filter(query, models.Stack, filters)
return _paginate_query(context, query, models.Stack, limit,
whitelisted_sort_keys, marker, sort_dir)
def stack_count_all(context, filters=None,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
query = _query_stack_get_all(context,
show_deleted=show_deleted,
show_nested=show_nested,
show_hidden=show_hidden, tags=tags,
tags_any=tags_any, not_tags=not_tags,
not_tags_any=not_tags_any)
query = db_filters.exact_filter(query, models.Stack, filters)
return query.count()
def stack_create(context, values):
stack_ref = models.Stack()
stack_ref.update(values)
stack_ref.save(context.session)
return stack_ref
@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
retry_interval=0.5, inc_retry_interval=True)
def stack_update(context, stack_id, values, exp_trvsl=None):
stack = stack_get(context, stack_id)
if stack is None:
raise exception.NotFound(_('Attempt to update a stack with id: '
'%(id)s %(msg)s') % {
'id': stack_id,
'msg': 'that does not exist'})
if (exp_trvsl is not None
and stack.current_traversal != exp_trvsl):
# stack updated by another update
return False
session = context.session
with session.begin(subtransactions=True):
rows_updated = (session.query(models.Stack)
.filter(models.Stack.id == stack.id)
.filter(models.Stack.current_traversal
== stack.current_traversal)
.update(values, synchronize_session=False))
session.expire_all()
return (rows_updated is not None and rows_updated > 0)
def stack_delete(context, stack_id):
s = stack_get(context, stack_id)
if not s:
raise exception.NotFound(_('Attempt to delete a stack with id: '
'%(id)s %(msg)s') % {
'id': stack_id,
'msg': 'that does not exist'})
session = context.session
with session.begin():
for r in s.resources:
session.delete(r)
delete_softly(context, s)
@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
retry_interval=0.5, inc_retry_interval=True)
def stack_lock_create(context, stack_id, engine_id):
session = get_session()
with session.begin():
lock = session.query(models.StackLock).get(stack_id)
if lock is not None:
return lock.engine_id
session.add(models.StackLock(stack_id=stack_id, engine_id=engine_id))
def stack_lock_get_engine_id(context, stack_id):
session = get_session()
with session.begin():
lock = session.query(models.StackLock).get(stack_id)
if lock is not None:
return lock.engine_id
def persist_state_and_release_lock(context, stack_id, engine_id, values):
session = context.session
with session.begin():
rows_updated = (session.query(models.Stack)
.filter(models.Stack.id == stack_id)
.update(values, synchronize_session=False))
rows_affected = None
if rows_updated is not None and rows_updated > 0:
rows_affected = session.query(
models.StackLock
).filter_by(stack_id=stack_id, engine_id=engine_id).delete()
session.expire_all()
if not rows_affected:
return True
def stack_lock_steal(context, stack_id, old_engine_id, new_engine_id):
session = get_session()
with session.begin():
lock = session.query(models.StackLock).get(stack_id)
rows_affected = session.query(
models.StackLock
).filter_by(stack_id=stack_id, engine_id=old_engine_id
).update({"engine_id": new_engine_id})
if not rows_affected:
return lock.engine_id if lock is not None else True
def stack_lock_release(context, stack_id, engine_id):
session = get_session()
with session.begin():
rows_affected = session.query(
models.StackLock
).filter_by(stack_id=stack_id, engine_id=engine_id).delete()
if not rows_affected:
return True
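# Illustrative lock lifecycle (not part of the original module): callers are
# expected to pair the helpers above roughly as follows:
#
#   holder = stack_lock_create(ctxt, stack_id, engine_id)
#   if holder is None:                      # lock acquired by this engine
#       try:
#           ...                             # operate on the stack
#       finally:
#           stack_lock_release(ctxt, stack_id, engine_id)
#   else:
#       ...                                 # `holder` owns the lock; retry or steal it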
def stack_get_root_id(context, stack_id):
s = stack_get(context, stack_id)
if not s:
return None
while s.owner_id:
s = stack_get(context, s.owner_id)
return s.id
def stack_count_total_resources(context, stack_id):
# count all resources which belong to the root stack
results = context.session.query(
models.Resource
).filter(models.Resource.root_stack_id == stack_id).count()
return results
def user_creds_create(context):
values = context.to_dict()
user_creds_ref = models.UserCreds()
if values.get('trust_id'):
method, trust_id = crypt.encrypt(values.get('trust_id'))
user_creds_ref.trust_id = trust_id
user_creds_ref.decrypt_method = method
user_creds_ref.trustor_user_id = values.get('trustor_user_id')
user_creds_ref.username = None
user_creds_ref.password = None
user_creds_ref.tenant = values.get('tenant')
user_creds_ref.tenant_id = values.get('tenant_id')
user_creds_ref.auth_url = values.get('auth_url')
user_creds_ref.region_name = values.get('region_name')
else:
user_creds_ref.update(values)
method, password = crypt.encrypt(values['password'])
if len(six.text_type(password)) > 255:
raise exception.Error(_("Length of OS_PASSWORD after encryption"
" exceeds Heat limit (255 chars)"))
user_creds_ref.password = password
user_creds_ref.decrypt_method = method
user_creds_ref.save(context.session)
result = dict(user_creds_ref)
if values.get('trust_id'):
result['trust_id'] = values.get('trust_id')
else:
result['password'] = values.get('password')
return result
def user_creds_get(context, user_creds_id):
db_result = context.session.query(models.UserCreds).get(user_creds_id)
if db_result is None:
return None
# Return a dict copy of db results, do not decrypt details into db_result
# or it can be committed back to the DB in decrypted form
result = dict(db_result)
del result['decrypt_method']
result['password'] = crypt.decrypt(
db_result.decrypt_method, result['password'])
result['trust_id'] = crypt.decrypt(
db_result.decrypt_method, result['trust_id'])
return result
@db_utils.retry_on_stale_data_error
def user_creds_delete(context, user_creds_id):
creds = context.session.query(models.UserCreds).get(user_creds_id)
if not creds:
raise exception.NotFound(
_('Attempt to delete user creds with id '
'%(id)s that does not exist') % {'id': user_creds_id})
session = orm_session.Session.object_session(creds)
with session.begin():
session.delete(creds)
def event_get(context, event_id):
result = context.session.query(models.Event).get(event_id)
return result
def event_get_all(context):
stacks = soft_delete_aware_query(context, models.Stack)
stack_ids = [stack.id for stack in stacks]
results = context.session.query(
models.Event
).filter(models.Event.stack_id.in_(stack_ids)).all()
return results
def event_get_all_by_tenant(context, limit=None, marker=None,
sort_keys=None, sort_dir=None, filters=None):
query = context.session.query(models.Event)
query = db_filters.exact_filter(query, models.Event, filters)
query = query.join(
models.Event.stack
).filter_by(tenant=context.tenant_id).filter_by(deleted_at=None)
filters = None
return _events_filter_and_page_query(context, query, limit, marker,
sort_keys, sort_dir, filters).all()
def _query_all_by_stack(context, stack_id):
query = context.session.query(models.Event).filter_by(stack_id=stack_id)
return query
def event_get_all_by_stack(context, stack_id, limit=None, marker=None,
sort_keys=None, sort_dir=None, filters=None):
query = _query_all_by_stack(context, stack_id)
return _events_filter_and_page_query(context, query, limit, marker,
sort_keys, sort_dir, filters).all()
def _events_paginate_query(context, query, model, limit=None, sort_keys=None,
marker=None, sort_dir=None):
default_sort_keys = ['created_at']
if not sort_keys:
sort_keys = default_sort_keys
if not sort_dir:
sort_dir = 'desc'
# This assures the order of the stacks will always be the same
# even for sort_key values that are not unique in the database
sort_keys = sort_keys + ['id']
model_marker = None
if marker:
# not to use context.session.query(model).get(marker), because
# user can only see the ID(column 'uuid') and the ID as the marker
model_marker = context.session.query(
model).filter_by(uuid=marker).first()
try:
query = utils.paginate_query(query, model, limit, sort_keys,
model_marker, sort_dir)
except utils.InvalidSortKey as exc:
err_msg = encodeutils.exception_to_unicode(exc)
raise exception.Invalid(reason=err_msg)
return query
def _events_filter_and_page_query(context, query,
limit=None, marker=None,
sort_keys=None, sort_dir=None,
filters=None):
if filters is None:
filters = {}
sort_key_map = {rpc_api.EVENT_TIMESTAMP: models.Event.created_at.key,
rpc_api.EVENT_RES_TYPE: models.Event.resource_type.key}
whitelisted_sort_keys = _get_sort_keys(sort_keys, sort_key_map)
query = db_filters.exact_filter(query, models.Event, filters)
return _events_paginate_query(context, query, models.Event, limit,
whitelisted_sort_keys, marker, sort_dir)
def event_count_all_by_stack(context, stack_id):
query = context.session.query(func.count(models.Event.id))
return query.filter_by(stack_id=stack_id).scalar()
def _delete_event_rows(context, stack_id, limit):
# MySQL does not support LIMIT in subqueries,
# sqlite does not support JOIN in DELETE.
# So we must manually supply the IN() values.
# pgsql SHOULD work with the pure DELETE/JOIN below but that must be
# confirmed via integration tests.
query = _query_all_by_stack(context, stack_id)
session = context.session
ids = [r.id for r in query.order_by(
models.Event.id).limit(limit).all()]
q = session.query(models.Event).filter(
models.Event.id.in_(ids))
return q.delete(synchronize_session='fetch')
def event_create(context, values):
if 'stack_id' in values and cfg.CONF.max_events_per_stack:
if ((event_count_all_by_stack(context, values['stack_id']) >=
cfg.CONF.max_events_per_stack)):
# prune
_delete_event_rows(
context, values['stack_id'], cfg.CONF.event_purge_batch_size)
event_ref = models.Event()
event_ref.update(values)
event_ref.save(context.session)
return event_ref
def watch_rule_get(context, watch_rule_id):
result = context.session.query(models.WatchRule).get(watch_rule_id)
return result
def watch_rule_get_by_name(context, watch_rule_name):
result = context.session.query(
models.WatchRule).filter_by(name=watch_rule_name).first()
return result
def watch_rule_get_all(context):
results = context.session.query(models.WatchRule).all()
return results
def watch_rule_get_all_by_stack(context, stack_id):
results = context.session.query(
models.WatchRule).filter_by(stack_id=stack_id).all()
return results
def watch_rule_create(context, values):
obj_ref = models.WatchRule()
obj_ref.update(values)
obj_ref.save(context.session)
return obj_ref
def watch_rule_update(context, watch_id, values):
wr = watch_rule_get(context, watch_id)
if not wr:
raise exception.NotFound(_('Attempt to update a watch with id: '
'%(id)s %(msg)s') % {
'id': watch_id,
'msg': 'that does not exist'})
wr.update(values)
wr.save(context.session)
def watch_rule_delete(context, watch_id):
wr = watch_rule_get(context, watch_id)
if not wr:
raise exception.NotFound(_('Attempt to delete watch_rule: '
'%(id)s %(msg)s') % {
'id': watch_id,
'msg': 'that does not exist'})
session = orm_session.Session.object_session(wr)
with session.begin():
for d in wr.watch_data:
session.delete(d)
session.delete(wr)
def watch_data_create(context, values):
obj_ref = models.WatchData()
obj_ref.update(values)
obj_ref.save(context.session)
return obj_ref
def watch_data_get_all(context):
results = context.session.query(models.WatchData).all()
return results
def watch_data_get_all_by_watch_rule_id(context, watch_rule_id):
results = context.session.query(models.WatchData).filter_by(
watch_rule_id=watch_rule_id).all()
return results
def software_config_create(context, values):
obj_ref = models.SoftwareConfig()
obj_ref.update(values)
obj_ref.save(context.session)
return obj_ref
def software_config_get(context, config_id):
result = context.session.query(models.SoftwareConfig).get(config_id)
if (result is not None and context is not None and
result.tenant != context.tenant_id):
result = None
if not result:
raise exception.NotFound(_('Software config with id %s not found') %
config_id)
return result
def software_config_get_all(context, limit=None, marker=None):
query = context.session.query(models.SoftwareConfig)
if not context.is_admin:
query = query.filter_by(tenant=context.tenant_id)
return _paginate_query(context, query, models.SoftwareConfig,
limit=limit, marker=marker).all()
def software_config_delete(context, config_id):
config = software_config_get(context, config_id)
# Query if the software config has been referenced by deployment.
result = context.session.query(models.SoftwareDeployment).filter_by(
config_id=config_id).first()
if result:
msg = (_("Software config with id %s can not be deleted as "
"it is referenced.") % config_id)
raise exception.InvalidRestrictedAction(message=msg)
session = orm_session.Session.object_session(config)
with session.begin():
session.delete(config)
def software_deployment_create(context, values):
obj_ref = models.SoftwareDeployment()
obj_ref.update(values)
session = context.session
session.begin()
obj_ref.save(session)
session.commit()
return obj_ref
def software_deployment_get(context, deployment_id):
result = context.session.query(
models.SoftwareDeployment).get(deployment_id)
if (result is not None and context is not None and
context.tenant_id not in (result.tenant,
result.stack_user_project_id)):
result = None
if not result:
raise exception.NotFound(_('Deployment with id %s not found') %
deployment_id)
return result
def software_deployment_get_all(context, server_id=None):
sd = models.SoftwareDeployment
query = context.session.query(
sd
).filter(sqlalchemy.or_(
sd.tenant == context.tenant_id,
sd.stack_user_project_id == context.tenant_id)
).order_by(sd.created_at)
if server_id:
query = query.filter_by(server_id=server_id)
return query.all()
def software_deployment_update(context, deployment_id, values):
deployment = software_deployment_get(context, deployment_id)
update_and_save(context, deployment, values)
return deployment
def software_deployment_delete(context, deployment_id):
deployment = software_deployment_get(context, deployment_id)
session = context.session
with session.begin(subtransactions=True):
session.delete(deployment)
def snapshot_create(context, values):
obj_ref = models.Snapshot()
obj_ref.update(values)
obj_ref.save(context.session)
return obj_ref
def snapshot_get(context, snapshot_id):
result = context.session.query(models.Snapshot).get(snapshot_id)
if (result is not None and context is not None and
context.tenant_id != result.tenant):
result = None
if not result:
raise exception.NotFound(_('Snapshot with id %s not found') %
snapshot_id)
return result
def snapshot_get_by_stack(context, snapshot_id, stack):
snapshot = snapshot_get(context, snapshot_id)
if snapshot.stack_id != stack.id:
raise exception.SnapshotNotFound(snapshot=snapshot_id,
stack=stack.name)
return snapshot
def snapshot_update(context, snapshot_id, values):
snapshot = snapshot_get(context, snapshot_id)
snapshot.update(values)
snapshot.save(context.session)
return snapshot
def snapshot_delete(context, snapshot_id):
snapshot = snapshot_get(context, snapshot_id)
session = orm_session.Session.object_session(snapshot)
with session.begin():
session.delete(snapshot)
def snapshot_get_all(context, stack_id):
return context.session.query(models.Snapshot).filter_by(
stack_id=stack_id, tenant=context.tenant_id)
def service_create(context, values):
service = models.Service()
service.update(values)
service.save(context.session)
return service
def service_update(context, service_id, values):
service = service_get(context, service_id)
values.update({'updated_at': timeutils.utcnow()})
service.update(values)
service.save(context.session)
return service
def service_delete(context, service_id, soft_delete=True):
service = service_get(context, service_id)
session = context.session
with session.begin():
if soft_delete:
delete_softly(context, service)
else:
session.delete(service)
def service_get(context, service_id):
result = context.session.query(models.Service).get(service_id)
if result is None:
raise exception.EntityNotFound(entity='Service', name=service_id)
return result
def service_get_all(context):
return (context.session.query(models.Service).
filter_by(deleted_at=None).all())
def service_get_all_by_args(context, host, binary, hostname):
return (context.session.query(models.Service).
filter_by(host=host).
filter_by(binary=binary).
filter_by(hostname=hostname).all())
def purge_deleted(age, granularity='days', project_id=None):
try:
age = int(age)
except ValueError:
raise exception.Error(_("age should be an integer"))
if age < 0:
raise exception.Error(_("age should be a positive integer"))
if granularity not in ('days', 'hours', 'minutes', 'seconds'):
raise exception.Error(
_("granularity should be days, hours, minutes, or seconds"))
if granularity == 'days':
age = age * 86400
elif granularity == 'hours':
age = age * 3600
elif granularity == 'minutes':
age = age * 60
time_line = timeutils.utcnow() - datetime.timedelta(seconds=age)
engine = get_engine()
meta = sqlalchemy.MetaData()
meta.bind = engine
stack = sqlalchemy.Table('stack', meta, autoload=True)
stack_lock = sqlalchemy.Table('stack_lock', meta, autoload=True)
stack_tag = sqlalchemy.Table('stack_tag', meta, autoload=True)
resource = sqlalchemy.Table('resource', meta, autoload=True)
resource_data = sqlalchemy.Table('resource_data', meta, autoload=True)
event = sqlalchemy.Table('event', meta, autoload=True)
raw_template = sqlalchemy.Table('raw_template', meta, autoload=True)
raw_template_files = sqlalchemy.Table('raw_template_files', meta,
autoload=True)
user_creds = sqlalchemy.Table('user_creds', meta, autoload=True)
service = sqlalchemy.Table('service', meta, autoload=True)
syncpoint = sqlalchemy.Table('sync_point', meta, autoload=True)
# find the soft-deleted stacks that are past their expiry
if project_id:
stack_where = sqlalchemy.select([
stack.c.id, stack.c.raw_template_id,
stack.c.prev_raw_template_id,
stack.c.user_creds_id]).where(and_(
stack.c.tenant == project_id,
stack.c.deleted_at < time_line))
else:
stack_where = sqlalchemy.select([
stack.c.id, stack.c.raw_template_id,
stack.c.prev_raw_template_id,
stack.c.user_creds_id]).where(
stack.c.deleted_at < time_line)
stacks = list(engine.execute(stack_where))
if stacks:
stack_ids = [i[0] for i in stacks]
# delete stack locks (just in case some got stuck)
stack_lock_del = stack_lock.delete().where(
stack_lock.c.stack_id.in_(stack_ids))
engine.execute(stack_lock_del)
# delete stack tags
stack_tag_del = stack_tag.delete().where(
stack_tag.c.stack_id.in_(stack_ids))
engine.execute(stack_tag_del)
# delete resource_data
res_where = sqlalchemy.select([resource.c.id]).where(
resource.c.stack_id.in_(stack_ids))
res_data_del = resource_data.delete().where(
resource_data.c.resource_id.in_(res_where))
engine.execute(res_data_del)
# delete resources
res_del = resource.delete().where(resource.c.stack_id.in_(stack_ids))
engine.execute(res_del)
# delete events
event_del = event.delete().where(event.c.stack_id.in_(stack_ids))
engine.execute(event_del)
# clean up any sync_points that may have lingered
sync_del = syncpoint.delete().where(
syncpoint.c.stack_id.in_(stack_ids))
engine.execute(sync_del)
# delete the stacks
stack_del = stack.delete().where(stack.c.id.in_(stack_ids))
engine.execute(stack_del)
# delete orphaned raw templates
raw_template_ids = [i[1] for i in stacks if i[1] is not None]
raw_template_ids.extend(i[2] for i in stacks if i[2] is not None)
if raw_template_ids:
# keep those still referenced
raw_tmpl_sel = sqlalchemy.select([stack.c.raw_template_id]).where(
stack.c.raw_template_id.in_(raw_template_ids))
raw_tmpl = [i[0] for i in engine.execute(raw_tmpl_sel)]
raw_template_ids = set(raw_template_ids) - set(raw_tmpl)
raw_tmpl_sel = sqlalchemy.select(
[stack.c.prev_raw_template_id]).where(
stack.c.prev_raw_template_id.in_(raw_template_ids))
raw_tmpl = [i[0] for i in engine.execute(raw_tmpl_sel)]
raw_template_ids = raw_template_ids - set(raw_tmpl)
raw_tmpl_file_sel = sqlalchemy.select(
[raw_template.c.files_id]).where(
raw_template.c.id.in_(raw_template_ids))
raw_tmpl_file_ids = [i[0] for i in engine.execute(
raw_tmpl_file_sel)]
raw_templ_del = raw_template.delete().where(
raw_template.c.id.in_(raw_template_ids))
engine.execute(raw_templ_del)
# purge any raw_template_files that are no longer referenced
if raw_tmpl_file_ids:
raw_tmpl_file_sel = sqlalchemy.select(
[raw_template.c.files_id]).where(
raw_template.c.files_id.in_(raw_tmpl_file_ids))
raw_tmpl_files = [i[0] for i in engine.execute(
raw_tmpl_file_sel)]
raw_tmpl_file_ids = set(raw_tmpl_file_ids) \
- set(raw_tmpl_files)
raw_tmpl_file_del = raw_template_files.delete().where(
raw_template_files.c.id.in_(raw_tmpl_file_ids))
engine.execute(raw_tmpl_file_del)
# purge any user creds that are no longer referenced
user_creds_ids = [i[3] for i in stacks if i[3] is not None]
if user_creds_ids:
# keep those still referenced
user_sel = sqlalchemy.select([stack.c.user_creds_id]).where(
stack.c.user_creds_id.in_(user_creds_ids))
users = [i[0] for i in engine.execute(user_sel)]
user_creds_ids = set(user_creds_ids) - set(users)
usr_creds_del = user_creds.delete().where(
user_creds.c.id.in_(user_creds_ids))
engine.execute(usr_creds_del)
# Purge deleted services
srvc_del = service.delete().where(service.c.deleted_at < time_line)
engine.execute(srvc_del)
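# Usage sketch (not part of the original module): purge_deleted() is meant to
# be driven from an admin/management command, e.g.:
#
#   purge_deleted(30)                          # stacks soft-deleted > 30 days ago
#   purge_deleted(6, 'hours', project_id=pid)  # limit the purge to one project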
def sync_point_delete_all_by_stack_and_traversal(context, stack_id,
traversal_id):
rows_deleted = context.session.query(models.SyncPoint).filter_by(
stack_id=stack_id, traversal_id=traversal_id).delete()
return rows_deleted
@oslo_db_api.wrap_db_retry(max_retries=3, retry_on_deadlock=True,
retry_interval=0.5, inc_retry_interval=True)
def sync_point_create(context, values):
values['entity_id'] = str(values['entity_id'])
sync_point_ref = models.SyncPoint()
sync_point_ref.update(values)
sync_point_ref.save(context.session)
return sync_point_ref
def sync_point_get(context, entity_id, traversal_id, is_update):
entity_id = str(entity_id)
return context.session.query(models.SyncPoint).get(
(entity_id, traversal_id, is_update)
)
def sync_point_update_input_data(context, entity_id,
traversal_id, is_update, atomic_key,
input_data):
entity_id = str(entity_id)
rows_updated = context.session.query(models.SyncPoint).filter_by(
entity_id=entity_id,
traversal_id=traversal_id,
is_update=is_update,
atomic_key=atomic_key
).update({"input_data": input_data, "atomic_key": atomic_key + 1})
return rows_updated
def db_sync(engine, version=None):
"""Migrate the database to `version` or the most recent version."""
if version is not None and int(version) < db_version(engine):
raise exception.Error(_("Cannot migrate to lower schema version."))
return migration.db_sync(engine, version=version)
def db_version(engine):
"""Display the current database version."""
return migration.db_version(engine)
def db_encrypt_parameters_and_properties(ctxt, encryption_key, batch_size=50,
verbose=False):
"""Encrypt parameters and properties for all templates in db.
:param ctxt: RPC context
:param encryption_key: key that will be used for parameter and property
encryption
:param batch_size: number of templates requested from db in each iteration.
50 means that heat requests 50 templates, encrypts them
and proceeds with the next 50 items.
:param verbose: log an INFO message when processing of each raw_template or
resource begins or ends
:return: list of exceptions encountered during encryption
"""
from heat.engine import template
session = get_session()
with session.begin():
query = session.query(models.RawTemplate)
excs = []
for raw_template in _get_batch(
session=session, ctxt=ctxt, query=query,
model=models.RawTemplate, batch_size=batch_size):
try:
if verbose:
LOG.info(_LI("Processing raw_template %(id)d..."),
{'id': raw_template.id})
tmpl = template.Template.load(
ctxt, raw_template.id, raw_template)
param_schemata = tmpl.param_schemata()
env = raw_template.environment
if (not env or
'parameters' not in env or
not param_schemata):
continue
if 'encrypted_param_names' in env:
encrypted_params = env['encrypted_param_names']
else:
encrypted_params = []
for param_name, param_val in env['parameters'].items():
if (param_name in encrypted_params or
param_name not in param_schemata or
not param_schemata[param_name].hidden):
continue
encrypted_val = crypt.encrypt(six.text_type(param_val),
encryption_key)
env['parameters'][param_name] = encrypted_val
encrypted_params.append(param_name)
if encrypted_params:
environment = env.copy()
environment['encrypted_param_names'] = encrypted_params
raw_template_update(ctxt, raw_template.id,
{'environment': environment})
except Exception as exc:
LOG.exception(_LE('Failed to encrypt parameters of raw '
'template %(id)d'), {'id': raw_template.id})
excs.append(exc)
continue
finally:
if verbose:
LOG.info(_LI("Finished processing raw_template "
"%(id)d."), {'id': raw_template.id})
query = session.query(models.Resource).filter(
~models.Resource.properties_data.is_(None),
~models.Resource.properties_data_encrypted.is_(True))
for resource in _get_batch(
session=session, ctxt=ctxt, query=query, model=models.Resource,
batch_size=batch_size):
try:
if verbose:
LOG.info(_LI("Processing resource %(id)d..."),
{'id': resource.id})
result = {}
if not resource.properties_data:
continue
for prop_name, prop_value in resource.properties_data.items():
prop_string = jsonutils.dumps(prop_value)
encrypted_value = crypt.encrypt(prop_string,
encryption_key)
result[prop_name] = encrypted_value
resource.properties_data = result
resource.properties_data_encrypted = True
resource_update(ctxt, resource.id,
{'properties_data': result,
'properties_data_encrypted': True},
resource.atomic_key)
except Exception as exc:
LOG.exception(_LE('Failed to encrypt properties_data of '
'resource %(id)d'), {'id': resource.id})
excs.append(exc)
continue
finally:
if verbose:
LOG.info(_LI("Finished processing resource "
"%(id)d."), {'id': resource.id})
return excs
def db_decrypt_parameters_and_properties(ctxt, encryption_key, batch_size=50,
verbose=False):
"""Decrypt parameters and properties for all templates in db.
:param ctxt: RPC context
:param encryption_key: key that will be used for parameter and property
decryption
:param batch_size: number of templates requested from db in each iteration.
50 means that heat requests 50 templates, decrypts them
and proceeds with the next 50 items.
:param verbose: log an INFO message when processing of each raw_template or
resource begins or ends
:return: list of exceptions encountered during decryption
"""
session = get_session()
excs = []
with session.begin():
query = session.query(models.RawTemplate)
for raw_template in _get_batch(
session=session, ctxt=ctxt, query=query,
model=models.RawTemplate, batch_size=batch_size):
try:
if verbose:
LOG.info(_LI("Processing raw_template %(id)d..."),
{'id': raw_template.id})
parameters = raw_template.environment['parameters']
encrypted_params = raw_template.environment[
'encrypted_param_names']
for param_name in encrypted_params:
method, value = parameters[param_name]
decrypted_val = crypt.decrypt(method, value,
encryption_key)
parameters[param_name] = decrypted_val
environment = raw_template.environment.copy()
environment['encrypted_param_names'] = []
raw_template_update(ctxt, raw_template.id,
{'environment': environment})
except Exception as exc:
LOG.exception(_LE('Failed to decrypt parameters of raw '
'template %(id)d'), {'id': raw_template.id})
excs.append(exc)
continue
finally:
if verbose:
LOG.info(_LI("Finished processing raw_template "
"%(id)d."), {'id': raw_template.id})
query = session.query(models.Resource).filter(
~models.Resource.properties_data.is_(None),
models.Resource.properties_data_encrypted.is_(True))
for resource in _get_batch(
session=session, ctxt=ctxt, query=query, model=models.Resource,
batch_size=batch_size):
try:
if verbose:
LOG.info(_LI("Processing resource %(id)d..."),
{'id': resource.id})
result = {}
for prop_name, prop_value in resource.properties_data.items():
method, value = prop_value
decrypted_value = crypt.decrypt(method, value,
encryption_key)
prop_string = jsonutils.loads(decrypted_value)
result[prop_name] = prop_string
resource.properties_data = result
resource.properties_data_encrypted = False
resource_update(ctxt, resource.id,
{'properties_data': result,
'properties_data_encrypted': False},
resource.atomic_key)
except Exception as exc:
LOG.exception(_LE('Failed to decrypt properties_data of '
'resource %(id)d'), {'id': resource.id})
excs.append(exc)
continue
finally:
if verbose:
LOG.info(_LI("Finished processing resource "
"%(id)d."), {'id': resource.id})
return excs
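# A minimal, hypothetical usage sketch (not part of the original module): a
# maintenance command could drive the decrypt helper above with an admin
# context and the configured key, then report how many rows failed. Both
# arguments are assumed to be supplied by the caller.
def _example_decrypt_all(ctxt, encryption_key):
    failures = db_decrypt_parameters_and_properties(
        ctxt, encryption_key, batch_size=50, verbose=True)
    if failures:
        LOG.warning('%d items could not be decrypted', len(failures))
    return failures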
def _get_batch(session, ctxt, query, model, batch_size=50):
last_batch_marker = None
while True:
results = _paginate_query(
context=ctxt, query=query, model=model, limit=batch_size,
marker=last_batch_marker).all()
if not results:
break
else:
for result in results:
yield result
last_batch_marker = results[-1].id
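# A minimal sketch (not part of the original module) of driving _get_batch
# directly, mirroring the loops in the encrypt/decrypt helpers above. The
# generator pages through the query with marker-based pagination, so each
# round trip fetches at most batch_size rows.
def _example_iterate_raw_templates(ctxt, batch_size=10):
    session = get_session()
    with session.begin():
        query = session.query(models.RawTemplate)
        for tmpl in _get_batch(session=session, ctxt=ctxt, query=query,
                               model=models.RawTemplate,
                               batch_size=batch_size):
            LOG.debug('Visiting raw_template %s', tmpl.id)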
def reset_stack_status(context, stack_id, stack=None):
if stack is None:
stack = context.session.query(models.Stack).get(stack_id)
if stack is None:
raise exception.NotFound(_('Stack with id %s not found') % stack_id)
session = context.session
with session.begin():
query = context.session.query(models.Resource).filter_by(
status='IN_PROGRESS', stack_id=stack_id)
query.update({'status': 'FAILED',
'status_reason': 'Stack status manually reset'})
query = context.session.query(models.ResourceData)
query = query.join(models.Resource)
query = query.filter_by(stack_id=stack_id)
query = query.filter(
models.ResourceData.key.in_(heat_environment.HOOK_TYPES))
data_ids = [data.id for data in query]
if data_ids:
query = context.session.query(models.ResourceData)
query = query.filter(models.ResourceData.id.in_(data_ids))
query.delete(synchronize_session='fetch')
query = context.session.query(models.Stack).filter_by(owner_id=stack_id)
for child in query:
reset_stack_status(context, child.id, child)
with session.begin():
if stack.status == 'IN_PROGRESS':
stack.status = 'FAILED'
stack.status_reason = 'Stack status manually reset'
session.query(
models.StackLock
).filter_by(stack_id=stack_id).delete()
|
cwolferh/heat-scratch
|
heat/db/sqlalchemy/api.py
|
Python
|
apache-2.0
| 57,779
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Tests the text output of Google C++ Mocking Framework.
To update the golden file:
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
from io import open # pylint: disable=redefined-builtin, g-importing-member
import os
import re
import sys
from googlemock.test import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveReportHeaderAndFooter(output):
"""Removes Google Test result report's header and footer from the output."""
output = re.sub(r'.*gtest_main.*\n', '', output)
output = re.sub(r'\[.*\d+ tests.*\n', '', output)
output = re.sub(r'\[.* test environment .*\n', '', output)
output = re.sub(r'\[=+\] \d+ tests .* ran.*', '', output)
output = re.sub(r'.* FAILED TESTS\n', '', output)
return output
def RemoveLocations(output):
"""Removes all file location info from a Google Test program's output.
Args:
output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\:', 'FILE:#:', output)
def NormalizeErrorMarker(output):
"""Normalizes the error marker, which is different on Windows vs on Linux."""
return re.sub(r' error: ', ' Failure\n', output)
def RemoveMemoryAddresses(output):
"""Removes memory addresses from the test output."""
return re.sub(r'@\w+', '@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
"""Removes the test names of leaked mock objects from the test output."""
return re.sub(r'\(used in test .+\) ', '', output)
def GetLeakyTests(output):
"""Returns a list of test names that leak mock objects."""
# findall() returns a list of all matches of the regex in output.
# For example, if '(used in test FooTest.Bar)' is in output, the
# list will contain 'FooTest.Bar'.
return re.findall(r'\(used in test (.+)\)', output)
def GetNormalizedOutputAndLeakyTests(output):
"""Normalizes the output of gmock_output_test_.
Args:
output: The test output.
Returns:
A tuple (the normalized test output, the list of test names that have
leaked mocks).
"""
output = ToUnixLineEnding(output)
output = RemoveReportHeaderAndFooter(output)
output = NormalizeErrorMarker(output)
output = RemoveLocations(output)
output = RemoveMemoryAddresses(output)
return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))
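# Illustrative sketch (not part of the original test): the scrubbing helpers
# above can be exercised on a hand-written line of fake gmock output; the
# sample string below is made up purely for demonstration.
def _ExampleScrubbing():
  line = 'ERROR: mock object leaked (used in test FooTest.Bar) @0x7fff1234'
  assert GetLeakyTests(line) == ['FooTest.Bar']
  assert RemoveMemoryAddresses(line).endswith('@0x#')
  assert RemoveTestNamesOfLeakedMocks(line) == (
      'ERROR: mock object leaked @0x7fff1234')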
def GetShellCommandOutput(cmd):
"""Runs a command in a sub-process, and returns its STDOUT in a string."""
return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
"""Runs a command and returns its normalized output and a list of leaky tests.
Args:
cmd: the shell command.
"""
# Disables exception pop-ups on Windows.
os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
return GetNormalizedOutputAndLeakyTests(GetShellCommandOutput(cmd))
class GMockOutputTest(gmock_test_utils.TestCase):
def testOutput(self):
(output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'rb')
golden = golden_file.read().decode('utf-8')
golden_file.close()
# The normalized output should match the golden file.
self.assertEquals(golden, output)
# The raw output should contain 2 leaked mock object errors for
# test GMockOutputTest.CatchesLeakedMocks.
self.assertEquals(['GMockOutputTest.CatchesLeakedMocks',
'GMockOutputTest.CatchesLeakedMocks'],
leaky_tests)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
(output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'wb')
    # Encode explicitly so the bytes-mode write also works under Python 3.
    golden_file.write(output.encode('utf-8'))
golden_file.close()
# Suppress the error "googletest was imported but a call to its main()
# was never detected."
os._exit(0)
else:
gmock_test_utils.Main()
|
grpc/grpc-ios
|
native_src/third_party/googletest/googlemock/test/gmock_output_test.py
|
Python
|
apache-2.0
| 6,175
|
import numpy as np
from nilearn.image.image import check_niimg
from nilearn.image.resampling import get_bounds
from nilearn.image.image import _crop_img_to as crop_img_to
def crop_img(img, rtol=1e-8, copy=True, return_slices=False, pad=True, percentile=None, return_affine=False):
"""Crops img as much as possible
Will crop img, removing as many zero entries as possible
without touching non-zero entries. Will leave one voxel of
zero padding around the obtained non-zero area in order to
avoid sampling issues later on.
Parameters
----------
img: Niimg-like object
See http://nilearn.github.io/manipulating_images/input_output.html
img to be cropped.
    rtol: float
        relative tolerance (with respect to maximal absolute
        value of the image), under which values are considered
        negligible and thus croppable.
copy: boolean
Specifies whether cropped data is copied or not.
return_slices: boolean
If True, the slices that define the cropped image will be returned.
    pad: boolean or integer
        If True, one voxel of padding is added in each direction. If an
        integer > 0, that many voxels of padding are added instead.
    percentile: integer or None
        If not None, voxels at or below the given percentile of the data are
        treated as background and cropped away.
    return_affine: boolean
        If True, return the affine and shape of the cropped region instead of
        the cropped image.
Returns
-------
cropped_img: image
Cropped version of the input image
"""
img = check_niimg(img)
data = img.get_data()
if percentile is not None:
passes_threshold = data > np.percentile(data, percentile)
else:
infinity_norm = max(-data.min(), data.max())
passes_threshold = np.logical_or(data < -rtol * infinity_norm,
data > rtol * infinity_norm)
if data.ndim == 4:
passes_threshold = np.any(passes_threshold, axis=-1)
coords = np.array(np.where(passes_threshold))
start = coords.min(axis=1)
end = coords.max(axis=1) + 1
if int(pad) > 0:
pad_width = int(pad)
# pad with one voxel to avoid resampling problems
start = np.maximum(start - pad_width, 0)
end = np.minimum(end + pad_width, data.shape[:3])
slices = [slice(s, e) for s, e in zip(start, end)]
if return_slices:
return slices
if return_affine:
return image_slices_to_affine(img, slices), end - start
return crop_img_to(img, slices, copy=copy)
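# Hypothetical usage sketch (not part of the original module), assuming
# nibabel is importable (nilearn depends on it): crop a synthetic image whose
# non-zero voxels fill a 10x10x10 cube. With the default one-voxel padding the
# cropped image should come out as 12x12x12.
def _example_crop_synthetic_image():
    import nibabel as nib
    data = np.zeros((32, 32, 32))
    data[10:20, 10:20, 10:20] = 1.0
    img = nib.Nifti1Image(data, affine=np.eye(4))
    cropped = crop_img(img, pad=True)
    return cropped.shape  # expected: (12, 12, 12)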
def image_slices_to_affine(image, slices):
affine = image.affine
linear_part = affine[:3, :3]
old_origin = affine[:3, 3]
new_origin_voxel = np.array([s.start for s in slices])
new_origin = old_origin + linear_part.dot(new_origin_voxel)
new_affine = np.eye(4)
new_affine[:3, :3] = linear_part
new_affine[:3, 3] = new_origin
return new_affine
def run_with_background_correction(func, image, background=None, returns_array=False, reset_background=True,
axis=(-3, -2, -1), **kwargs):
data = image.get_data()
if background is None:
background = get_background_values(data, axis=axis)
# set background to zero
data[:] -= background
# perform function on image
image = func(image, **kwargs)
# set the background back to what it was originally
if reset_background:
if returns_array:
# the function called should have returned an array
data = image
else:
# the function called should have returned an image
data = image.get_data()
data[:] += background
return image
def get_background_values(data, axis=(-3, -2, -1)):
background = data.min(axis=axis)
if isinstance(background, np.ndarray):
while len(background.shape) < len(data.shape):
background = background[..., None]
return background
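# Illustrative sketch (not part of the original module): for a 4D array and
# the default axis, get_background_values returns one minimum per volume,
# reshaped so it broadcasts cleanly against the original data.
def _example_background_broadcast():
    data = np.random.rand(5, 8, 8, 8)         # 5 volumes of shape 8x8x8
    background = get_background_values(data)   # shape (5, 1, 1, 1)
    return (data - background).shape           # (5, 8, 8, 8)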
def reorder_affine(affine, shape):
"""
Modified from nilearn.image.resampling.reorder_img and nilearn.image.resampling.resample_img
:param affine:
:param shape:
:return:
"""
Q, R = np.linalg.qr(affine[:3, :3])
_affine = np.diag(np.abs(np.diag(R))[np.abs(Q).argmax(axis=1)])
target_affine = np.eye(4)
target_affine[:3, :3] = _affine
transform_affine = np.linalg.inv(target_affine).dot(affine)
(xmin, xmax), (ymin, ymax), (zmin, zmax) = get_bounds(shape[:3], transform_affine)
offset = target_affine[:3, :3].dot([xmin, ymin, zmin])
target_affine[:3, 3] = offset
return target_affine
|
ellisdg/3DUnetCNN
|
unet3d/utils/nilearn_custom_utils/nilearn_utils.py
|
Python
|
mit
| 4,512
|
from cyder.api.v1.tests.base import APITests
from cyder.core.system.models import System
from cyder.cydns.nameserver.models import Nameserver
from cyder.cydns.soa.models import SOA
from cyder.cydns.tests.utils import create_zone
from cyder.cydhcp.interface.static_intr.models import StaticInterface
from cyder.cydhcp.range.models import Range
from cyder.cydhcp.network.models import Network
from cyder.cydns.domain.models import Domain
class StaticInterfaceV4API_Test(APITests):
__test__ = True
model = StaticInterface
def create_data(self):
Domain.objects.create(name='arpa')
system = System.objects.create(name="TestSystem", ctnr=self.ctnr)
Domain.objects.create(name='in-addr.arpa')
create_zone('11.in-addr.arpa')
net = Network.objects.create(
network_str='11.12.14.0/8', ip_type='4')
r = Range.objects.create(
network=net, range_type='st', ip_type='4',
start_str='11.12.14.253', end_str='11.12.14.254')
self.ctnr.ranges.add(r)
return StaticInterface.objects.create(
ctnr=self.ctnr, description='Test Static Interface', ttl=420,
mac='11:22:33:44:55:00', system=system, label='stat',
domain=self.domain, dhcp_enabled=False, dns_enabled=True,
ip_str='11.12.14.253', ip_type='4')
class StaticInterfaceV6API_Test(APITests):
__test__ = True
model = StaticInterface
def create_data(self):
Domain.objects.create(name='arpa')
system = System.objects.create(name="TestSystem", ctnr=self.ctnr)
Domain.objects.create(name='ip6.arpa')
create_zone('2.ip6.arpa')
net = Network.objects.create(network_str='2001::/16', ip_type='6')
r = Range.objects.create(
network=net, range_type='st', ip_type='6', start_str='2001::1',
end_str='2001:ffff:ffff:ffff:ffff:ffff:ffff:fffe')
self.ctnr.ranges.add(r)
return StaticInterface.objects.create(
ctnr=self.ctnr, description='Test Static Interface', ttl=420,
mac='11:22:33:44:55:00', system=system, label='stat',
domain=self.domain, dhcp_enabled=False, dns_enabled=True,
ip_str='2001:0db8:85a3:0000:0000:8a2e:0370:7344', ip_type='6')
|
murrown/cyder
|
cyder/api/v1/endpoints/dhcp/static_interface/tests.py
|
Python
|
bsd-3-clause
| 2,278
|
__author__ = 'RajivSubramanian'
|
rajivm1991/django-materialize-form
|
materializeform/templatetags/__init__.py
|
Python
|
mit
| 32
|
""" DIRECT Nine DoF Manipulation Panel """
__all__ = ['Placer', 'place']
# Import Tkinter, Pmw, and the dial code from this directory tree.
from pandac.PandaModules import *
from direct.showbase.TkGlobal import *
from direct.tkwidgets.AppShell import *
from direct.tkwidgets import Dial
from direct.tkwidgets import Floater
from direct.directtools.DirectGlobals import ZERO_VEC, UNIT_VEC
from Tkinter import *
import Pmw
"""
TODO:
Task to monitor pose
"""
class Placer(AppShell):
# Override class variables here
appname = 'Placer Panel'
frameWidth = 625
frameHeight = 215
usecommandarea = 0
usestatusarea = 0
def __init__(self, parent = None, **kw):
INITOPT = Pmw.INITOPT
optiondefs = (
('title', self.appname, None),
('nodePath', base.direct.camera, None),
)
self.defineoptions(kw, optiondefs)
# Call superclass initialization function
AppShell.__init__(self)
self.initialiseoptions(Placer)
def appInit(self):
# Initialize state
self.tempCS = base.direct.group.attachNewNode('placerTempCS')
self.orbitFromCS = base.direct.group.attachNewNode(
'placerOrbitFromCS')
self.orbitToCS = base.direct.group.attachNewNode('placerOrbitToCS')
self.refCS = self.tempCS
# Dictionary keeping track of all node paths manipulated so far
self.nodePathDict = {}
self.nodePathDict['camera'] = base.direct.camera
self.nodePathDict['widget'] = base.direct.widget
self.nodePathNames = ['camera', 'widget', 'selected']
self.refNodePathDict = {}
self.refNodePathDict['parent'] = self['nodePath'].getParent()
self.refNodePathDict['render'] = render
self.refNodePathDict['camera'] = base.direct.camera
self.refNodePathDict['widget'] = base.direct.widget
self.refNodePathNames = ['parent', 'self', 'render',
'camera', 'widget', 'selected']
# Initial state
self.initPos = Vec3(0)
self.initHpr = Vec3(0)
self.initScale = Vec3(1)
self.deltaHpr = Vec3(0)
# Offset for orbital mode
self.posOffset = Vec3(0)
# Set up event hooks
self.undoEvents = [('DIRECT_undo', self.undoHook),
('DIRECT_pushUndo', self.pushUndoHook),
('DIRECT_undoListEmpty', self.undoListEmptyHook),
('DIRECT_redo', self.redoHook),
('DIRECT_pushRedo', self.pushRedoHook),
('DIRECT_redoListEmpty', self.redoListEmptyHook)]
for event, method in self.undoEvents:
self.accept(event, method)
# Init movement mode
self.movementMode = 'Relative To:'
def createInterface(self):
# The interior of the toplevel panel
interior = self.interior()
interior['relief'] = FLAT
# Add placer commands to menubar
self.menuBar.addmenu('Placer', 'Placer Panel Operations')
self.menuBar.addmenuitem('Placer', 'command',
'Zero Node Path',
label = 'Zero All',
command = self.zeroAll)
self.menuBar.addmenuitem('Placer', 'command',
'Reset Node Path',
label = 'Reset All',
command = self.resetAll)
self.menuBar.addmenuitem('Placer', 'command',
'Print Node Path Info',
label = 'Print Info',
command = self.printNodePathInfo)
self.menuBar.addmenuitem(
'Placer', 'command',
            'Toggle widget visibility',
label = 'Toggle Widget Vis',
command = base.direct.toggleWidgetVis)
self.menuBar.addmenuitem(
'Placer', 'command',
'Toggle widget manipulation mode',
label = 'Toggle Widget Mode',
command = base.direct.manipulationControl.toggleObjectHandlesMode)
# Get a handle to the menu frame
menuFrame = self.menuFrame
self.nodePathMenu = Pmw.ComboBox(
menuFrame, labelpos = W, label_text = 'Node Path:',
entry_width = 20,
selectioncommand = self.selectNodePathNamed,
scrolledlist_items = self.nodePathNames)
self.nodePathMenu.selectitem('selected')
self.nodePathMenuEntry = (
self.nodePathMenu.component('entryfield_entry'))
self.nodePathMenuBG = (
self.nodePathMenuEntry.configure('background')[3])
self.nodePathMenu.pack(side = 'left', fill = 'x', expand = 1)
self.bind(self.nodePathMenu, 'Select node path to manipulate')
modeMenu = Pmw.OptionMenu(menuFrame,
items = ('Relative To:',
'Orbit:'),
initialitem = 'Relative To:',
command = self.setMovementMode,
menubutton_width = 8)
modeMenu.pack(side = 'left', expand = 0)
self.bind(modeMenu, 'Select manipulation mode')
self.refNodePathMenu = Pmw.ComboBox(
menuFrame, entry_width = 16,
selectioncommand = self.selectRefNodePathNamed,
scrolledlist_items = self.refNodePathNames)
self.refNodePathMenu.selectitem('parent')
self.refNodePathMenuEntry = (
self.refNodePathMenu.component('entryfield_entry'))
self.refNodePathMenu.pack(side = 'left', fill = 'x', expand = 1)
self.bind(self.refNodePathMenu, 'Select relative node path')
self.undoButton = Button(menuFrame, text = 'Undo',
command = base.direct.undo)
if base.direct.undoList:
self.undoButton['state'] = 'normal'
else:
self.undoButton['state'] = 'disabled'
self.undoButton.pack(side = 'left', expand = 0)
self.bind(self.undoButton, 'Undo last operation')
self.redoButton = Button(menuFrame, text = 'Redo',
command = base.direct.redo)
if base.direct.redoList:
self.redoButton['state'] = 'normal'
else:
self.redoButton['state'] = 'disabled'
self.redoButton.pack(side = 'left', expand = 0)
self.bind(self.redoButton, 'Redo last operation')
# Create and pack the Pos Controls
posGroup = Pmw.Group(interior,
tag_pyclass = Menubutton,
tag_text = 'Position',
tag_font=('MSSansSerif', 14),
tag_activebackground = '#909090',
ring_relief = RIDGE)
posMenubutton = posGroup.component('tag')
self.bind(posMenubutton, 'Position menu operations')
posMenu = Menu(posMenubutton, tearoff = 0)
posMenu.add_command(label = 'Set to zero', command = self.zeroPos)
posMenu.add_command(label = 'Reset initial',
command = self.resetPos)
posMenubutton['menu'] = posMenu
posGroup.pack(side='left', fill = 'both', expand = 1)
posInterior = posGroup.interior()
# Create the dials
self.posX = self.createcomponent('posX', (), None,
Floater.Floater, (posInterior,),
text = 'X', relief = FLAT,
value = 0.0,
label_foreground = 'Red')
self.posX['commandData'] = ['x']
self.posX['preCallback'] = self.xformStart
self.posX['postCallback'] = self.xformStop
self.posX['callbackData'] = ['x']
self.posX.pack(expand=1, fill='both')
self.posY = self.createcomponent('posY', (), None,
Floater.Floater, (posInterior,),
text = 'Y', relief = FLAT,
value = 0.0,
label_foreground = '#00A000')
self.posY['commandData'] = ['y']
self.posY['preCallback'] = self.xformStart
self.posY['postCallback'] = self.xformStop
self.posY['callbackData'] = ['y']
self.posY.pack(expand=1, fill='both')
self.posZ = self.createcomponent('posZ', (), None,
Floater.Floater, (posInterior,),
text = 'Z', relief = FLAT,
value = 0.0,
label_foreground = 'Blue')
self.posZ['commandData'] = ['z']
self.posZ['preCallback'] = self.xformStart
self.posZ['postCallback'] = self.xformStop
self.posZ['callbackData'] = ['z']
self.posZ.pack(expand=1, fill='both')
# Create and pack the Hpr Controls
hprGroup = Pmw.Group(interior,
tag_pyclass = Menubutton,
tag_text = 'Orientation',
tag_font=('MSSansSerif', 14),
tag_activebackground = '#909090',
ring_relief = RIDGE)
hprMenubutton = hprGroup.component('tag')
self.bind(hprMenubutton, 'Orientation menu operations')
hprMenu = Menu(hprMenubutton, tearoff = 0)
hprMenu.add_command(label = 'Set to zero', command = self.zeroHpr)
hprMenu.add_command(label = 'Reset initial', command = self.resetHpr)
hprMenubutton['menu'] = hprMenu
hprGroup.pack(side='left', fill = 'both', expand = 1)
hprInterior = hprGroup.interior()
# Create the dials
self.hprH = self.createcomponent('hprH', (), None,
Dial.AngleDial, (hprInterior,),
style = 'mini',
text = 'H', value = 0.0,
relief = FLAT,
label_foreground = 'blue')
self.hprH['commandData'] = ['h']
self.hprH['preCallback'] = self.xformStart
self.hprH['postCallback'] = self.xformStop
self.hprH['callbackData'] = ['h']
self.hprH.pack(expand=1, fill='both')
self.hprP = self.createcomponent('hprP', (), None,
Dial.AngleDial, (hprInterior,),
style = 'mini',
text = 'P', value = 0.0,
relief = FLAT,
label_foreground = 'red')
self.hprP['commandData'] = ['p']
self.hprP['preCallback'] = self.xformStart
self.hprP['postCallback'] = self.xformStop
self.hprP['callbackData'] = ['p']
self.hprP.pack(expand=1, fill='both')
self.hprR = self.createcomponent('hprR', (), None,
Dial.AngleDial, (hprInterior,),
style = 'mini',
text = 'R', value = 0.0,
relief = FLAT,
label_foreground = '#00A000')
self.hprR['commandData'] = ['r']
self.hprR['preCallback'] = self.xformStart
self.hprR['postCallback'] = self.xformStop
self.hprR['callbackData'] = ['r']
self.hprR.pack(expand=1, fill='both')
# Create and pack the Scale Controls
# The available scaling modes
self.scalingMode = StringVar()
self.scalingMode.set('Scale Uniform')
# The scaling widgets
scaleGroup = Pmw.Group(interior,
tag_text = 'Scale Uniform',
tag_pyclass = Menubutton,
tag_font=('MSSansSerif', 14),
tag_activebackground = '#909090',
ring_relief = RIDGE)
self.scaleMenubutton = scaleGroup.component('tag')
self.bind(self.scaleMenubutton, 'Scale menu operations')
self.scaleMenubutton['textvariable'] = self.scalingMode
# Scaling menu
scaleMenu = Menu(self.scaleMenubutton, tearoff = 0)
scaleMenu.add_command(label = 'Set to unity',
command = self.unitScale)
scaleMenu.add_command(label = 'Reset initial',
command = self.resetScale)
scaleMenu.add_radiobutton(label = 'Scale Free',
variable = self.scalingMode)
scaleMenu.add_radiobutton(label = 'Scale Uniform',
variable = self.scalingMode)
scaleMenu.add_radiobutton(label = 'Scale Proportional',
variable = self.scalingMode)
self.scaleMenubutton['menu'] = scaleMenu
# Pack group widgets
scaleGroup.pack(side='left', fill = 'both', expand = 1)
scaleInterior = scaleGroup.interior()
# Create the dials
self.scaleX = self.createcomponent('scaleX', (), None,
Floater.Floater, (scaleInterior,),
text = 'X Scale',
relief = FLAT,
min = 0.0001, value = 1.0,
resetValue = 1.0,
label_foreground = 'Red')
self.scaleX['commandData'] = ['sx']
self.scaleX['callbackData'] = ['sx']
self.scaleX['preCallback'] = self.xformStart
self.scaleX['postCallback'] = self.xformStop
self.scaleX.pack(expand=1, fill='both')
self.scaleY = self.createcomponent('scaleY', (), None,
Floater.Floater, (scaleInterior,),
text = 'Y Scale',
relief = FLAT,
min = 0.0001, value = 1.0,
resetValue = 1.0,
label_foreground = '#00A000')
self.scaleY['commandData'] = ['sy']
self.scaleY['callbackData'] = ['sy']
self.scaleY['preCallback'] = self.xformStart
self.scaleY['postCallback'] = self.xformStop
self.scaleY.pack(expand=1, fill='both')
self.scaleZ = self.createcomponent('scaleZ', (), None,
Floater.Floater, (scaleInterior,),
text = 'Z Scale',
relief = FLAT,
min = 0.0001, value = 1.0,
resetValue = 1.0,
label_foreground = 'Blue')
self.scaleZ['commandData'] = ['sz']
self.scaleZ['callbackData'] = ['sz']
self.scaleZ['preCallback'] = self.xformStart
self.scaleZ['postCallback'] = self.xformStop
self.scaleZ.pack(expand=1, fill='both')
# Make sure appropriate labels are showing
self.setMovementMode('Relative To:')
        # Set up placer for initial node path
self.selectNodePathNamed('init')
self.selectRefNodePathNamed('parent')
        # Update placer to reflect initial state
self.updatePlacer()
# Now that you're done setting up, attach commands
self.posX['command'] = self.xform
self.posY['command'] = self.xform
self.posZ['command'] = self.xform
self.hprH['command'] = self.xform
self.hprP['command'] = self.xform
self.hprR['command'] = self.xform
self.scaleX['command'] = self.xform
self.scaleY['command'] = self.xform
self.scaleZ['command'] = self.xform
### WIDGET OPERATIONS ###
def setMovementMode(self, movementMode):
# Set prefix
namePrefix = ''
self.movementMode = movementMode
if (movementMode == 'Relative To:'):
namePrefix = 'Relative '
elif (movementMode == 'Orbit:'):
namePrefix = 'Orbit '
# Update pos widgets
self.posX['text'] = namePrefix + 'X'
self.posY['text'] = namePrefix + 'Y'
self.posZ['text'] = namePrefix + 'Z'
# Update hpr widgets
if (movementMode == 'Orbit:'):
namePrefix = 'Orbit delta '
self.hprH['text'] = namePrefix + 'H'
self.hprP['text'] = namePrefix + 'P'
self.hprR['text'] = namePrefix + 'R'
# Update temp cs and initialize widgets
self.updatePlacer()
def setScalingMode(self):
if self['nodePath']:
scale = self['nodePath'].getScale()
if ((scale[0] != scale[1]) or
(scale[0] != scale[2]) or
(scale[1] != scale[2])):
self.scalingMode.set('Scale Free')
def selectNodePathNamed(self, name):
nodePath = None
if name == 'init':
nodePath = self['nodePath']
# Add Combo box entry for the initial node path
self.addNodePath(nodePath)
elif name == 'selected':
nodePath = base.direct.selected.last
# Add Combo box entry for this selected object
self.addNodePath(nodePath)
else:
nodePath = self.nodePathDict.get(name, None)
if (nodePath == None):
# See if this evaluates into a node path
try:
nodePath = eval(name)
if isinstance(nodePath, NodePath):
self.addNodePath(nodePath)
else:
# Good eval but not a node path, give up
nodePath = None
except:
# Bogus eval
nodePath = None
# Clear bogus entry from listbox
listbox = self.nodePathMenu.component('scrolledlist')
listbox.setlist(self.nodePathNames)
else:
if name == 'widget':
# Record relationship between selected nodes and widget
base.direct.selected.getWrtAll()
# Update active node path
self.setActiveNodePath(nodePath)
def setActiveNodePath(self, nodePath):
self['nodePath'] = nodePath
if self['nodePath']:
self.nodePathMenuEntry.configure(
background = self.nodePathMenuBG)
# Check to see if node path and ref node path are the same
if ((self.refCS != None) and
(self.refCS.id() == self['nodePath'].id())):
# Yes they are, use temp CS as ref
# This calls updatePlacer
self.setReferenceNodePath(self.tempCS)
# update listbox accordingly
self.refNodePathMenu.selectitem('parent')
else:
# Record initial value and initialize the widgets
self.updatePlacer()
# Record initial position
self.updateResetValues(self['nodePath'])
# Set scaling mode based on node path's current scale
self.setScalingMode()
else:
# Flash entry
self.nodePathMenuEntry.configure(background = 'Pink')
def selectRefNodePathNamed(self, name):
nodePath = None
if name == 'self':
nodePath = self.tempCS
elif name == 'selected':
nodePath = base.direct.selected.last
# Add Combo box entry for this selected object
self.addRefNodePath(nodePath)
elif name == 'parent':
nodePath = self['nodePath'].getParent()
else:
nodePath = self.refNodePathDict.get(name, None)
if (nodePath == None):
# See if this evaluates into a node path
try:
nodePath = eval(name)
if isinstance(nodePath, NodePath):
self.addRefNodePath(nodePath)
else:
# Good eval but not a node path, give up
nodePath = None
except:
# Bogus eval
nodePath = None
# Clear bogus entry from listbox
listbox = self.refNodePathMenu.component('scrolledlist')
listbox.setlist(self.refNodePathNames)
# Check to see if node path and ref node path are the same
if (nodePath != None) and (nodePath.id() == self['nodePath'].id()):
# Yes they are, use temp CS and update listbox accordingly
nodePath = self.tempCS
self.refNodePathMenu.selectitem('parent')
# Update ref node path
self.setReferenceNodePath(nodePath)
def setReferenceNodePath(self, nodePath):
self.refCS = nodePath
if self.refCS:
self.refNodePathMenuEntry.configure(
background = self.nodePathMenuBG)
# Update placer to reflect new state
self.updatePlacer()
else:
# Flash entry
self.refNodePathMenuEntry.configure(background = 'Pink')
def addNodePath(self, nodePath):
self.addNodePathToDict(nodePath, self.nodePathNames,
self.nodePathMenu, self.nodePathDict)
def addRefNodePath(self, nodePath):
self.addNodePathToDict(nodePath, self.refNodePathNames,
self.refNodePathMenu, self.refNodePathDict)
def addNodePathToDict(self, nodePath, names, menu, dict):
if not nodePath:
return
# Get node path's name
name = nodePath.getName()
if name in ['parent', 'render', 'camera']:
dictName = name
else:
# Generate a unique name for the dict
dictName = name + '-' + repr(nodePath.id())
if dictName not in dict:
# Update combo box to include new item
names.append(dictName)
listbox = menu.component('scrolledlist')
listbox.setlist(names)
# Add new item to dictionary
dict[dictName] = nodePath
menu.selectitem(dictName)
def updatePlacer(self):
pos = Vec3(0)
hpr = Vec3(0)
scale = Vec3(1)
np = self['nodePath']
if (np != None) and isinstance(np, NodePath):
# Update temp CS
self.updateAuxiliaryCoordinateSystems()
# Update widgets
if self.movementMode == 'Orbit:':
pos.assign(self.posOffset)
hpr.assign(ZERO_VEC)
scale.assign(np.getScale())
elif self.refCS:
pos.assign(np.getPos(self.refCS))
hpr.assign(np.getHpr(self.refCS))
scale.assign(np.getScale())
self.updatePosWidgets(pos)
self.updateHprWidgets(hpr)
self.updateScaleWidgets(scale)
def updateAuxiliaryCoordinateSystems(self):
# Temp CS
self.tempCS.setPosHpr(self['nodePath'], 0, 0, 0, 0, 0, 0)
# Orbit CS
# At reference
self.orbitFromCS.setPos(self.refCS, 0, 0, 0)
# But aligned with target
self.orbitFromCS.setHpr(self['nodePath'], 0, 0, 0)
# Also update to CS
self.orbitToCS.setPosHpr(self.orbitFromCS, 0, 0, 0, 0, 0, 0)
# Get offset from origin
self.posOffset.assign(self['nodePath'].getPos(self.orbitFromCS))
### NODE PATH TRANSFORMATION OPERATIONS ###
def xform(self, value, axis):
if axis in ['sx', 'sy', 'sz']:
self.xformScale(value, axis)
elif self.movementMode == 'Relative To:':
self.xformRelative(value, axis)
elif self.movementMode == 'Orbit:':
self.xformOrbit(value, axis)
if self.nodePathMenu.get() == 'widget':
if base.direct.manipulationControl.fSetCoa:
# Update coa based on current widget position
base.direct.selected.last.mCoa2Dnp.assign(
base.direct.widget.getMat(base.direct.selected.last))
else:
# Move the objects with the widget
base.direct.selected.moveWrtWidgetAll()
def xformStart(self, data):
# Record undo point
self.pushUndo()
# If moving widget kill follow task and update wrts
if self.nodePathMenu.get() == 'widget':
taskMgr.remove('followSelectedNodePath')
# Record relationship between selected nodes and widget
base.direct.selected.getWrtAll()
# Record initial state
self.deltaHpr = self['nodePath'].getHpr(self.refCS)
# Update placer to reflect new state
self.updatePlacer()
def xformStop(self, data):
# Throw event to signal manipulation done
# Send nodepath as a list
messenger.send('DIRECT_manipulateObjectCleanup', [[self['nodePath']]])
# Update placer to reflect new state
self.updatePlacer()
# If moving widget restart follow task
if self.nodePathMenu.get() == 'widget':
# Restart followSelectedNodePath task
base.direct.manipulationControl.spawnFollowSelectedNodePathTask()
def xformRelative(self, value, axis):
nodePath = self['nodePath']
if (nodePath != None) and (self.refCS != None):
if axis == 'x':
nodePath.setX(self.refCS, value)
elif axis == 'y':
nodePath.setY(self.refCS, value)
elif axis == 'z':
nodePath.setZ(self.refCS, value)
else:
if axis == 'h':
self.deltaHpr.setX(value)
elif axis == 'p':
self.deltaHpr.setY(value)
elif axis == 'r':
self.deltaHpr.setZ(value)
# Put node path at new hpr
nodePath.setHpr(self.refCS, self.deltaHpr)
def xformOrbit(self, value, axis):
nodePath = self['nodePath']
if ((nodePath != None) and (self.refCS != None) and
(self.orbitFromCS != None) and (self.orbitToCS != None)):
if axis == 'x':
self.posOffset.setX(value)
elif axis == 'y':
self.posOffset.setY(value)
elif axis == 'z':
self.posOffset.setZ(value)
elif axis == 'h':
self.orbitToCS.setH(self.orbitFromCS, value)
elif axis == 'p':
self.orbitToCS.setP(self.orbitFromCS, value)
elif axis == 'r':
self.orbitToCS.setR(self.orbitFromCS, value)
nodePath.setPosHpr(self.orbitToCS, self.posOffset, ZERO_VEC)
def xformScale(self, value, axis):
if self['nodePath']:
mode = self.scalingMode.get()
scale = self['nodePath'].getScale()
if mode == 'Scale Free':
if axis == 'sx':
scale.setX(value)
elif axis == 'sy':
scale.setY(value)
elif axis == 'sz':
scale.setZ(value)
elif mode == 'Scale Uniform':
scale.set(value, value, value)
elif mode == 'Scale Proportional':
if axis == 'sx':
sf = value/scale[0]
elif axis == 'sy':
sf = value/scale[1]
elif axis == 'sz':
sf = value/scale[2]
scale = scale * sf
self['nodePath'].setScale(scale)
def updatePosWidgets(self, pos):
self.posX.set(pos[0])
self.posY.set(pos[1])
self.posZ.set(pos[2])
def updateHprWidgets(self, hpr):
self.hprH.set(hpr[0])
self.hprP.set(hpr[1])
self.hprR.set(hpr[2])
def updateScaleWidgets(self, scale):
self.scaleX.set(scale[0])
self.scaleY.set(scale[1])
self.scaleZ.set(scale[2])
def zeroAll(self):
self.xformStart(None)
self.updatePosWidgets(ZERO_VEC)
self.updateHprWidgets(ZERO_VEC)
self.updateScaleWidgets(UNIT_VEC)
self.xformStop(None)
def zeroPos(self):
self.xformStart(None)
self.updatePosWidgets(ZERO_VEC)
self.xformStop(None)
def zeroHpr(self):
self.xformStart(None)
self.updateHprWidgets(ZERO_VEC)
self.xformStop(None)
def unitScale(self):
self.xformStart(None)
self.updateScaleWidgets(UNIT_VEC)
self.xformStop(None)
def updateResetValues(self, nodePath):
self.initPos.assign(nodePath.getPos())
self.posX['resetValue'] = self.initPos[0]
self.posY['resetValue'] = self.initPos[1]
self.posZ['resetValue'] = self.initPos[2]
self.initHpr.assign(nodePath.getHpr())
self.hprH['resetValue'] = self.initHpr[0]
self.hprP['resetValue'] = self.initHpr[1]
self.hprR['resetValue'] = self.initHpr[2]
self.initScale.assign(nodePath.getScale())
self.scaleX['resetValue'] = self.initScale[0]
self.scaleY['resetValue'] = self.initScale[1]
self.scaleZ['resetValue'] = self.initScale[2]
def resetAll(self):
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setPosHprScale(
self.initPos, self.initHpr, self.initScale)
self.xformStop(None)
def resetPos(self):
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setPos(self.initPos)
self.xformStop(None)
def resetHpr(self):
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setHpr(self.initHpr)
self.xformStop(None)
def resetScale(self):
if self['nodePath']:
self.xformStart(None)
self['nodePath'].setScale(self.initScale)
self.xformStop(None)
def pushUndo(self, fResetRedo = 1):
base.direct.pushUndo([self['nodePath']])
def undoHook(self, nodePathList = []):
# Reflect new changes
self.updatePlacer()
def pushUndoHook(self):
# Make sure button is reactivated
self.undoButton.configure(state = 'normal')
def undoListEmptyHook(self):
# Make sure button is deactivated
self.undoButton.configure(state = 'disabled')
def pushRedo(self):
base.direct.pushRedo([self['nodePath']])
def redoHook(self, nodePathList = []):
# Reflect new changes
self.updatePlacer()
def pushRedoHook(self):
# Make sure button is reactivated
self.redoButton.configure(state = 'normal')
def redoListEmptyHook(self):
# Make sure button is deactivated
self.redoButton.configure(state = 'disabled')
def printNodePathInfo(self):
np = self['nodePath']
if np:
name = np.getName()
pos = np.getPos()
hpr = np.getHpr()
scale = np.getScale()
posString = '%.2f, %.2f, %.2f' % (pos[0], pos[1], pos[2])
hprString = '%.2f, %.2f, %.2f' % (hpr[0], hpr[1], hpr[2])
scaleString = '%.2f, %.2f, %.2f' % (scale[0], scale[1], scale[2])
print 'NodePath: %s' % name
print 'Pos: %s' % posString
print 'Hpr: %s' % hprString
print 'Scale: %s' % scaleString
print ('%s.setPosHprScale(%s, %s, %s)' %
(name, posString, hprString, scaleString))
def onDestroy(self, event):
# Remove hooks
for event, method in self.undoEvents:
self.ignore(event)
self.tempCS.removeNode()
self.orbitFromCS.removeNode()
self.orbitToCS.removeNode()
def place(nodePath):
return Placer(nodePath = nodePath)
######################################################################
# Create demo in root window for testing.
if __name__ == '__main__':
root = Pmw.initialise()
widget = Placer()
|
toontownfunserver/Panda3D-1.9.0
|
direct/tkpanels/Placer.py
|
Python
|
bsd-3-clause
| 32,632
|
# -*- coding: utf-8 -*-
#
# PyMW documentation build configuration file, created by
# sphinx-quickstart on Tue Jun 30 11:57:17 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyMW'
copyright = u'2009, Eric Heien'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyMWdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PyMW.tex', u'PyMW Documentation',
u'Eric Heien', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
eheien/pymw
|
doc/conf.py
|
Python
|
mit
| 6,272
|
"""
Minimal (and limited) RPython version of some functions contained in os.path.
"""
import os, stat
from rpython.rlib import rposix
# ____________________________________________________________
#
# Generic implementations in RPython for both POSIX and NT
#
def risdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except OSError:
return False
return stat.S_ISDIR(st.st_mode)
# ____________________________________________________________
#
# POSIX-only implementations
#
def _posix_risabs(s):
"""Test whether a path is absolute"""
return s.startswith('/')
def _posix_rnormpath(path):
"""Normalize path, eliminating double slashes, etc."""
slash, dot = '/', '.'
if path == '':
return dot
initial_slashes = path.startswith('/')
# POSIX allows one or two initial slashes, but treats three or more
# as single slash.
if (initial_slashes and
path.startswith('//') and not path.startswith('///')):
initial_slashes = 2
comps = path.split('/')
new_comps = []
for comp in comps:
if comp == '' or comp == '.':
continue
if (comp != '..' or (not initial_slashes and not new_comps) or
(new_comps and new_comps[-1] == '..')):
new_comps.append(comp)
elif new_comps:
new_comps.pop()
comps = new_comps
path = slash.join(comps)
if initial_slashes:
path = slash*initial_slashes + path
return path or dot
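# Illustrative sketch (not part of the original module): on simple inputs the
# POSIX helpers behave like their os.path counterparts.
def _example_posix_helpers():
    assert _posix_rnormpath('/a//b/../c') == '/a/c'
    assert _posix_rnormpath('') == '.'
    assert _posix_rjoin('/usr', 'lib') == '/usr/lib'
    assert _posix_rjoin('/usr', '/etc') == '/etc'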
def _posix_rabspath(path):
"""Return an absolute, **non-normalized** path.
**This version does not let exceptions propagate.**"""
try:
if not _posix_risabs(path):
cwd = os.getcwd()
path = _posix_rjoin(cwd, path)
return _posix_rnormpath(path)
except OSError:
return path
def _posix_rjoin(a, b):
"""Join two pathname components, inserting '/' as needed.
If the second component is an absolute path, the first one
will be discarded. An empty last part will result in a path that
ends with a separator."""
path = a
if b.startswith('/'):
path = b
elif path == '' or path.endswith('/'):
path += b
else:
path += '/' + b
return path
# ____________________________________________________________
#
# NT-only implementations
#
def _nt_risabs(s):
"""Test whether a path is absolute"""
s = _nt_rsplitdrive(s)[1]
return s.startswith('/') or s.startswith('\\')
def _nt_rnormpath(path):
"""Normalize path, eliminating double slashes, etc."""
backslash, dot = '\\', '.'
if path.startswith(('\\\\.\\', '\\\\?\\')):
# in the case of paths with these prefixes:
# \\.\ -> device names
# \\?\ -> literal paths
# do not do any normalization, but return the path unchanged
return path
path = path.replace("/", "\\")
prefix, path = _nt_rsplitdrive(path)
# We need to be careful here. If the prefix is empty, and the path starts
# with a backslash, it could either be an absolute path on the current
# drive (\dir1\dir2\file) or a UNC filename (\\server\mount\dir1\file). It
# is therefore imperative NOT to collapse multiple backslashes blindly in
# that case.
# The code below preserves multiple backslashes when there is no drive
# letter. This means that the invalid filename \\\a\b is preserved
# unchanged, where a\\\b is normalised to a\b. It's not clear that there
# is any better behaviour for such edge cases.
if prefix == '':
# No drive letter - preserve initial backslashes
while path.startswith("\\"):
prefix = prefix + backslash
path = path[1:]
else:
# We have a drive letter - collapse initial backslashes
if path.startswith("\\"):
prefix = prefix + backslash
path = path.lstrip("\\")
comps = path.split("\\")
i = 0
while i < len(comps):
if comps[i] in ('.', ''):
del comps[i]
elif comps[i] == '..':
if i > 0 and comps[i-1] != '..':
del comps[i-1:i+1]
i -= 1
elif i == 0 and prefix.endswith("\\"):
del comps[i]
else:
i += 1
else:
i += 1
# If the path is now empty, substitute '.'
if not prefix and not comps:
comps.append(dot)
return prefix + backslash.join(comps)
def _nt_rabspath(path):
try:
if path == '':
path = os.getcwd()
return rposix._getfullpathname(path)
except OSError:
return path
def _nt_rsplitdrive(p):
"""Split a pathname into drive/UNC sharepoint and relative path
specifiers.
Returns a 2-tuple (drive_or_unc, path); either part may be empty.
"""
if len(p) > 1:
normp = p.replace(altsep, sep)
if normp.startswith('\\\\') and not normp.startswith('\\\\\\'):
# is a UNC path:
# vvvvvvvvvvvvvvvvvvvv drive letter or UNC path
# \\machine\mountpoint\directory\etc\...
# directory ^^^^^^^^^^^^^^^
index = normp.find('\\', 2)
if index < 0:
return '', p
index2 = normp.find('\\', index + 1)
# a UNC path can't have two slashes in a row
# (after the initial two)
if index2 == index + 1:
return '', p
if index2 < 0:
index2 = len(p)
return p[:index2], p[index2:]
if normp[1] == ':':
return p[:2], p[2:]
return '', p
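# Illustrative sketch (not part of the original module): drive-letter paths
# split after the colon, while UNC paths split after the \\server\share
# prefix.
def _example_nt_splitdrive():
    assert _nt_rsplitdrive('C:\\Temp\\x.txt') == ('C:', '\\Temp\\x.txt')
    assert _nt_rsplitdrive('\\\\server\\share\\dir') == ('\\\\server\\share',
                                                         '\\dir')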
def _nt_rjoin(path, p):
"""Join two or more pathname components, inserting "\\" as needed."""
result_drive, result_path = _nt_rsplitdrive(path)
p_drive, p_path = _nt_rsplitdrive(p)
p_is_rel = True
if p_path and p_path[0] in '\\/':
# Second path is absolute
if p_drive or not result_drive:
result_drive = p_drive
result_path = p_path
p_is_rel = False
elif p_drive and p_drive != result_drive:
if p_drive.lower() != result_drive.lower():
# Different drives => ignore the first path entirely
result_drive = p_drive
result_path = p_path
p_is_rel = False
else:
# Same drive in different case
result_drive = p_drive
if p_is_rel:
# Second path is relative to the first
if result_path and result_path[-1] not in '\\/':
result_path = result_path + '\\'
result_path = result_path + p_path
## add separator between UNC and non-absolute path
if (result_path and result_path[0] not in '\\/' and
result_drive and result_drive[-1] != ':'):
return result_drive + '\\' + result_path
return result_drive + result_path
# ____________________________________________________________
if os.name == 'posix':
sep = altsep = '/'
risabs = _posix_risabs
rnormpath = _posix_rnormpath
rabspath = _posix_rabspath
rjoin = _posix_rjoin
elif os.name == 'nt':
sep, altsep = '\\', '/'
risabs = _nt_risabs
rnormpath = _nt_rnormpath
rabspath = _nt_rabspath
rsplitdrive = _nt_rsplitdrive
rjoin = _nt_rjoin
else:
raise ImportError('Unsupported os: %s' % os.name)
|
jptomo/rpython-lang-scheme
|
rpython/rlib/rpath.py
|
Python
|
mit
| 7,407
|
from mediadrop.lib.auth.group_based_policy import *
|
kgao/MediaDrop
|
mediacore/lib/auth/group_based_policy.py
|
Python
|
gpl-3.0
| 52
|
# Copyright 2016 RedHat, inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible import constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
from ansible.module_utils.six import string_types
from ansible.parsing.utils.addresses import parse_address
class InventoryParser(object):
"""
Takes a YAML-format inventory file and builds a list of groups and subgroups
with their associated hosts and variable settings.
"""
def __init__(self, loader, groups, filename=C.DEFAULT_HOST_LIST):
self._loader = loader
self.filename = filename
# Start with an empty host list and whatever groups we're passed in
# (which should include the default 'all' and 'ungrouped' groups).
self.hosts = {}
self.patterns = {}
self.groups = groups
# Read in the hosts, groups, and variables defined in the
# inventory file.
data = loader.load_from_file(filename)
self._parse(data)
def _parse(self, data):
'''
        Populates self.groups from the parsed YAML data structure. Raises an
        error on any parse failure.
'''
self._compile_patterns()
        # We expect top-level keys to correspond to groups; iterate over them
        # to get hosts, vars and subgroups (which we recurse into)
for group_name in data.keys():
self._parse_groups(group_name, data[group_name])
# Finally, add all top-level groups as children of 'all'.
# We exclude ungrouped here because it was already added as a child of
# 'all' at the time it was created.
for group in self.groups.values():
if group.depth == 0 and group.name not in ('all', 'ungrouped'):
                self.groups['all'].add_child_group(group)
def _parse_groups(self, group, group_data):
if group not in self.groups:
self.groups[group] = Group(name=group)
if isinstance(group_data, dict):
#make sure they are dicts
for section in ['vars', 'children', 'hosts']:
if section in group_data and isinstance(group_data[section], string_types):
group_data[section] = { group_data[section]: None}
if 'vars' in group_data:
for var in group_data['vars']:
self.groups[group].set_variable(var, group_data['vars'][var])
if 'children' in group_data:
for subgroup in group_data['children']:
self._parse_groups(subgroup, group_data['children'][subgroup])
self.groups[group].add_child_group(self.groups[subgroup])
if 'hosts' in group_data:
for host_pattern in group_data['hosts']:
hosts = self._parse_host(host_pattern, group_data['hosts'][host_pattern])
for h in hosts:
self.groups[group].add_host(h)
def _parse_host(self, host_pattern, host_data):
'''
Each host key can be a pattern, try to process it and add variables as needed
'''
(hostnames, port) = self._expand_hostpattern(host_pattern)
hosts = self._Hosts(hostnames, port)
if isinstance(host_data, dict):
for k in host_data:
for h in hosts:
h.set_variable(k, host_data[k])
if k in ['ansible_host', 'ansible_ssh_host']:
h.address = host_data[k]
return hosts
def _expand_hostpattern(self, hostpattern):
'''
Takes a single host pattern and returns a list of hostnames and an
optional port number that applies to all of them.
'''
# Can the given hostpattern be parsed as a host with an optional port
# specification?
try:
(pattern, port) = parse_address(hostpattern, allow_ranges=True)
except:
# not a recognizable host pattern
pattern = hostpattern
port = None
# Once we have separated the pattern, we expand it into list of one or
# more hostnames, depending on whether it contains any [x:y] ranges.
if detect_range(pattern):
hostnames = expand_hostname_range(pattern)
else:
hostnames = [pattern]
return (hostnames, port)
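    def _example_expand(self):
        # Hypothetical helper (not part of the original parser): a pattern
        # containing a range and a port expands into several hostnames plus
        # one shared port, roughly
        # (['web01.example.com', 'web02.example.com'], 2222).
        return self._expand_hostpattern('web[01:02].example.com:2222')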
def _Hosts(self, hostnames, port):
'''
Takes a list of hostnames and a port (which may be None) and returns a
list of Hosts (without recreating anything in self.hosts).
'''
hosts = []
# Note that we decide whether or not to create a Host based solely on
# the (non-)existence of its hostname in self.hosts. This means that one
# cannot add both "foo:22" and "foo:23" to the inventory.
for hn in hostnames:
if hn not in self.hosts:
self.hosts[hn] = Host(name=hn, port=port)
hosts.append(self.hosts[hn])
return hosts
def get_host_variables(self, host):
return {}
def _compile_patterns(self):
'''
Compiles the regular expressions required to parse the inventory and stores them in self.patterns.
'''
self.patterns['groupname'] = re.compile( r'''^[A-Za-z_][A-Za-z0-9_]*$''')
|
bjolivot/ansible
|
lib/ansible/inventory/yaml.py
|
Python
|
gpl-3.0
| 6,240
|
#!/usr/bin/python
from pisi.actionsapi import shelltools, get, autotools, pisitools
def setup():
autotools.configure ("--prefix=/usr\
--disable-static\
--disable-docs\
--docdir=/usr/share/doc/fontconfig-2.10.2")
def build():
autotools.make ()
def install():
autotools.rawInstall ("DESTDIR=%s" % get.installDIR())
|
richard-fisher/repository
|
system/base/fontconfig/actions.py
|
Python
|
gpl-2.0
| 410
|
# coding=utf-8
import threading
from flask_babel import lazy_gettext
from mycodo.databases.models import Actions
from mycodo.databases.models import Input
from mycodo.function_actions.base_function_action import AbstractFunctionAction
from mycodo.utils.database import db_retrieve_table_daemon
FUNCTION_ACTION_INFORMATION = {
'name_unique': 'clear_total_volume',
'name': f"{lazy_gettext('Flow Meter')}: {lazy_gettext('Clear Total Volume')}",
'library': None,
'manufacturer': 'Mycodo',
'url_manufacturer': None,
'url_datasheet': None,
'url_product_purchase': None,
'url_additional': None,
'message': 'Clear the total volume saved for a flow meter Input. The Input must have the Clear Total Volume option.',
'usage': 'Executing <strong>self.run_action("{ACTION_ID}")</strong> will clear the total volume for the selected flow meter Input. '
'Executing <strong>self.run_action("{ACTION_ID}", value={"input_id": "959019d1-c1fa-41fe-a554-7be3366a9c5b"})</strong> will clear the total volume for the flow meter Input with the specified ID.',
'dependencies_module': [],
'custom_options': [
{
'id': 'controller',
'type': 'select_device',
'default_value': '',
'options_select': [
'Input'
],
'name': lazy_gettext('Controller'),
'phrase': 'Select the flow meter Input'
}
]
}
class ActionModule(AbstractFunctionAction):
"""Function Action: Clear Total Volume."""
def __init__(self, action_dev, testing=False):
super(ActionModule, self).__init__(action_dev, testing=testing, name=__name__)
self.controller_id = None
action = db_retrieve_table_daemon(
Actions, unique_id=self.unique_id)
self.setup_custom_options(
FUNCTION_ACTION_INFORMATION['custom_options'], action)
if not testing:
self.setup_action()
def setup_action(self):
self.action_setup = True
def run_action(self, message, dict_vars):
try:
controller_id = dict_vars["value"]["input_id"]
except:
controller_id = self.controller_id
this_input = db_retrieve_table_daemon(
Input, unique_id=controller_id, entry='first')
if not this_input:
msg = f" Error: Input with ID '{controller_id}' not found."
message += msg
self.logger.error(msg)
return message
message += f" Clear total volume of Input {controller_id} ({this_input.name})."
clear_volume = threading.Thread(
target=self.control.custom_button,
args=("Input", this_input.unique_id, "clear_total_volume", {},))
clear_volume.start()
self.logger.debug(f"Message: {message}")
return message
def is_setup(self):
return self.action_setup
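# Hedged usage sketch (not part of the Mycodo source): per the 'usage' entry in
# FUNCTION_ACTION_INFORMATION above, a Function controller would trigger this action as
#
#     self.run_action("{ACTION_ID}")
#     self.run_action("{ACTION_ID}", value={"input_id": "959019d1-c1fa-41fe-a554-7be3366a9c5b"})
#
# run_action() then looks up the Input (explicit ID first, configured controller as the
# fallback) and spawns a thread invoking its "clear_total_volume" custom action.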
|
kizniche/Mycodo
|
mycodo/function_actions/clear_total_volume.py
|
Python
|
gpl-3.0
| 2,924
|
"""Array printing function
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
"""
from __future__ import division, absolute_import, print_function
__all__ = ["array2string", "set_printoptions", "get_printoptions"]
__docformat__ = 'restructuredtext'
#
# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
# last revision: 1996-3-13
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
# and by Perry Greenfield 2000-4-1 for numarray
# and by Travis Oliphant 2005-8-22 for numpy
import sys
from functools import reduce
from . import numerictypes as _nt
from .umath import maximum, minimum, absolute, not_equal, isnan, isinf
from .multiarray import format_longfloat, datetime_as_string, datetime_data
from .fromnumeric import ravel
from .numeric import asarray
if sys.version_info[0] >= 3:
_MAXINT = sys.maxsize
_MININT = -sys.maxsize - 1
else:
_MAXINT = sys.maxint
_MININT = -sys.maxint - 1
def product(x, y): return x*y
_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension
_summaryThreshold = 1000 # total items > triggers array summarization
_float_output_precision = 8
_float_output_suppress_small = False
_line_width = 75
_nan_str = 'nan'
_inf_str = 'inf'
_formatter = None # formatting function for array elements
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None,
nanstr=None, infstr=None,
formatter=None):
"""
Set printing options.
These options determine the way floating point numbers, arrays and
other NumPy objects are displayed.
Parameters
----------
precision : int, optional
Number of digits of precision for floating point output (default 8).
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr (default 1000).
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension (default 3).
linewidth : int, optional
The number of characters per line for the purpose of inserting
line breaks (default 75).
suppress : bool, optional
        Whether or not to suppress printing of small floating point values
using scientific notation (default False).
nanstr : str, optional
String representation of floating point not-a-number (default nan).
infstr : str, optional
String representation of floating point infinity (default inf).
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are::
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'numpy_str' : types `numpy.string_` and `numpy.unicode_`
- 'str' : all other strings
Other keys that can be used to set a group of types at once are::
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
See Also
--------
get_printoptions, set_string_function, array2string
Notes
-----
`formatter` is always reset with a call to `set_printoptions`.
Examples
--------
Floating point precision can be set:
>>> np.set_printoptions(precision=4)
>>> print np.array([1.123456789])
[ 1.1235]
Long arrays can be summarised:
>>> np.set_printoptions(threshold=5)
>>> print np.arange(10)
[0 1 2 ..., 7 8 9]
Small results can be suppressed:
>>> eps = np.finfo(float).eps
>>> x = np.arange(4.)
>>> x**2 - (x + eps)**2
array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
>>> np.set_printoptions(suppress=True)
>>> x**2 - (x + eps)**2
array([-0., -0., 0., 0.])
A custom formatter can be used to display array elements as desired:
>>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
>>> x = np.arange(3)
>>> x
array([int: 0, int: -1, int: -2])
>>> np.set_printoptions() # formatter gets reset
>>> x
array([0, 1, 2])
To put back the default options, you can use:
>>> np.set_printoptions(edgeitems=3,infstr='inf',
... linewidth=75, nanstr='nan', precision=8,
... suppress=False, threshold=1000, formatter=None)
"""
global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \
_line_width, _float_output_suppress_small, _nan_str, _inf_str, \
_formatter
if linewidth is not None:
_line_width = linewidth
if threshold is not None:
_summaryThreshold = threshold
if edgeitems is not None:
_summaryEdgeItems = edgeitems
if precision is not None:
_float_output_precision = precision
if suppress is not None:
_float_output_suppress_small = not not suppress
if nanstr is not None:
_nan_str = nanstr
if infstr is not None:
_inf_str = infstr
_formatter = formatter
def get_printoptions():
"""
Return the current print options.
Returns
-------
print_opts : dict
Dictionary of current print options with keys
- precision : int
- threshold : int
- edgeitems : int
- linewidth : int
- suppress : bool
- nanstr : str
- infstr : str
- formatter : dict of callables
For a full description of these options, see `set_printoptions`.
See Also
--------
set_printoptions, set_string_function
"""
d = dict(precision=_float_output_precision,
threshold=_summaryThreshold,
edgeitems=_summaryEdgeItems,
linewidth=_line_width,
suppress=_float_output_suppress_small,
nanstr=_nan_str,
infstr=_inf_str,
formatter=_formatter)
return d
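# Hedged usage sketch (not part of the NumPy source): one common pattern is to use the
# two functions above as a save/modify/restore pair, e.g.
#
#     import numpy as np
#     saved = np.get_printoptions()            # snapshot the current options
#     np.set_printoptions(precision=3, suppress=True)
#     print(np.array([1.23456789e-10, 1.0]))   # tiny value prints as 0. instead of 1.235e-10
#     np.set_printoptions(**saved)             # restore the previous options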
def _leading_trailing(a):
from . import numeric as _nc
if a.ndim == 1:
if len(a) > 2*_summaryEdgeItems:
b = _nc.concatenate((a[:_summaryEdgeItems],
a[-_summaryEdgeItems:]))
else:
b = a
else:
if len(a) > 2*_summaryEdgeItems:
l = [_leading_trailing(a[i]) for i in range(
min(len(a), _summaryEdgeItems))]
l.extend([_leading_trailing(a[-i]) for i in range(
min(len(a), _summaryEdgeItems), 0, -1)])
else:
l = [_leading_trailing(a[i]) for i in range(0, len(a))]
b = _nc.concatenate(tuple(l))
return b
def _boolFormatter(x):
if x:
return ' True'
else:
return 'False'
def repr_format(x):
return repr(x)
def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
prefix="", formatter=None):
if max_line_width is None:
max_line_width = _line_width
if precision is None:
precision = _float_output_precision
if suppress_small is None:
suppress_small = _float_output_suppress_small
if formatter is None:
formatter = _formatter
if a.size > _summaryThreshold:
summary_insert = "..., "
data = _leading_trailing(a)
else:
summary_insert = ""
data = ravel(asarray(a))
formatdict = {'bool' : _boolFormatter,
'int' : IntegerFormat(data),
'float' : FloatFormat(data, precision, suppress_small),
'longfloat' : LongFloatFormat(precision),
'complexfloat' : ComplexFormat(data, precision,
suppress_small),
'longcomplexfloat' : LongComplexFormat(precision),
'datetime' : DatetimeFormat(data),
'timedelta' : TimedeltaFormat(data),
'numpystr' : repr_format,
'str' : str}
if formatter is not None:
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
if 'all' in fkeys:
for key in formatdict.keys():
formatdict[key] = formatter['all']
if 'int_kind' in fkeys:
for key in ['int']:
formatdict[key] = formatter['int_kind']
if 'float_kind' in fkeys:
for key in ['float', 'longfloat']:
formatdict[key] = formatter['float_kind']
if 'complex_kind' in fkeys:
for key in ['complexfloat', 'longcomplexfloat']:
formatdict[key] = formatter['complex_kind']
if 'str_kind' in fkeys:
for key in ['numpystr', 'str']:
formatdict[key] = formatter['str_kind']
for key in formatdict.keys():
if key in fkeys:
formatdict[key] = formatter[key]
try:
format_function = a._format
msg = "The `_format` attribute is deprecated in Numpy 2.0 and " \
"will be removed in 2.1. Use the `formatter` kw instead."
import warnings
warnings.warn(msg, DeprecationWarning)
except AttributeError:
# find the right formatting function for the array
dtypeobj = a.dtype.type
if issubclass(dtypeobj, _nt.bool_):
format_function = formatdict['bool']
elif issubclass(dtypeobj, _nt.integer):
if issubclass(dtypeobj, _nt.timedelta64):
format_function = formatdict['timedelta']
else:
format_function = formatdict['int']
elif issubclass(dtypeobj, _nt.floating):
if issubclass(dtypeobj, _nt.longfloat):
format_function = formatdict['longfloat']
else:
format_function = formatdict['float']
elif issubclass(dtypeobj, _nt.complexfloating):
if issubclass(dtypeobj, _nt.clongfloat):
format_function = formatdict['longcomplexfloat']
else:
format_function = formatdict['complexfloat']
elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
format_function = formatdict['numpystr']
elif issubclass(dtypeobj, _nt.datetime64):
format_function = formatdict['datetime']
else:
format_function = formatdict['numpystr']
# skip over "["
next_line_prefix = " "
# skip over array(
next_line_prefix += " "*len(prefix)
lst = _formatArray(a, format_function, len(a.shape), max_line_width,
next_line_prefix, separator,
_summaryEdgeItems, summary_insert)[:-1]
return lst
def _convert_arrays(obj):
from . import numeric as _nc
newtup = []
for k in obj:
if isinstance(k, _nc.ndarray):
k = k.tolist()
elif isinstance(k, tuple):
k = _convert_arrays(k)
newtup.append(k)
return tuple(newtup)
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=repr, formatter=None):
"""
Return a string representation of an array.
Parameters
----------
a : ndarray
Input array.
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters splits the string appropriately after array elements.
precision : int, optional
Floating point precision. Default is the current printing
precision (usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
Represent very small numbers as zero. A number is "very small" if it
is smaller than the current printing precision.
separator : str, optional
Inserted between elements.
prefix : str, optional
An array is typically printed as::
'prefix(' + array2string(a) + ')'
The length of the prefix string is used to align the
output correctly.
style : function, optional
A function that accepts an ndarray and returns a string. Used only
when the shape of `a` is equal to ``()``, i.e. for 0-D arrays.
formatter : dict of callables, optional
If not None, the keys should indicate the type(s) that the respective
formatting function applies to. Callables should return a string.
Types that are not specified (by their corresponding keys) are handled
by the default formatters. Individual types for which a formatter
can be set are::
- 'bool'
- 'int'
- 'timedelta' : a `numpy.timedelta64`
- 'datetime' : a `numpy.datetime64`
- 'float'
- 'longfloat' : 128-bit floats
- 'complexfloat'
- 'longcomplexfloat' : composed of two 128-bit floats
- 'numpy_str' : types `numpy.string_` and `numpy.unicode_`
- 'str' : all other strings
Other keys that can be used to set a group of types at once are::
- 'all' : sets all types
- 'int_kind' : sets 'int'
- 'float_kind' : sets 'float' and 'longfloat'
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
- 'str_kind' : sets 'str' and 'numpystr'
Returns
-------
array_str : str
String representation of the array.
Raises
------
TypeError
if a callable in `formatter` does not return a string.
See Also
--------
array_str, array_repr, set_printoptions, get_printoptions
Notes
-----
If a formatter is specified for a certain type, the `precision` keyword is
ignored for that type.
This is a very flexible function; `array_repr` and `array_str` are using
`array2string` internally so keywords with the same name should work
identically in all three functions.
Examples
--------
>>> x = np.array([1e-16,1,2,3])
>>> print np.array2string(x, precision=2, separator=',',
... suppress_small=True)
[ 0., 1., 2., 3.]
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
'[0.00 1.00 2.00]'
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
'[0x0L 0x1L 0x2L]'
"""
if a.shape == ():
x = a.item()
try:
lst = a._format(x)
msg = "The `_format` attribute is deprecated in Numpy " \
"2.0 and will be removed in 2.1. Use the " \
"`formatter` kw instead."
import warnings
warnings.warn(msg, DeprecationWarning)
except AttributeError:
if isinstance(x, tuple):
x = _convert_arrays(x)
lst = style(x)
elif reduce(product, a.shape) == 0:
# treat as a null array if any of shape elements == 0
lst = "[]"
else:
lst = _array2string(a, max_line_width, precision, suppress_small,
separator, prefix, formatter=formatter)
return lst
def _extendLine(s, line, word, max_line_len, next_line_prefix):
if len(line.rstrip()) + len(word.rstrip()) >= max_line_len:
s += line.rstrip() + "\n"
line = next_line_prefix
line += word
return s, line
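# Worked illustration (not part of the original source): when the pending word would
# push the line past max_line_len, the accumulated line is flushed into `s` and a fresh
# line is started with next_line_prefix, e.g.
#
#     _extendLine("", "[1. 2.", " 3.", 6, "  ")
#     # -> ("[1. 2.\n", "   3.")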
def _formatArray(a, format_function, rank, max_line_len,
next_line_prefix, separator, edge_items, summary_insert):
"""formatArray is designed for two modes of operation:
1. Full output
2. Summarized output
"""
if rank == 0:
obj = a.item()
if isinstance(obj, tuple):
obj = _convert_arrays(obj)
return str(obj)
if summary_insert and 2*edge_items < len(a):
leading_items, trailing_items, summary_insert1 = \
edge_items, edge_items, summary_insert
else:
leading_items, trailing_items, summary_insert1 = 0, len(a), ""
if rank == 1:
s = ""
line = next_line_prefix
for i in range(leading_items):
word = format_function(a[i]) + separator
s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
if summary_insert1:
s, line = _extendLine(s, line, summary_insert1, max_line_len, next_line_prefix)
for i in range(trailing_items, 1, -1):
word = format_function(a[-i]) + separator
s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
word = format_function(a[-1])
s, line = _extendLine(s, line, word, max_line_len, next_line_prefix)
s += line + "]\n"
s = '[' + s[len(next_line_prefix):]
else:
s = '['
sep = separator.rstrip()
for i in range(leading_items):
if i > 0:
s += next_line_prefix
s += _formatArray(a[i], format_function, rank-1, max_line_len,
" " + next_line_prefix, separator, edge_items,
summary_insert)
s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)
if summary_insert1:
s += next_line_prefix + summary_insert1 + "\n"
for i in range(trailing_items, 1, -1):
if leading_items or i != trailing_items:
s += next_line_prefix
s += _formatArray(a[-i], format_function, rank-1, max_line_len,
" " + next_line_prefix, separator, edge_items,
summary_insert)
s = s.rstrip() + sep.rstrip() + '\n'*max(rank-1, 1)
if leading_items or trailing_items > 1:
s += next_line_prefix
s += _formatArray(a[-1], format_function, rank-1, max_line_len,
" " + next_line_prefix, separator, edge_items,
summary_insert).rstrip()+']\n'
return s
class FloatFormat(object):
def __init__(self, data, precision, suppress_small, sign=False):
self.precision = precision
self.suppress_small = suppress_small
self.sign = sign
self.exp_format = False
self.large_exponent = False
self.max_str_len = 0
try:
self.fillFormat(data)
except (TypeError, NotImplementedError):
# if reduce(data) fails, this instance will not be called, just
# instantiated in formatdict.
pass
def fillFormat(self, data):
from . import numeric as _nc
with _nc.errstate(all='ignore'):
special = isnan(data) | isinf(data)
valid = not_equal(data, 0) & ~special
non_zero = absolute(data.compress(valid))
if len(non_zero) == 0:
max_val = 0.
min_val = 0.
else:
max_val = maximum.reduce(non_zero)
min_val = minimum.reduce(non_zero)
if max_val >= 1.e8:
self.exp_format = True
if not self.suppress_small and (min_val < 0.0001
or max_val/min_val > 1000.):
self.exp_format = True
if self.exp_format:
self.large_exponent = 0 < min_val < 1e-99 or max_val >= 1e100
self.max_str_len = 8 + self.precision
if self.large_exponent:
self.max_str_len += 1
if self.sign:
format = '%+'
else:
format = '%'
format = format + '%d.%de' % (self.max_str_len, self.precision)
else:
format = '%%.%df' % (self.precision,)
if len(non_zero):
precision = max([_digits(x, self.precision, format)
for x in non_zero])
else:
precision = 0
precision = min(self.precision, precision)
self.max_str_len = len(str(int(max_val))) + precision + 2
if _nc.any(special):
self.max_str_len = max(self.max_str_len,
len(_nan_str),
len(_inf_str)+1)
if self.sign:
format = '%#+'
else:
format = '%#'
format = format + '%d.%df' % (self.max_str_len, precision)
self.special_fmt = '%%%ds' % (self.max_str_len,)
self.format = format
def __call__(self, x, strip_zeros=True):
from . import numeric as _nc
with _nc.errstate(invalid='ignore'):
if isnan(x):
if self.sign:
return self.special_fmt % ('+' + _nan_str,)
else:
return self.special_fmt % (_nan_str,)
elif isinf(x):
if x > 0:
if self.sign:
return self.special_fmt % ('+' + _inf_str,)
else:
return self.special_fmt % (_inf_str,)
else:
return self.special_fmt % ('-' + _inf_str,)
s = self.format % x
if self.large_exponent:
# 3-digit exponent
expsign = s[-3]
if expsign == '+' or expsign == '-':
s = s[1:-2] + '0' + s[-2:]
elif self.exp_format:
# 2-digit exponent
if s[-3] == '0':
s = ' ' + s[:-3] + s[-2:]
elif strip_zeros:
z = s.rstrip('0')
s = z + ' '*(len(s)-len(z))
return s
def _digits(x, precision, format):
s = format % x
z = s.rstrip('0')
return precision - len(s) + len(z)
class IntegerFormat(object):
def __init__(self, data):
try:
max_str_len = max(len(str(maximum.reduce(data))),
len(str(minimum.reduce(data))))
self.format = '%' + str(max_str_len) + 'd'
except (TypeError, NotImplementedError):
# if reduce(data) fails, this instance will not be called, just
# instantiated in formatdict.
pass
except ValueError:
# this occurs when everything is NA
pass
def __call__(self, x):
if _MININT < x < _MAXINT:
return self.format % x
else:
return "%s" % x
class LongFloatFormat(object):
# XXX Have to add something to determine the width to use a la FloatFormat
# Right now, things won't line up properly
def __init__(self, precision, sign=False):
self.precision = precision
self.sign = sign
def __call__(self, x):
if isnan(x):
if self.sign:
return '+' + _nan_str
else:
return ' ' + _nan_str
elif isinf(x):
if x > 0:
if self.sign:
return '+' + _inf_str
else:
return ' ' + _inf_str
else:
return '-' + _inf_str
elif x >= 0:
if self.sign:
return '+' + format_longfloat(x, self.precision)
else:
return ' ' + format_longfloat(x, self.precision)
else:
return format_longfloat(x, self.precision)
class LongComplexFormat(object):
def __init__(self, precision):
self.real_format = LongFloatFormat(precision)
self.imag_format = LongFloatFormat(precision, sign=True)
def __call__(self, x):
r = self.real_format(x.real)
i = self.imag_format(x.imag)
return r + i + 'j'
class ComplexFormat(object):
def __init__(self, x, precision, suppress_small):
self.real_format = FloatFormat(x.real, precision, suppress_small)
self.imag_format = FloatFormat(x.imag, precision, suppress_small,
sign=True)
def __call__(self, x):
r = self.real_format(x.real, strip_zeros=False)
i = self.imag_format(x.imag, strip_zeros=False)
if not self.imag_format.exp_format:
z = i.rstrip('0')
i = z + 'j' + ' '*(len(i)-len(z))
else:
i = i + 'j'
return r + i
class DatetimeFormat(object):
def __init__(self, x, unit=None,
timezone=None, casting='same_kind'):
# Get the unit from the dtype
if unit is None:
if x.dtype.kind == 'M':
unit = datetime_data(x.dtype)[0]
else:
unit = 's'
# If timezone is default, make it 'local' or 'UTC' based on the unit
if timezone is None:
# Date units -> UTC, time units -> local
if unit in ('Y', 'M', 'W', 'D'):
self.timezone = 'UTC'
else:
self.timezone = 'local'
else:
self.timezone = timezone
self.unit = unit
self.casting = casting
def __call__(self, x):
return "'%s'" % datetime_as_string(x,
unit=self.unit,
timezone=self.timezone,
casting=self.casting)
class TimedeltaFormat(object):
def __init__(self, data):
if data.dtype.kind == 'm':
v = data.view('i8')
max_str_len = max(len(str(maximum.reduce(v))),
len(str(minimum.reduce(v))))
self.format = '%' + str(max_str_len) + 'd'
def __call__(self, x):
return self.format % x.astype('i8')
|
tdsmith/numpy
|
numpy/core/arrayprint.py
|
Python
|
bsd-3-clause
| 26,098
|
from validate_app import validateApp
import os
from distutils import spawn
import sys
from parse_files import parseOutHTseq, bringTogether
from bashSub import bashSub
def checkPreprocessApplications():
applications = ["spades.py"]
source = ["http://bioinf.spbau.ru/spades"]
    i = 0
    for app in applications:
        if spawn.find_executable(app) is None:
            sys.stderr.write("It doesn't look like you have app - " + app + "\n")
            sys.stderr.write("Download it here - " + source[i] + "\n")
            exit(0)
        else:
            sys.stderr.write(app + " found\n")
        i += 1
def returnReads(dictSampleSeqFiles):
SE = ""
PE1 = ""
PE2 = ""
# data struct
# { (sampleKey, seqKey) : [[SE], [SE], [PE1, PE2], [PE1, PE2]] }
# diving into each of the sub lists in the dictionary value key
for e in dictSampleSeqFiles:
# if sublist only has one elment then it is SE read
if len(e) == 1:
if SE == "":
SE = e[0]
else:
SE += "," + e[0]
else:
if PE1 == "":
PE1 = e[0]
PE2 = e[1]
else:
PE1 += "," + e[0]
PE2 += "," + e[1]
return [SE, PE1, PE2]
def check_dir(Dir):
if not os.path.exists(Dir):
os.mkdir(Dir)
class spadesCMD:
def __init__(self):
self.metaDataFolder = "MetaData"
def execute(self, args):
time = 0
checkPreprocessApplications();
logFiles = []
# checkPreprocessApplications()
validate = validateApp()
validate.setValidation(True)
dictSampleSeqFiles = validate.validateSampleSheet(args.readFolder, args.spadesFolder, args.samplesFile, args.force, True)
#keys tuple 0 location being input folder
#1 location being output folder location
for keys in dictSampleSeqFiles.keys():
check_dir(args.spadesFolder)
check_dir(keys[1])
terminal = []
#countFile = os.path.join(keys[1], keys[0].split("/")[-1]) + ".counts"
print dictSampleSeqFiles[keys]
if (len(dictSampleSeqFiles[keys][1]) == 3):
terminal.append(bashSub("spades.py", dictSampleSeqFiles[keys][1], ['-1', '-2', '-s'], " --careful -t " + args.threads + " -o " + keys[1] + " -m " + args.memory, ''))
elif (len(dictSampleSeqFiles[keys][1]) == 2):
terminal.append(bashSub("spades.py", dictSampleSeqFiles[keys][1], ['-1', '-2'], "--careful -t " + args.threads + " -o " + keys[1] + " -m " + args.memory, ''))
print terminal[-1].getCommand()
terminal[-1].runCmd("")
sys.stderr.flush()
#time += runSortByName.returnTime() + runView.returnTime() + htseqCmd.returnTime()
#logFiles.append(parseOutHTseq(keys[1], keys[1].split("/")[-1]))
#bringTogether(logFiles, os.path.join(args.finalDir, "Counts_Summary.log"))
print "Total amount of seconds to run all samples"
print "Seconds: " + str(time)
|
msettles/expHTS
|
expHTS/spadesCMD.py
|
Python
|
apache-2.0
| 3,121
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os.path
import sys
_SRC_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(_SRC_DIR, 'build', 'android'))
from pylib import constants
class Options(object):
"""Global options repository.
ParseArgs must be called before use. See _ARGS for common members, these will
be available as instance attributes (eg, OPTIONS.clear_cache).
"""
# Tuples of (argument name, default value, help string).
_ARGS = [ ('clear_cache', True,
'clear browser cache before loading'),
('chrome_package_name', 'chrome',
'build/android/pylib/constants package description'),
('devtools_hostname', 'localhost',
'hostname for devtools websocket connection'),
('devtools_port', 9222,
'port for devtools websocket connection'),
('local_binary', os.path.join(_SRC_DIR, 'out/Release/chrome'),
'chrome binary for local runs'),
('local_noisy', False,
'Enable local chrome console output'),
('local_profile_dir', None,
'profile directory to use for local runs'),
('no_sandbox', False,
'pass --no-sandbox to browser (local run only; see also '
'https://chromium.googlesource.com/chromium/src/+/master/'
'docs/linux_suid_sandbox_development.md)'),
('devices_file', _SRC_DIR + '/third_party/WebKit/Source/devtools'
'/front_end/emulated_devices/module.json', 'File containing a'
' list of emulated devices characteristics.'),
('emulate_device', '', 'Name of the device to emulate. Must be '
'present in --devices_file, or empty for no emulation.'),
('emulate_network', '', 'Type of network emulation. Empty for no'
' emulation.')
]
def __init__(self):
self._arg_set = set()
self._parsed_args = None
def AddGlobalArgument(self, arg_name, default, help_str):
"""Add a global argument.
Args:
arg_name: the name of the argument. This will be used as an optional --
argument.
default: the default value for the argument. The type of this default will
be used as the type of the argument.
help_str: the argument help string.
"""
self._ARGS.append((arg_name, default, help_str))
def ParseArgs(self, arg_list, description=None, extra=None):
"""Parse command line arguments.
Args:
arg_list: command line argument list.
description: description to use in argument parser.
extra: additional required arguments to add. These will be exposed as
instance attributes. This is either a list of extra arguments, or a
single string or tuple. If a tuple, the first item is the argument and
the second a default, otherwise the argument is required. Arguments are
used as in argparse, ie those beginning with -- are named, and those
without a dash are positional. Don't use a single dash.
"""
parser = self._MakeParser(description, extra)
self._parsed_args = parser.parse_args(arg_list)
def ExtractArgs(self, arg_list):
"""Extract arguments from arg_str.
Args:
arg_list: command line argument list. It will be changed so that arguments
used by this options instance are removed.
"""
parser = self._MakeParser()
(self._parsed_args, unused) = parser.parse_known_args(arg_list)
del arg_list[:]
arg_list.extend(unused)
def GetParentParser(self, group_name='Global'):
"""Returns a parser suitable for passing in as a parent to argparse.
Args:
group_name: A group name for the parser (see argparse's
add_argument_group).
Returns:
An argparse parser instance.
"""
return self._MakeParser(group=group_name)
def SetParsedArgs(self, parsed_args):
"""Set parsed args. Used with GetParentParser.
Args:
parsed_args: the result of argparse.parse_args or similar.
"""
self._parsed_args = parsed_args
def _MakeParser(self, description=None, extra=None, group=None):
add_help = True if group is None else False
parser = argparse.ArgumentParser(
description=description, add_help=add_help)
container = parser if group is None else parser.add_argument_group(group)
for arg, default, help_str in self._ARGS:
# All global options are named.
arg = '--' + arg
self._AddArg(container, arg, default, help_str=help_str)
if extra is not None:
if type(extra) is not list:
extra = [extra]
for arg in extra:
if type(arg) is tuple:
argname, default = arg
self._AddArg(container, argname, default)
else:
self._AddArg(container, arg, None, required=True)
return parser
def _AddArg(self, container, arg, default, required=False, help_str=None):
assert not arg.startswith('-') or arg.startswith('--'), \
"Single dash arguments aren't supported: %s" % arg
arg_name = arg
if arg.startswith('--'):
arg_name = arg[2:]
assert arg_name not in self._arg_set, \
'%s extra arg is a duplicate' % arg_name
self._arg_set.add(arg_name)
kwargs = {}
if required and arg.startswith('--'):
kwargs['required'] = required
if help_str is not None:
kwargs['help'] = help_str
if default is not None:
if type(default) is bool:
# If the default of a switch is true, setting the flag stores false.
if default:
kwargs['action'] = 'store_false'
else:
kwargs['action'] = 'store_true'
else:
kwargs['default'] = default
kwargs['type'] = type(default)
container.add_argument(arg, **kwargs)
def __getattr__(self, name):
if name in self._arg_set:
assert self._parsed_args, 'Option requested before ParseArgs called'
return getattr(self._parsed_args, name)
raise AttributeError(name)
def ChromePackage(self):
return constants.PACKAGE_INFO[self.chrome_package_name]
OPTIONS = Options()
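# Hedged usage sketch (not part of the Chromium source): a typical flow for the module
# above (argument values here are illustrative only):
#
#     OPTIONS.ParseArgs(['--no_sandbox', '--devtools_port', '9229'],
#                       description='example run')
#     if OPTIONS.no_sandbox:              # boolean flags flip their default when passed
#         pass
#     port = OPTIONS.devtools_port        # -> 9229, typed int from the default
#
# Attribute access before ParseArgs/SetParsedArgs raises an assertion, since
# __getattr__ requires self._parsed_args to be populated.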
|
junhuac/MQUIC
|
src/tools/android/loading/options.py
|
Python
|
mit
| 6,296
|
import datetime
import httplib
import urllib
import os.path
import csv
import time
from datetime import timedelta
import pandas as pd
import numpy as np
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def totimestamp(dt, epoch=datetime.date(1970,1,1)):
td = dt - epoch
# return td.total_seconds()
return (td.microseconds + (td.seconds + td.days * 86400) * 10**6) / 10**6
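# Hedged example (not part of the original script): with the default epoch,
# totimestamp(datetime.date(1970, 1, 2)) returns 86400, i.e. whole seconds since
# 1970-01-01 (the expression above truncates to integer seconds).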
class stockImport(object):
def __init__(self
):
print ('setup stock importer')
def saveDate(self, date=None):
date_str = date.strftime("%m/%d/%Y")
print('{} finished'.format(date_str))
f = open('./twstock.tmp', 'w')
f.write(date_str)
def loadDate(self):
try:
f = open('./twstock.tmp', 'r')
date_str = f.readline()
#default set to 4 PM
return datetime.datetime.strptime(date_str + " 16:00:00", "%m/%d/%Y %H:%M:%S")
except IOError:
return datetime.datetime.strptime("1/1/2010 16:00:00", "%m/%d/%Y %H:%M:%S")
def downloadData(self):
start_day = datetime.date(2004, 2, 11);
today = datetime.date.today()
one_day = timedelta(days=1)
y, m, d, h, min, sec, wd, yd, i = datetime.datetime.now().timetuple()
end_time = today
if h > 16:
end_time = today + one_day
print "start download missing data"
print "checking from " + start_day.strftime("%Y-%m-%d") + " to " + today.strftime("%Y-%m-%d")
print "checking end time " + end_time.strftime("%Y-%m-%d")
download_date = start_day
while download_date < end_time:
file_name = "data/" + download_date.strftime("%Y%m%d") + ".csv"
if os.path.isfile(file_name):
download_date += one_day
continue
httpreq = httplib.HTTPConnection('www.twse.com.tw')
#http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=20170526&type=ALL
#headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
print
date_str = str(download_date.year - 1911 ) + download_date.strftime("/%m/%d")
form = urllib.urlencode({'download': 'csv', 'qdate': date_str, 'selectType': 'ALLBUT0999'})
#httpreq.request("POST", "/ch/trading/exchange/MI_INDEX/MI_INDEX.php", form, headers);
full_url = "exchangeReport/MI_INDEX?response=csv&date=" + download_date.strftime("%Y%m%d") + "&type=ALL"
print full_url
httpreq.request("GET", "http://www.twse.com.tw/exchangeReport/MI_INDEX?response=csv&date=" + download_date.strftime("%Y%m%d") + "&type=ALL");
httpres = httpreq.getresponse()
stock_csv = httpres.read()
print "downloading " + file_name
f = open(file_name, "w")
f.write(stock_csv)
download_date += one_day
def insertToStock(self, stockid, row, date):
try:
date_str = date.strftime("%Y-%m-%d")
df = pd.DataFrame.from_csv('bystock/' + stockid + '.csv')
#check if there is a key
df.loc[date_str].count()
#key already exist. skip it
except KeyError:
#no such key. insert it
df = pd.concat([df, row])
df.to_csv('bystock/' + stockid + '.csv')
#print df
except IOError:
print('stock id: {} not exist'.format(stockid))
row.to_csv('bystock/' + stockid + '.csv')
def prepcsv(self, csv):
ret = []
for i in csv:
tmp = i
tmp = tmp.replace(',', '')
tmp = tmp.replace('\'', '')
tmp = tmp.replace('\"', '')
tmp = tmp.replace('=', '')
ret.append(tmp)
return ret
def convertCSV(self, file_path=None, date=None):
print('convert csv {}'.format(file_path))
with open(file_path, 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in spamreader:
if len(row) < 16:
#abnormal some column missing?
continue
#if len(row) == 17:
#abnormal should not more than 16 column
#print(row)
if len(row) == 17:
stockid=row[0].replace('=', '')
stockid=stockid.replace('"', '')
stockid=stockid.strip()
if stockid.startswith('('):
continue
checkrow = row[11].replace(',', '')
checkrow = checkrow.replace('"', '')
checkrow = checkrow.replace('=', '')
if not checkrow[0].isdigit():
#skip column title
continue
row = self.prepcsv(row)
TV=int(row[2])
TC=int(row[3])
TO=int(row[4])
RD=row[9]
if RD == '+':
DF=float(row[10])
RD=1
elif RD == '-':
DF=0-float(row[10])
RD=-1
else:
DF=0
RD=0
PE=float(row[15])
try:
OP=float(row[5])
CP=float(row[8])
HP=float(row[6])
LP=float(row[7])
except ValueError:
OP=None
CP=None
HP=None
LP=None
#print('OP:{} CP:{} HP:{} LP:{} DF:{} RD:{} TV:{} TC:{} TO:{}\n'.format( OP, CP, HP, LP, DF, RD, TV, TC, TO))
cols = ['OP', 'CP', 'HP', 'LP', 'DF', 'RD', 'TV', 'TC', 'TO']
date_index = pd.date_range(date.strftime("%m/%d/%Y"), periods=1)
df1 = pd.DataFrame([[OP, CP, HP, LP, DF, RD, TV, TC, TO]], columns=cols)
df1['date'] = date_index
df1 = df1.set_index(['date'])
#print stockid
#print df1
self.insertToStock(stockid, df1, date)
self.saveDate(date)
def getExpectCP(self, df, date):
today = datetime.date.today()
one_day = timedelta(days=1)
if date > today:
#print "over today"
#print date.strftime("%Y-%m-%d")
return None
try:
date_str = date.strftime("%Y-%m-%d")
return df.loc[date_str, 'CP']
except KeyError as e:
return self.getExpectCP(df, date + one_day)
def loadTrainDataById(self, stock_id, start_date, days, expect):
one_day = timedelta(days=1)
stop_date = start_date + one_day * days
expect_date = start_date + one_day * (days + expect)
today = datetime.date.today()
if stop_date > today:
return None
try:
start_date_str = start_date.strftime("%Y-%m-%d")
stop_date_str = stop_date.strftime("%Y-%m-%d")
expect_date_str = expect_date.strftime("%Y-%m-%d")
df = pd.DataFrame.from_csv('bystock/' + stock_id + '.csv')
print "from:" + start_date_str + " to:" + stop_date_str
dft = df.loc[start_date_str:stop_date_str]
#print dft.as_matrix()
#print dft.reset_index().values
dfcp = df.loc[start_date_str:stop_date_str, 'CP']
#print df.loc[start_date_str:expect_date_str, 'CP']
expcp = self.getExpectCP(df, expect_date)
if expcp == None:
return
#print dfcp
print 'max during train:' + str(dfcp.max())
print str(expect) + ' days ' + expect_date_str + ' close price' + str(expcp)
if expcp > dfcp.max():
print 'up'
else:
print 'down'
except KeyError as e:
print "out of range , try next day"
except IOError:
print "no such stock id"
def loadTrainDataByIdFixedRow(self, stock_id, start_date, days, expect):
one_day = timedelta(days=1)
stop_date = start_date + one_day * days
expect_date = start_date + one_day * (days + expect)
today = datetime.date.today()
res = 0
if stop_date > today:
return None, None
try:
start_date_str = start_date.strftime("%Y-%m-%d")
stop_date_str = stop_date.strftime("%Y-%m-%d")
expect_date_str = expect_date.strftime("%Y-%m-%d")
today_date_str = datetime.date.today()
#read data frame from stock file
df = pd.DataFrame.from_csv('bystock/' + stock_id + '.csv')
#print "from:" + start_date_str + " to:" + stop_date_str
#count total record from start to now, check if data record is enough
dft = df.loc[start_date_str:today_date_str]
if dft['CP'].count() < days + expect:
print 'data is not enough for train or validate'
return None, None
#retrive enough data record days + expect days
dft = dft[:days + expect]
#get the expect date data record
expcpdf = dft.tail(1)
#print dft
#print dft[:days]
#print dft.as_matrix()
#print dft.reset_index().values
#first n days data record for training
dfcp = dft[:days]
#convert to matrix
data = dfcp.as_matrix()
#get expected close price
expcp = expcpdf['CP'].max()
#get max close price in training data
tmax = dft[:days]['CP'].max()
#print 'last price:' + str(expcpdf['CP'])
#print 'max during train:' + str(tmax)
#print str(expect) + ' days close price:' + str(expcp)
if expcp > tmax:
res = 1
#print 'up'
else:
res = 0
#print 'down'
return data, res
except KeyError as e:
print "out of range , try next day"
except IOError:
print "no such stock id"
def loadAllTrainDataByStock(self, stock_id, start_date, days, expect):
today = datetime.date.today()
one_day = timedelta(days=1)
da = start_date
X = []
Y = []
while da < today:
da = da + one_day
d, r = self.loadTrainDataByIdFixedRow(stock_id, da, days, expect)
if d is None:
break
x = sum(d.tolist(), [])
X.append(x)
Y.append(r)
#print("---------------------------------------------")
#print x
#print("---------------------------------------------")
#print r
return X,Y
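# Hedged usage sketch (not part of the original script): building a training set for a
# hypothetical stock id with 30-day windows and a 5-row look-ahead:
#
#     importer = stockImport()
#     X, Y = importer.loadAllTrainDataByStock('2330', datetime.date(2015, 1, 1), 30, 5)
#     # X: flattened 30-row windows (9 columns per row); Y: 1 if the close price 5
#     # trading rows after a window exceeds that window's maximum close, else 0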
|
kaija/tw-stock
|
stock.py
|
Python
|
mit
| 11,215
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function
# Copyright (C) 2005-2007 Carabos Coop. V. All rights reserved
# Copyright (C) 2008-2013 Vicent Mas. All rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Vicent Mas - vmas@vitables.org
import locale
import warnings
import sys
from liam2 import config
from liam2.compat import PY2
from liam2.utils import ExceptionOnGetAttr
try:
from qtpy import QtWidgets
from vitables.vtapp import VTApp
except ImportError as e:
msg = "the 'view' command is not available because 'vitables' does not seem to be installed correctly (%s)." % e
print("Warning:", msg)
if not config.debug and not PY2:
e = ImportError(msg).with_traceback(sys.exc_info()[2])
VTApp = ExceptionOnGetAttr(e)
QtWidgets = ExceptionOnGetAttr(e)
def viewhdf(filepaths):
app = QtWidgets.QApplication(filepaths)
# These imports must be done after the QApplication has been instantiated
with warnings.catch_warnings():
# ignore deprecation warnings just for this import
warnings.filterwarnings("ignore", category=DeprecationWarning)
from vitables.vtapp import VTApp
from vitables.preferences import vtconfig
# Specify the organization's Internet domain. When the Internet
# domain is set, it is used on Mac OS X instead of the organization
# name, since Mac OS X applications conventionally use Internet
# domains to identify themselves
app.setOrganizationDomain('vitables.org')
app.setOrganizationName('ViTables')
app.setApplicationName('ViTables')
app.setApplicationVersion(vtconfig.getVersion())
# Localize the application using the system locale
# numpy seems to have problems with decimal separator in some locales
# (catalan, german...) so C locale is always used for numbers.
locale.setlocale(locale.LC_ALL, '')
locale.setlocale(locale.LC_NUMERIC, 'C')
# Start the application
vtapp = VTApp(mode='a', h5files=filepaths)
vtapp.gui.show()
app.exec_()
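# Hedged usage sketch (not part of the liam2 source): viewhdf() starts a Qt event loop
# and blocks until the ViTables window is closed, e.g.
#
#     viewhdf(['output/simulation.h5'])   # hypothetical HDF5 file path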
|
liam2/liam2
|
liam2/view.py
|
Python
|
gpl-3.0
| 2,740
|
from PyQt5.QtCore import *
from PyDesignData.PyDesignObject import *
from PyDesignModel.PyDesignCalcSheetsItem import PyDesignCalcSheetsItem
from PyDesignModel.PyDesignIcons import *
from PyDesignModel.PyDesignMaterialsItem import PyDesignMaterialsItem
from PyDesignModel.PyDesignModelItem import PyDesignModelItem
from PyDesignModel.PyDesignParametersItem import *
from PyDesignModel.PyDesignGeometriesItem import *
from PyDesignModel.PyDesignMeshesItem import *
from PyDesignModel.PyDesignSolversItem import PyDesignSolversItem
__author__ = 'magnus'
class PyDesignAnalysisItem(PyDesignModelItem):
def __init__(self, parent, py_design_analysis):
"""
:type py_design_analysis: PyDesignAnalysis
:param parent:
:param py_design_analysis:
:return:
"""
PyDesignModelItem.__init__(self, parent, parent.model)
self._data_object = py_design_analysis
self._data_dict[PyDesignNamedObject.NAME] = self.data_name
self._data_dict[PyDesignCommon.VALUE] = self.data_value
'''self._data_dict[PDP.size_temp] = self.data_size_temp
self._data_dict[PDP.medium_type] = self.data_medium_type
self._data_dict[PDP.size_pres] = self.data_size_pres'''
self._set_data_dict[PyDesignNamedObject.NAME] = self.set_data_name
self._icon = get_icon("analysis")
py_design_analysis.add_listener(self)
self._children.append(PyDesignParametersItem(py_design_analysis.properties, self))
self._children.append(PyDesignCalcSheetsItem(py_design_analysis, self))
self._children.append(PyDesignGeometriesItem(py_design_analysis, self))
self._children.append(PyDesignMeshesItem(py_design_analysis, self))
self._children.append(PyDesignMaterialsItem(py_design_analysis, self))
self._children.append(PyDesignSolversItem(py_design_analysis, self))
self._type = "PyDesignAnalysisModelItem"
self._context_menu = QMenu()
add_prop_menu = self._context_menu.addAction("Add property")
add_prop_menu.triggered.connect(self.on_add_property)
add_prop_menu = self._context_menu.addAction("Add calculation sheet")
add_prop_menu.triggered.connect(self.on_add_sheet)
add_prop_menu = self._context_menu.addAction("Add geometry")
add_prop_menu.triggered.connect(self.on_add_geometry)
add_prop_menu = self._context_menu.addAction("Add mesh")
add_prop_menu.triggered.connect(self.on_add_mesh)
add_prop_menu = self._context_menu.addAction("Add material")
add_prop_menu.triggered.connect(self.on_add_material)
add_prop_menu = self._context_menu.addAction("Delete analysis")
add_prop_menu.triggered.connect(self.on_delete)
def on_add_property(self):
PyDesignEventHandlers.on_add_parameter(self._data_object.properties)
def on_add_sheet(self):
PyDesignEventHandlers.on_add_sheet(self._data_object)
def on_add_geometry(self):
PyDesignEventHandlers.on_add_geometry(self._data_object, None)
def on_add_mesh(self):
pass
def on_add_material(self):
pass
def on_delete(self):
pass
def data_name(self, int_role):
if int_role == Qt.DisplayRole or int_role == Qt.EditRole:
return self._data_object.name
elif int_role == Qt.DecorationRole:
return self._icon
else:
return None
def set_data_name(self, int_role, data):
if int_role == Qt.EditRole:
self._data_object.name = data
return True
def data_value(self, int_role):
if int_role == Qt.DisplayRole:
type_name = "Unknown analysis"
type_name = "3D analysis" if self._data_object.analysis_type == 0 else type_name
type_name = "2D analysis" if self._data_object.analysis_type == 1 else type_name
type_name = "2D analysis axis symmetric" if self._data_object.analysis_type == 2 else type_name
return type_name
else:
return None
def data_size_pres(self, int_role):
if int_role == Qt.DisplayRole:
return self._data_object.size_pres
else:
return None
def data_medium_type(self, int_role):
if int_role == Qt.DisplayRole:
return self._data_object.medium_type
else:
return None
@staticmethod
def item_flags(int_pdp):
default_flags = Qt.ItemIsSelectable | Qt.ItemIsEnabled
if int_pdp == PyDesignNamedObject.NAME:
return default_flags | Qt.ItemIsEditable
else:
return default_flags
def on_context_menu(self, point):
self._context_menu.exec_(point)
def on_event(self, event):
"""
:type event: PyDesignEvent
:param event:
:return:
"""
self._model.on_item_changed(self)
if event.type == PyDesignEvent.EndItemAddedEvent:
pass
#new_item = PyDesignParameterItem(self, event.value)
#self.add_child(new_item)
return
|
pracedru/pyDesign
|
PyDesignModel/PyDesignAnalysisItem.py
|
Python
|
mit
| 5,100
|
from __future__ import division, print_function, absolute_import
import numpy as np
import numpy.testing as npt
import pytest
from pytest import raises as assert_raises
from scipy._lib._numpy_compat import suppress_warnings
from scipy.integrate import IntegrationWarning
from scipy import stats
from scipy.special import betainc
from .common_tests import (check_normalization, check_moment, check_mean_expect,
check_var_expect, check_skew_expect,
check_kurt_expect, check_entropy,
check_private_entropy, check_entropy_vect_scale,
check_edge_support, check_named_args,
check_random_state_property,
check_meth_dtype, check_ppf_dtype, check_cmplx_deriv,
check_pickling, check_rvs_broadcast)
from scipy.stats._distr_params import distcont
"""
Test all continuous distributions.
Parameters were chosen for those distributions that pass the
Kolmogorov-Smirnov test. This provides safe parameters for each
distributions so that we can perform further testing of class methods.
These tests currently check only/mostly for serious errors and exceptions,
not for numerically exact results.
"""
# Note that you need to add new distributions you want tested
# to _distr_params
DECIMAL = 5 # specify the precision of the tests # increased from 0 to 5
# Last four of these fail all around. Need to be checked
distcont_extra = [
['betaprime', (100, 86)],
['fatiguelife', (5,)],
['mielke', (4.6420495492121487, 0.59707419545516938)],
['invweibull', (0.58847112119264788,)],
# burr: sample mean test fails still for c<1
['burr', (0.94839838075366045, 4.3820284068855795)],
# genextreme: sample mean test, sf-logsf test fail
['genextreme', (3.3184017469423535,)],
]
distslow = ['kappa4', 'rdist', 'gausshyper',
'recipinvgauss', 'genexpon',
'vonmises', 'vonmises_line', 'mielke', 'semicircular',
'cosine', 'invweibull', 'powerlognorm', 'johnsonsu', 'kstwobign']
# distslow are sorted by speed (very slow to slow)
# These distributions fail the complex derivative test below.
# Here 'fail' mean produce wrong results and/or raise exceptions, depending
# on the implementation details of corresponding special functions.
# cf https://github.com/scipy/scipy/pull/4979 for a discussion.
fails_cmplx = set(['beta', 'betaprime', 'chi', 'chi2', 'dgamma', 'dweibull',
'erlang', 'f', 'gamma', 'gausshyper', 'gengamma',
'gennorm', 'genpareto', 'halfgennorm', 'invgamma',
'ksone', 'kstwobign', 'levy_l', 'loggamma', 'logistic',
'maxwell', 'nakagami', 'ncf', 'nct', 'ncx2', 'norminvgauss',
'pearson3', 'rice', 't', 'skewnorm', 'tukeylambda',
'vonmises', 'vonmises_line', 'rv_histogram_instance'])
_h = np.histogram([1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5, 6,
6, 6, 6, 7, 7, 7, 8, 8, 9], bins=8)
histogram_test_instance = stats.rv_histogram(_h)
def cases_test_cont_basic():
for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]:
if distname == 'levy_stable':
continue
elif distname in distslow:
yield pytest.param(distname, arg, marks=pytest.mark.slow)
else:
yield distname, arg
@pytest.mark.parametrize('distname,arg', cases_test_cont_basic())
def test_cont_basic(distname, arg):
# this test skips slow distributions
if distname == 'truncnorm':
pytest.xfail(reason=distname)
try:
distfn = getattr(stats, distname)
except TypeError:
distfn = distname
distname = 'rv_histogram_instance'
np.random.seed(765456)
sn = 500
with suppress_warnings() as sup:
# frechet_l and frechet_r are deprecated, so all their
# methods generate DeprecationWarnings.
sup.filter(category=DeprecationWarning, message=".*frechet_")
rvs = distfn.rvs(size=sn, *arg)
sm = rvs.mean()
sv = rvs.var()
m, v = distfn.stats(*arg)
check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, distname + 'sample mean test')
check_cdf_ppf(distfn, arg, distname)
check_sf_isf(distfn, arg, distname)
check_pdf(distfn, arg, distname)
check_pdf_logpdf(distfn, arg, distname)
check_cdf_logcdf(distfn, arg, distname)
check_sf_logsf(distfn, arg, distname)
alpha = 0.01
if distname == 'rv_histogram_instance':
check_distribution_rvs(distfn.cdf, arg, alpha, rvs)
else:
check_distribution_rvs(distname, arg, alpha, rvs)
locscale_defaults = (0, 1)
meths = [distfn.pdf, distfn.logpdf, distfn.cdf, distfn.logcdf,
distfn.logsf]
# make sure arguments are within support
spec_x = {'frechet_l': -0.5, 'weibull_max': -0.5, 'levy_l': -0.5,
'pareto': 1.5, 'tukeylambda': 0.3,
'rv_histogram_instance': 5.0}
x = spec_x.get(distname, 0.5)
if distname == 'invweibull':
arg = (1,)
elif distname == 'ksone':
arg = (3,)
check_named_args(distfn, x, arg, locscale_defaults, meths)
check_random_state_property(distfn, arg)
check_pickling(distfn, arg)
# Entropy
if distname not in ['kstwobign']:
check_entropy(distfn, arg, distname)
if distfn.numargs == 0:
check_vecentropy(distfn, arg)
if (distfn.__class__._entropy != stats.rv_continuous._entropy
and distname != 'vonmises'):
check_private_entropy(distfn, arg, stats.rv_continuous)
with suppress_warnings() as sup:
sup.filter(IntegrationWarning, "The occurrence of roundoff error")
sup.filter(IntegrationWarning, "Extremely bad integrand")
sup.filter(RuntimeWarning, "invalid value")
check_entropy_vect_scale(distfn, arg)
check_edge_support(distfn, arg)
check_meth_dtype(distfn, arg, meths)
check_ppf_dtype(distfn, arg)
if distname not in fails_cmplx:
check_cmplx_deriv(distfn, arg)
if distname != 'truncnorm':
check_ppf_private(distfn, arg, distname)
def test_levy_stable_random_state_property():
# levy_stable only implements rvs(), so it is skipped in the
# main loop in test_cont_basic(). Here we apply just the test
# check_random_state_property to levy_stable.
check_random_state_property(stats.levy_stable, (0.5, 0.1))
def cases_test_moments():
fail_normalization = set(['vonmises'])
fail_higher = set(['vonmises', 'ncf'])
for distname, arg in distcont[:] + [(histogram_test_instance, tuple())]:
if distname == 'levy_stable':
continue
cond1 = distname not in fail_normalization
cond2 = distname not in fail_higher
yield distname, arg, cond1, cond2, False
if not cond1 or not cond2:
# Run the distributions that have issues twice, once skipping the
# not_ok parts, once with the not_ok parts but marked as knownfail
yield pytest.param(distname, arg, True, True, True,
marks=pytest.mark.xfail)
@pytest.mark.slow
@pytest.mark.parametrize('distname,arg,normalization_ok,higher_ok,is_xfailing',
cases_test_moments())
def test_moments(distname, arg, normalization_ok, higher_ok, is_xfailing):
try:
distfn = getattr(stats, distname)
except TypeError:
distfn = distname
distname = 'rv_histogram_instance'
with suppress_warnings() as sup:
sup.filter(IntegrationWarning,
"The integral is probably divergent, or slowly convergent.")
sup.filter(category=DeprecationWarning, message=".*frechet_")
if is_xfailing:
sup.filter(IntegrationWarning)
m, v, s, k = distfn.stats(*arg, moments='mvsk')
if normalization_ok:
check_normalization(distfn, arg, distname)
if higher_ok:
check_mean_expect(distfn, arg, m, distname)
check_skew_expect(distfn, arg, m, v, s, distname)
check_var_expect(distfn, arg, m, v, distname)
check_kurt_expect(distfn, arg, m, v, k, distname)
check_loc_scale(distfn, arg, m, v, distname)
check_moment(distfn, arg, m, v, distname)
@pytest.mark.parametrize('dist,shape_args', distcont)
def test_rvs_broadcast(dist, shape_args):
if dist in ['gausshyper', 'genexpon']:
pytest.skip("too slow")
# If shape_only is True, it means the _rvs method of the
# distribution uses more than one random number to generate a random
# variate. That means the result of using rvs with broadcasting or
# with a nontrivial size will not necessarily be the same as using the
# numpy.vectorize'd version of rvs(), so we can only compare the shapes
# of the results, not the values.
# Whether or not a distribution is in the following list is an
# implementation detail of the distribution, not a requirement. If
# the implementation the rvs() method of a distribution changes, this
# test might also have to be changed.
shape_only = dist in ['betaprime', 'dgamma', 'exponnorm', 'norminvgauss',
'nct', 'dweibull', 'rice', 'levy_stable', 'skewnorm']
distfunc = getattr(stats, dist)
loc = np.zeros(2)
scale = np.ones((3, 1))
nargs = distfunc.numargs
allargs = []
bshape = [3, 2]
# Generate shape parameter arguments...
for k in range(nargs):
shp = (k + 4,) + (1,)*(k + 2)
allargs.append(shape_args[k]*np.ones(shp))
bshape.insert(0, k + 4)
allargs.extend([loc, scale])
# bshape holds the expected shape when loc, scale, and the shape
# parameters are all broadcast together.
check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, 'd')
def test_rvs_gh2069_regression():
# Regression tests for gh-2069. In scipy 0.17 and earlier,
# these tests would fail.
#
# A typical example of the broken behavior:
# >>> norm.rvs(loc=np.zeros(5), scale=np.ones(5))
# array([-2.49613705, -2.49613705, -2.49613705, -2.49613705, -2.49613705])
np.random.seed(123)
vals = stats.norm.rvs(loc=np.zeros(5), scale=1)
d = np.diff(vals)
npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
vals = stats.norm.rvs(loc=0, scale=np.ones(5))
d = np.diff(vals)
npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
vals = stats.norm.rvs(loc=np.zeros(5), scale=np.ones(5))
d = np.diff(vals)
npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
vals = stats.norm.rvs(loc=np.array([[0], [0]]), scale=np.ones(5))
d = np.diff(vals.ravel())
npt.assert_(np.all(d != 0), "All the values are equal, but they shouldn't be!")
assert_raises(ValueError, stats.norm.rvs, [[0, 0], [0, 0]],
[[1, 1], [1, 1]], 1)
assert_raises(ValueError, stats.gamma.rvs, [2, 3, 4, 5], 0, 1, (2, 2))
assert_raises(ValueError, stats.gamma.rvs, [1, 1, 1, 1], [0, 0, 0, 0],
[[1], [2]], (4,))
def check_sample_meanvar_(distfn, arg, m, v, sm, sv, sn, msg):
# this did not work, skipped silently by nose
if np.isfinite(m):
check_sample_mean(sm, sv, sn, m)
if np.isfinite(v):
check_sample_var(sv, sn, v)
def check_sample_mean(sm, v, n, popmean):
# from stats.stats.ttest_1samp(a, popmean):
# Calculates the t-obtained for the independent samples T-test on ONE group
# of scores a, given a population mean.
#
# Returns: t-value, two-tailed prob
df = n-1
svar = ((n-1)*v) / float(df) # looks redundant
t = (sm-popmean) / np.sqrt(svar*(1.0/n))
prob = betainc(0.5*df, 0.5, df/(df + t*t))
# return t,prob
npt.assert_(prob > 0.01, 'mean fail, t,prob = %f, %f, m, sm=%f,%f' %
(t, prob, popmean, sm))
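# Hedged note (not in the original file): the statistic above is the one-sample t-test
# computed from summary statistics, t = (sm - popmean) / sqrt(v / n) with df = n - 1,
# and the two-tailed p-value comes from the identity
# P(|T| > t) = betainc(df/2, 1/2, df / (df + t**2)).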
def check_sample_var(sv, n, popvar):
# two-sided chisquare test for sample variance equal to
# hypothesized variance
df = n-1
    chi2 = (n-1)*sv/float(popvar)
pval = stats.distributions.chi2.sf(chi2, df) * 2
npt.assert_(pval > 0.01, 'var fail, t, pval = %f, %f, v, sv=%f, %f' %
(chi2, pval, popvar, sv))
def check_cdf_ppf(distfn, arg, msg):
values = [0.001, 0.5, 0.999]
npt.assert_almost_equal(distfn.cdf(distfn.ppf(values, *arg), *arg),
values, decimal=DECIMAL, err_msg=msg +
' - cdf-ppf roundtrip')
def check_sf_isf(distfn, arg, msg):
npt.assert_almost_equal(distfn.sf(distfn.isf([0.1, 0.5, 0.9], *arg), *arg),
[0.1, 0.5, 0.9], decimal=DECIMAL, err_msg=msg +
' - sf-isf roundtrip')
npt.assert_almost_equal(distfn.cdf([0.1, 0.9], *arg),
1.0 - distfn.sf([0.1, 0.9], *arg),
decimal=DECIMAL, err_msg=msg +
' - cdf-sf relationship')
def check_pdf(distfn, arg, msg):
# compares pdf at median with numerical derivative of cdf
median = distfn.ppf(0.5, *arg)
eps = 1e-6
pdfv = distfn.pdf(median, *arg)
if (pdfv < 1e-4) or (pdfv > 1e4):
# avoid checking a case where pdf is close to zero or
# huge (singularity)
median = median + 0.1
pdfv = distfn.pdf(median, *arg)
cdfdiff = (distfn.cdf(median + eps, *arg) -
distfn.cdf(median - eps, *arg))/eps/2.0
# replace with better diff and better test (more points),
# actually, this works pretty well
msg += ' - cdf-pdf relationship'
npt.assert_almost_equal(pdfv, cdfdiff, decimal=DECIMAL, err_msg=msg)
def check_pdf_logpdf(distfn, args, msg):
# compares pdf at several points with the log of the pdf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
pdf = distfn.pdf(vals, *args)
logpdf = distfn.logpdf(vals, *args)
pdf = pdf[pdf != 0]
logpdf = logpdf[np.isfinite(logpdf)]
msg += " - logpdf-log(pdf) relationship"
npt.assert_almost_equal(np.log(pdf), logpdf, decimal=7, err_msg=msg)
def check_sf_logsf(distfn, args, msg):
# compares sf at several points with the log of the sf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
sf = distfn.sf(vals, *args)
logsf = distfn.logsf(vals, *args)
sf = sf[sf != 0]
logsf = logsf[np.isfinite(logsf)]
msg += " - logsf-log(sf) relationship"
npt.assert_almost_equal(np.log(sf), logsf, decimal=7, err_msg=msg)
def check_cdf_logcdf(distfn, args, msg):
# compares cdf at several points with the log of the cdf
points = np.array([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
vals = distfn.ppf(points, *args)
cdf = distfn.cdf(vals, *args)
logcdf = distfn.logcdf(vals, *args)
cdf = cdf[cdf != 0]
logcdf = logcdf[np.isfinite(logcdf)]
msg += " - logcdf-log(cdf) relationship"
npt.assert_almost_equal(np.log(cdf), logcdf, decimal=7, err_msg=msg)
def check_distribution_rvs(dist, args, alpha, rvs):
# test from scipy.stats.tests
# this version reuses existing random variables
D, pval = stats.kstest(rvs, dist, args=args, N=1000)
if (pval < alpha):
D, pval = stats.kstest(dist, '', args=args, N=1000)
npt.assert_(pval > alpha, "D = " + str(D) + "; pval = " + str(pval) +
"; alpha = " + str(alpha) + "\nargs = " + str(args))
def check_vecentropy(distfn, args):
npt.assert_equal(distfn.vecentropy(*args), distfn._entropy(*args))
def check_loc_scale(distfn, arg, m, v, msg):
loc, scale = 10.0, 10.0
mt, vt = distfn.stats(loc=loc, scale=scale, *arg)
npt.assert_allclose(m*scale + loc, mt)
npt.assert_allclose(v*scale*scale, vt)
def check_ppf_private(distfn, arg, msg):
# fails by design for truncnorm: self.nb not defined
ppfs = distfn._ppf(np.array([0.1, 0.5, 0.9]), *arg)
npt.assert_(not np.any(np.isnan(ppfs)), msg + 'ppf private is nan')
|
Eric89GXL/scipy
|
scipy/stats/tests/test_continuous_basic.py
|
Python
|
bsd-3-clause
| 16,318
|
"""
We try to be very hygienic regarding the exceptions we throw:
Every Exception mitmproxy raises shall be a subclass of ProxyException.
See also: http://lucumr.pocoo.org/2014/10/16/on-error-handling/
"""
from __future__ import absolute_import, print_function, division
import sys
import traceback
class ProxyException(Exception):
"""
Base class for all exceptions thrown by mitmproxy.
"""
def __init__(self, message=None):
super(ProxyException, self).__init__(message)
class Kill(ProxyException):
"""
Signal that both client and server connection(s) should be killed immediately.
"""
pass
class ProtocolException(ProxyException):
pass
class TlsProtocolException(ProtocolException):
pass
class ClientHandshakeException(TlsProtocolException):
def __init__(self, message, server):
super(ClientHandshakeException, self).__init__(message)
self.server = server
class InvalidServerCertificate(TlsProtocolException):
def __repr__(self):
# In contrast to most others, this is a user-facing error which needs to look good.
return str(self)
class Socks5ProtocolException(ProtocolException):
pass
class HttpProtocolException(ProtocolException):
pass
class Http2ProtocolException(ProtocolException):
pass
class ServerException(ProxyException):
pass
class ContentViewException(ProxyException):
pass
class ReplayException(ProxyException):
pass
class ScriptException(ProxyException):
@classmethod
def from_exception_context(cls, cut_tb=1):
"""
Must be called while the current stack handles an exception.
Args:
cut_tb: remove N frames from the stack trace to hide internal calls.
"""
exc_type, exc_value, exc_traceback = sys.exc_info()
while cut_tb > 0:
exc_traceback = exc_traceback.tb_next
cut_tb -= 1
tb = "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
return cls(tb)
class FlowReadException(ProxyException):
pass
class ControlException(ProxyException):
pass
class OptionsError(Exception):
pass
class AddonError(Exception):
pass
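# A minimal usage sketch for ScriptException.from_exception_context
# (illustrative only; this helper is not part of the original module):
def _example_script_exception():
    try:
        raise ValueError("error inside a user script")
    except ValueError:
        # sys.exc_info() is still populated here; cut_tb=0 keeps the
        # full traceback instead of hiding the outermost frame.
        return ScriptException.from_exception_context(cut_tb=0)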
|
jvillacorta/mitmproxy
|
mitmproxy/exceptions.py
|
Python
|
mit
| 2,225
|
from mongoWork import MongoWork
from flask import Flask, request, session, render_template, url_for, redirect, jsonify
from json import loads
app = Flask('lc-server')
app.secret_key = 'developerkey'
config = {}
mongo = None
@app.route('/', methods=['GET', 'POST'])
def index():
if session.get('logged') is not None:
return render_template('index.html', computers=mongo.findComputers())
else:
return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'GET':
if session.get('logged') is None:
return render_template('login.html')
else:
return redirect(url_for('index'))
elif request.method == 'POST':
if request.form.get('user') == config['siteUser']:
if request.form.get('pass') == config['sitePass']:
session['logged'] = True
return redirect(url_for('index'))
return render_template('login.html')
@app.route('/heartbeat')
def heartbeat():
return jsonify(mongo.heartbeatResponse(request.environ.get('HTTP_X_REAL_IP', request.remote_addr)))
@app.route('/addtask', methods=['POST'])
def addtask():
if session.get('logged') is None:
return redirect(url_for('login'))
elif 'shell@@@' in request.form.get('task', ''):  # default '' avoids TypeError when 'task' is absent
return jsonify(mongo.addTask(
request.environ.get('HTTP_X_REAL_IP', request.remote_addr),
request.form['task']))
@app.route('/dataload', methods=['POST'])
def dataload():
req_json = request.get_json()
req_json["ip"] = request.environ.get('HTTP_X_REAL_IP', request.remote_addr)
try:
mongo.saveInfo(req_json)
return jsonify({"accepted": True})
except Exception as e:
print("[dataload]:{}".format(e))
return jsonify({"accepted": False})
@app.route('/computer', methods=['GET'])
def getComputer():
if request.args.get('host') and len(request.args['host'].split('.')) == 4:
pcs = mongo.findComputers({'ip': request.args['host']})
if len(pcs) == 0:
return redirect(url_for('index'))
else:
return render_template('computer.html', computer=pcs[0])
else:
return redirect(url_for('index'))
@app.route('/logout')
def logout():
if session.get('logged') is not None:
session.pop('logged')
return render_template('logout.html')
else:
return redirect(url_for('login'))
def loadConfig():
global config
o = open('./data/config.json')
data = o.read()
o.close()
config = loads(data)
loadConfig()
mongo = MongoWork(config)
app.run(host=config['address'], port=config['port'], debug=config['debug'], threaded=config['threaded'])
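# Illustrative ./data/config.json sketch, inferred from the keys read above
# (the values are assumptions):
# {
#     "siteUser": "admin",
#     "sitePass": "change-me",
#     "address": "0.0.0.0",
#     "port": 5000,
#     "debug": false,
#     "threaded": true
# }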
|
arseniypetrikor/lc-server
|
main.py
|
Python
|
gpl-3.0
| 2,557
|
import _plotly_utils.basevalidators
class YcalendarValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="ycalendar", parent_name="histogram", **kwargs):
super(YcalendarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
role=kwargs.pop("role", "info"),
values=kwargs.pop(
"values",
[
"gregorian",
"chinese",
"coptic",
"discworld",
"ethiopian",
"hebrew",
"islamic",
"julian",
"mayan",
"nanakshahi",
"nepali",
"persian",
"jalali",
"taiwan",
"thai",
"ummalqura",
],
),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/histogram/_ycalendar.py
|
Python
|
mit
| 1,058
|
# Copyright (C) 2016 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.
from sos.plugins import Plugin, RedHatPlugin
class Dracut(Plugin, RedHatPlugin):
""" Dracut initramfs generator """
plugin_name = "dracut"
packages = ("dracut",)
def setup(self):
self.add_copy_spec([
"/etc/dracut.conf",
"/etc/dracut.conf.d"
])
self.add_cmd_output([
"dracut --list-modules",
"dracut --print-cmdline"
])
# vim: set et ts=4 sw=4 :
|
nijinashok/sos
|
sos/plugins/dracut.py
|
Python
|
gpl-2.0
| 862
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
from googletrans import Translator
translator = Translator()
line = sys.stdin.readline()
while line:
    match = re.search(r'^:([^\s]+) PRIVMSG (#[^\s]+) :(.+)', line)
if not match:
line = sys.stdin.readline()
continue
who = match.group(1)
chan = match.group(2)
what = match.group(3).strip().strip('\r\n')
def reply(text):
print("PRIVMSG %s :%s" % (chan, text))
sys.stdout.flush()
if what[:10] == ':translate':
        m2 = re.search(r'^:translate (.*)', what)
if not m2:
line = sys.stdin.readline()
continue
try:
reply(translator.translate(m2.group(1), dest='fr').text)
        except Exception:  # don't swallow KeyboardInterrupt/SystemExit
reply('Oups!')
elif what[:4] == ':tr ':
        m2 = re.search(r'^:tr ([\w-]+) ([\w-]+) (.+)', what)
if not m2:
line = sys.stdin.readline()
continue
try:
reply(translator.translate(m2.group(3), src=m2.group(1), dest=m2.group(2)).text)
        except Exception:  # don't swallow KeyboardInterrupt/SystemExit
reply('Oups!')
line = sys.stdin.readline()
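# Illustrative I/O sketch (nick, channel and translated text are assumptions;
# the actual output depends on the translation service):
#   stdin:  :alice!user@host PRIVMSG #chan ::tr en fr hello world
#   stdout: PRIVMSG #chan :bonjour le monde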
|
fridim/cabot
|
plugins_examples/translate.py
|
Python
|
mit
| 1,141
|
"""
Hooks to change :class:`.PathSimulator` behavior.
These hooks group several methods together for use as part of a
:class:`.PathSimulator` ``run`` method. They allow for additional
calculations or output at several points in the simulation.
"""
import re
import time
import logging
import openpathsampling as paths
from datetime import timedelta
from openpathsampling.netcdfplus import StorableNamedObject
logger = logging.getLogger(__name__)
class SimulationNotFoundError(RuntimeError):
"""
Raised when a hook tries to access its parent simulation before knowing it.
"""
pass
class GraciousKillError(RuntimeError):
"""
Raised when a simulation reaches the maximum walltime.
"""
# makes it easy to catch **only** the max walltime but fail on anything else
pass
def _self_or_sim_property_or_err(obj, prop_name, sim_prop_name=None):
# helper function to get values either from the hook or the parent sim
if sim_prop_name is None:
# make it possible to have different names for the sim and
# self attributes (but default to same name)
sim_prop_name = prop_name
if getattr(obj, "_" + prop_name) is not None:
return getattr(obj, "_" + prop_name)
elif obj._simulation is not None:
return getattr(obj._simulation, sim_prop_name)
else:
raise SimulationNotFoundError("'" + prop_name + "' has not "
+ "been set and no hosting "
+ "simulation known to get "
+ "simulation." + sim_prop_name + " ."
)
class PathSimulatorHook(StorableNamedObject):
"""Superclass for PathSimulator hooks.
This implementation is a do-nothing hook. Subclasses should override the
relevant methods in order to add hooks to PathSimulator objects.
"""
implemented_for = ['before_simulation', 'before_step', 'after_step',
'after_simulation']
def before_simulation(self, sim, **kwargs):
pass # pragma: no-cover
def before_step(self, sim, step_number, step_info, state):
pass # pragma: no-cover
def after_step(self, sim, step_number, step_info, state, results,
hook_state):
pass # pragma: no-cover
def after_simulation(self, sim, hook_state):
pass # pragma: no-cover
class StorageHook(PathSimulatorHook):
"""
Standard hook for storage.
NOTE: Arguments passed to init take precedence over the corresponding
parameters of the PathSimulator this hook is attached to. They can
only be accessed through this hook, e.g. as hook.storage.
Parameters
----------
storage : :class:`.Storage`
where to save to; default ``None`` uses the simulation's
``storage``
frequency : int
save frequency measured in steps; default ``None`` uses the
simulation's value for ``save_frequency``
"""
implemented_for = ['before_simulation', 'after_step',
'after_simulation']
def __init__(self, storage=None, frequency=None):
self.storage = storage
self.frequency = frequency
self._simulation = None
@property
def frequency(self):
return _self_or_sim_property_or_err(self, "frequency", "save_frequency")
@frequency.setter
def frequency(self, val):
self._frequency = val
@property
def storage(self):
return _self_or_sim_property_or_err(self, "storage")
@storage.setter
def storage(self, val):
self._storage = val
def before_simulation(self, sim, **kwargs):
self._simulation = sim
def after_step(self, sim, step_number, step_info, state, results,
hook_state):
if self.storage is not None:
try:
self.storage.stash(results)
except AttributeError:
self.storage.save(results)
if step_number % self.frequency == 0:
self.storage.sync_all()
def after_simulation(self, sim, hook_state):
if self.storage is not None:
self.storage.sync_all()
class ShootFromSnapshotsOutputHook(PathSimulatorHook):
"""Default (serial) output for ShootFromSnapshotsSimulation objects.
Updates every time a new snapshot is shot from.
NOTE: Arguments passed to init take precedence over the corresponding
parameters of the PathSimulator this hook is attached to. They can
only be accessed through this hook, e.g. as hook.output_stream.
Parameters
----------
output_stream : stream
where to write the results; default ``None`` uses the simulation's
``output_stream``
allow_refresh : bool
whether to allow refresh (see :meth:`.refresh_output`); default
``None`` uses the simulation's value
"""
implemented_for = ['before_simulation', 'before_step']
def __init__(self, output_stream=None, allow_refresh=None):
self.output_stream = output_stream
self.allow_refresh = allow_refresh
self._simulation = None
@property
def output_stream(self):
return _self_or_sim_property_or_err(self, "output_stream")
@output_stream.setter
def output_stream(self, val):
self._output_stream = val
@property
def allow_refresh(self):
return _self_or_sim_property_or_err(self, "allow_refresh")
@allow_refresh.setter
def allow_refresh(self, val):
self._allow_refresh = val
def before_simulation(self, sim, **kwargs):
self._simulation = sim
def before_step(self, sim, step_number, step_info, state):
snap_num, n_snapshots, step, n_per_snapshot = step_info
paths.tools.refresh_output(
"Working on snapshot %d / %d; shot %d / %d" % (
snap_num+1, n_snapshots, step+1, n_per_snapshot
),
output_stream=self.output_stream,
refresh=self.allow_refresh
)
class SampleSetSanityCheckHook(PathSimulatorHook):
"""
Check sample set sanity.
Parameters
----------
frequency : int
check frequency measured in steps; default ``None`` uses the
simulation's value for ``save_frequency``
"""
implemented_for = ['before_simulation', 'after_step']
def __init__(self, frequency=None):
self.frequency = frequency
self._simulation = None
@property
def frequency(self):
return _self_or_sim_property_or_err(self, "frequency", "save_frequency")
@frequency.setter
def frequency(self, val):
self._frequency = val
def before_simulation(self, sim, **kwargs):
self._simulation = sim
def after_step(self, sim, step_number, step_info, state, results,
hook_state):
if step_number % self.frequency == 0:
if sim.sample_set is not None:
# some PathSimulators never set their sample_set
# but PathSimulator.__init__ sets it to None
sim.sample_set.sanity_check()
class LiveVisualizerHook(PathSimulatorHook):
"""
LiveVisualization using the :class:`openpathsampling.StepVisualizer2D`.
Updates every ``simulation.status_update_frequency`` MCSteps, where
simulation is the ``PathSimulator`` this hook is attached to.
NOTE: You will have to set PathSimulator.allow_refresh = False
Otherwise the LiveVisualization will get refreshed away
(i.e. deleted) right after creation.
NOTE: Arguments passed to init take precedence over the corresponding
parameters of the PathSimulator this hook is attached to. They can
only be accessed through this hook, e.g. as hook.live_visualizer.
Parameters
----------
live_visualizer : :class:`openpathsampling.StepVisualizer2D`
default ``None`` uses the simulation's live_visualizer
status_update_frequency : int
number of steps between two refreshes of the visualization;
default ``None`` uses the simulation's value (PathSampling default=1)
"""
# NOTE: we visualize after the step, because otherwise the 'next' MCstep
#       would depend on the 'previous' one just for visualization;
#       this deviates from the previous implementation but avoids
#       having to pass the previous MCstep to before_step hooks
implemented_for = ['before_simulation', 'after_step']
def __init__(self, live_visualizer=None, status_update_frequency=None):
self.live_visualizer = live_visualizer
self.status_update_frequency = status_update_frequency
self._simulation = None
@property
def live_visualizer(self):
return _self_or_sim_property_or_err(self, "live_visualizer")
@live_visualizer.setter
def live_visualizer(self, val):
self._live_visualizer = val
@property
def status_update_frequency(self):
return _self_or_sim_property_or_err(self, "status_update_frequency")
@status_update_frequency.setter
def status_update_frequency(self, val):
self._status_update_frequency = val
def before_simulation(self, sim, **kwargs):
self._simulation = sim
def after_step(self, sim, step_number, step_info, state, results,
hook_state):
if step_number % self.status_update_frequency == 0:
# do we visualize this step?
if self.live_visualizer is not None and results is not None:
# do we visualize at all?
self.live_visualizer.draw_ipynb(results)
class PathSamplingOutputHook(PathSimulatorHook):
"""
Default (serial) output for PathSamplingSimulation objects.
Updates every ``PathSampling.status_update_frequency`` MCSteps.
NOTE: Arguments passed to init take precedence over the corresponding
parameters of the PathSimulator this hook is attached to. They can
only be accessed through this hook, e.g. as hook.output_stream.
Parameters
----------
output_stream : stream
where to write the results; default ``None`` uses the simulation's
``output_stream``
allow_refresh : bool
whether to allow refresh (see :meth:`.refresh_output`); default
``None`` uses the simulation's value
status_update_frequency : int
number of steps between two refreshes of the visualization;
default ``None`` uses the simulation's value (PathSampling default=1)
"""
implemented_for = ['before_simulation', 'before_step', 'after_simulation']
def __init__(self, output_stream=None, allow_refresh=None,
status_update_frequency=None):
self.output_stream = output_stream
self.allow_refresh = allow_refresh
self.status_update_frequency = status_update_frequency
self._simulation = None
@property
def output_stream(self):
return _self_or_sim_property_or_err(self, "output_stream")
@output_stream.setter
def output_stream(self, val):
self._output_stream = val
@property
def allow_refresh(self):
return _self_or_sim_property_or_err(self, "allow_refresh")
@allow_refresh.setter
def allow_refresh(self, val):
self._allow_refresh = val
@property
def status_update_frequency(self):
return _self_or_sim_property_or_err(self, "status_update_frequency")
@status_update_frequency.setter
def status_update_frequency(self, val):
self._status_update_frequency = val
def before_simulation(self, sim, **kwargs):
self._simulation = sim
self._initial_time = time.time()
def before_step(self, sim, step_number, step_info, state):
if step_number % self.status_update_frequency == 0:
nn, n_steps = step_info
elapsed = time.time() - self._initial_time
paths.tools.refresh_output(
"Working on Monte Carlo cycle number " + str(step_number)
+ "\n" + paths.tools.progress_string(nn, n_steps, elapsed),
refresh=self.allow_refresh,
output_stream=self.output_stream
)
def after_simulation(self, sim, hook_state):
paths.tools.refresh_output(
"DONE! Completed " + str(sim.step) + " Monte Carlo cycles.\n",
refresh=False,
output_stream=self.output_stream
)
class GraciousKillHook(PathSimulatorHook):
"""
'Graciously' kill a simulation when the maximum walltime is reached.
If this hook is attached to a PathSimulator, it will continuously estimate
the runtime per step. After each step it checks whether the next step would
exceed the maximum walltime; if so, it syncs and closes the storage, then
calls a custom/user-provided function (if any) and finally raises a
'GraciousKillError' to end the simulation loop.
Example usage
-------------
```
kill_hook = GraciousKillHook("3 minutes 34 seconds")
# sampler is a `PathSimulator`
sampler.attach_hook(kill_hook)
try:
sampler.run(2000)
except GraciousKillError:
print("Simulation ended due to maximum walltime reached")
```
"""
implemented_for = ["before_simulation", "after_step"]
def __init__(self, max_walltime, fuzziness=1, final_call=None):
"""
Initialize a GraciousKillHook.
Parameters
----------
max_walltime - str, maximum allowed walltime,
e.g. `23 hours 20 minutes`
fuzziness - float (default=1), fraction added to the estimated time per step
when checking whether the next step could exceed the maximum
walltime, i.e. the default is to stop when the remaining time
suffices for less than 2 steps
final_call - None or callable (default None), will be called when the
simulation is killed, it must take one argument, the hook
passes the step number of the step after which it killed
the simulation
"""
self.max_walltime = max_walltime
self._timedelta = self._get_timedelta(max_walltime).total_seconds()
logger.info("Parsed time string '{:s}' as a ".format(self.max_walltime)
+ "timedelta of {:d} seconds.".format(int(self._timedelta))
)
self.fuzziness = fuzziness
self.final_call = final_call
def _get_timedelta(self, time_str):
# https://stackoverflow.com/questions/35545140
timespaces = {"days": 0}
for timeunit in "year month week day hour minute second".split():
content = re.findall(r"([0-9]*?)\s*?" + timeunit, time_str)
if content:
timespaces[timeunit + "s"] = int(content[0])
timespaces["days"] += (30 * timespaces.pop("months", 0)
+ 365 * timespaces.pop("years", 0)
)
return timedelta(**timespaces)
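    # Illustrative behaviour (an assumption based on the regex above):
    #   self._get_timedelta("23 hours 20 minutes")
    #   -> timedelta(hours=23, minutes=20)
    # months and years are approximated as 30 and 365 days, respectively.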
def before_simulation(self, sim, **kwargs):
self._t_start = time.time()
def after_step(self, sim, step_number, step_info, state, results,
hook_state):
now = time.time()
current_step, total_steps = step_info
running = now - self._t_start
# current_step is zero-based, i.e. it comes from a 'range'
per_step = running / (current_step + 1)
if (per_step * (1 + self.fuzziness) + running) > self._timedelta:
logger.info("Ending simulation because maximum walltime"
+ " ({:s}) ".format(self.max_walltime)
+ "would be surpassed during the next MCstep."
)
if sim.storage is not None:
sim.storage.sync_all()
sim.storage.close()
if self.final_call is not None:
# call the user's custom exit function
self.final_call(step_number)
raise GraciousKillError("Maximum walltime "
+ "({:s})".format(self.max_walltime)
+ " reached."
)
|
choderalab/openpathsampling
|
openpathsampling/beta/hooks.py
|
Python
|
lgpl-2.1
| 16,334
|
"""Simple script to walk the lights up and down"""
import time
# exit_event is passed in from the pre/post show script and is required:
# if an exit_event is set, the pre/post show script can terminate this script.
# Do not forget to include it; if you do not, sms commands will not be able
# to end the script and you will have to wait for it to finish.
# (the hc module used below is presumably injected into this namespace by
# the show runner in the same way)
def main(exit_event):
"""
ladder
Lights one channel at a time in order
Then backs down to the first
Then repeat everything 20 times
"""
# this is a list of all the channels you have access to
lights = hc.channels
# start with all the lights off
hc.turn_off_lights()
# pause for 1 second
time.sleep(1)
# working loop
for _ in range(20):
# here we just loop over the gpio pins and do something with them
# except the last one
for light in range(len(lights)-1):
# turn off all the lights
hc.turn_off_lights()
# then turn on one
hc.turn_on_light(light)
# wait a little bit
time.sleep(.04)
# to make the transition back smoother we handle the last pin here
hc.turn_off_lights()
hc.turn_on_light(light + 1)
# this loop walks it back the other way
for light in range(len(lights)-1, 0, -1):
# turn off all the lights
hc.turn_off_lights()
# then turn on one
hc.turn_on_light(light)
# wait a little bit
time.sleep(.04)
# again to make it smoother handle the first pin like the last pin
hc.turn_off_lights()
hc.turn_on_light(light - 1)
# this is required so that an sms play now command will
# end your script and any subprocess you have started
if exit_event.is_set():
break
# let's make sure we turn off the lights before we go back to the show
hc.turn_off_lights()
|
wheeldog515/lightshowPi
|
py/examples/ladder.py
|
Python
|
bsd-2-clause
| 1,958
|
from __future__ import division
import numpy
import scipy.misc
import operator
import math
from util import memoize_instance
import warnings
from size_history import ConstantTruncatedSizeHistory
import numpy as np
from convolution_momi import convolve_chen
math_mod = math
myint,myfloat = int,float
## UNCOMMENT FOR HIGHER PRECISION
# import gmpy2
# math_mod = gmpy2
# gmpy2.get_context().precision=100
# myint,myfloat = gmpy2.mpz, gmpy2.mpfr
'''
Formulas from Hua Chen 2012, Theoretical Population Biology
Note that for all formulas from that paper, N = diploid population size
'''
class _SumProduct_Chen(object):
'''
compute sfs of data via Hua Chen's sum-product algorithm
'''
def __init__(self, demography):
self.G = demography
attach_Chen(self.G)
def p(self):
'''Return the likelihood for the data'''
return self.partial_likelihood_bottom(self.G.root)[1]
def partial_likelihood_top(self, node):
lik, sfs = self.partial_likelihood_bottom(node)
return self.G.chen[node].apply_transition(lik), sfs
def partial_likelihood_bottom(self, node):
sfs = 0.0
if self.G.is_leaf(node):
n = self.G.n_lineages_subtended_by[node]
n_der = self.G._n_derived_subtended_by[node]
lik = np.zeros((len(n_der), n+1, n+1))
lik[range(len(n_der)), n-n_der, n_der] = 1.0
else:
children = tuple(self.G[node])
ch_liks, ch_sfs = zip(*[self.partial_likelihood_top(ch)
for ch in children])
ch_liks = [l * self.combinatorial_factors(ch)
for l,ch in zip(ch_liks, children)]
lik = convolve_chen(*ch_liks) / self.combinatorial_factors(node)
for ch1, ch2 in ((0,1),(1,0)):
sfs += ch_sfs[ch1] * (self.G._n_derived_subtended_by[children[ch2]] == 0)
sfs += (lik * self.truncated_sfs(node)).sum(axis=(1,2))
return lik,sfs
def combinatorial_factors(self, node):
n_node = self.G.n_lineages_subtended_by[node]
n_der = np.outer(np.ones(n_node+1), np.arange(n_node+1))
n_anc = np.outer(np.arange(n_node+1), np.ones(n_node+1))
return scipy.misc.comb(n_der + n_anc, n_der)
def truncated_sfs(self, node):
n_node = self.G.n_lineages_subtended_by[node]
sfs = np.zeros((n_node+1,n_node+1))
for n_der in range(1,n_node+1):
for n_anc in range(n_node-n_der+1):
sfs[n_anc,n_der] = self.G.chen[node].freq(n_der, n_der+n_anc)
return sfs
def attach_Chen(tree):
'''Attach Hua Chen equations to each node of tree.
Does nothing if these formulas have already been added.'''
if not hasattr(tree, "chen"):
tree.chen = {}
for node in tree:
size_model = tree._node_data[node]['model']
if type(size_model) is not ConstantTruncatedSizeHistory:
raise NotImplementedError("Hua Chen's equations only implemented for constant population size along each branch")
tree.chen[node] = SFS_Chen(size_model.N / 2.0, size_model.tau, tree.n_lineages_subtended_by[node])
class SFS_Chen(object):
def __init__(self, N_diploid, timeLen, max_n):
self.timeLen = timeLen
self.N_diploid = N_diploid
self.max_n = max_n
# precompute
for n in range(1,max_n+1):
for i in range(1,n+1):
self.freq(i,n)
max_m = n
if timeLen == float('inf'):
max_m = 1
for m in range(1,max_m+1):
self.g(n,m)
@memoize_instance
def g(self, n, m):
return g(n, m, self.N_diploid, self.timeLen)
@memoize_instance
def ET(self, i, n, m):
try:
return ET(i, n, m, self.N_diploid, self.timeLen)
except ZeroDivisionError:
warnings.warn("divide by zero in hua chen formula")
return 0.0
@memoize_instance
def ES_i(self, i, n, m):
'''TPB equation 4'''
assert n >= m
return math.fsum([p_n_k(i, n, k) * k * self.ET(k, n, m) for k in range(m, n + 1)])
@memoize_instance
def freq(self, i, n):
max_m = n-i+1
if self.timeLen == float('inf'):
max_m = 1
ret = 0.0
for m in range(1,max_m+1):
ret += self.ES_i(i, n, m)
return ret
def apply_transition(self, likelihoods):
## einsum should be faster, but causes memory overflow for our machines/tests
# return np.einsum('ijkl,mij->mkl',
# self.transition_tensor(),
# likelihoods)
## just use for loops
n = likelihoods.shape[-1]-1
assert likelihoods.shape[1:] == (n+1,n+1)
ret = np.zeros(likelihoods.shape)
for n_top in range(1,n+1):
for n_bottom in range(n_top,n+1):
for n_derived_bottom in range(n_bottom+1):
for n_derived_top in range(n_derived_bottom+1):
n_ancestral_bottom = n_bottom - n_derived_bottom
n_ancestral_top = n_top - n_derived_top
tmp = np.array(likelihoods[:,n_ancestral_bottom,n_derived_bottom])
tmp *= self.g(n_bottom,
n_top) * math.exp(log_urn_prob(n_derived_top,
n_ancestral_top,
n_derived_bottom,
n_ancestral_bottom))
ret[:,n_ancestral_top,n_derived_top] += tmp
return ret
@memoize_instance
def transition_tensor(self):
n = self.max_n
ret = np.zeros((n+1,n+1,n+1,n+1))
for n_top in range(1,n+1):
for n_bottom in range(n_top,n+1):
for n_derived_bottom in range(n_bottom+1):
for n_derived_top in range(n_derived_bottom+1):
n_ancestral_bottom = n_bottom - n_derived_bottom
n_ancestral_top = n_top - n_derived_top
ret[n_ancestral_bottom,
n_derived_bottom,
n_ancestral_top,
n_derived_top] = self.g(n_bottom,
n_top) * math.exp(log_urn_prob(n_derived_top,
n_ancestral_top,
n_derived_bottom,
n_ancestral_bottom))
return ret
def log_factorial(n):
return math_mod.lgamma(n+1)
def log_rising(n,k):
return log_factorial(n+k-1) - log_factorial(n-1)
def log_falling(n,k):
return log_factorial(n) - log_factorial(n-k)
def gcoef(k, n, m, N_diploid, tau):
k, n, m = map(myint, [k, n, m])
N_diploid = myfloat(N_diploid)
tau = myfloat(tau)
return (2*k - 1) * (-1)**(k - m) * math_mod.exp(log_rising(m, k-1) + log_falling(n, k) - log_factorial(m) - log_factorial(k - m) - log_rising(n, k))
#return (2*k - 1) * (-1)**(k - m) * rising(m, k-1) * falling(n, k) / math_mod.factorial(m) / math_mod.factorial(k - m) / rising(n, k)
def g_sum(n, m, N_diploid, tau):
if tau == float("inf"):
if m == 1:
return 1.0
return 0.0
tau = myfloat(tau)
return float(sum([gcoef(k, n, m, N_diploid, tau) * math_mod.exp(-k * (k - 1) * tau / 4 / N_diploid) for k in range(m, n + 1)]))
g = g_sum
def formula1(n, m, N_diploid, tau):
def expC2(k):
return math_mod.exp(-k * (k - 1) / 4 / N_diploid * tau)
r = sum(gcoef(k, n, m, N_diploid, tau) *
((expC2(m) - expC2(k)) / (k - m) / (k + m - 1) - (tau / 4 / N_diploid * expC2(m)))
for k in range(m + 1, n + 1))
#q = 4 * N_diploid / g(n, m, N_diploid, tau)
q = 4 * N_diploid
return float(r * q)
def formula3(j, n, m, N_diploid, tau):
# Switch argument to j here to stay consistent with the paper.
j, n, m = map(myint, [j, n, m])
tau, N_diploid = map(myfloat, [tau, N_diploid])
def expC2(kk):
return math_mod.exp(-kk * (kk - 1) / 4 / N_diploid * tau)
r = sum(gcoef(k, n, j, N_diploid, tau) * # was gcoef(k, n, j + 1, N_diploid, tau) *
sum(gcoef(ell, j, m, N_diploid, tau) * ( # was gcoef(ell, j - 1, m, N_diploid, tau) * (
(
expC2(j) * (tau / 4 / N_diploid - ((k - j) * (k + j - 1) + (ell - j)*(ell + j - 1)) / # tau / 4 / N_diploid was 1 in this
(k - j) / (k + j- 1) / (ell - j) / (ell + j - 1))
)
+
(
expC2(k) * (ell - j) * (ell + j - 1) / (k - j) / (k + j - 1) / (ell - k) / (ell + k - 1)
)
-
(
expC2(ell) * (k - j) * (k + j - 1) / (ell - k) / (ell + k - 1) / (ell - j) / (ell + j - 1)
)
)
for ell in range(m, j)
)
for k in range(j + 1, n + 1)
)
#q = 4 * N_diploid / myfloat(g(n, m, N_diploid, tau))
q = 4 * N_diploid
return float(q * r)
def formula2(n, m, N_diploid, tau):
def expC2(k):
return math_mod.exp(-k * (k - 1) / 4 / N_diploid * tau)
r = sum(gcoef(k, n, m, N_diploid, tau) *
((expC2(k) - expC2(n)) / (n - k) / (n + k - 1) - (tau / 4 / N_diploid * expC2(n)))
for k in range(m, n))
#q = 4 * N_diploid / g(n, m, N_diploid, tau)
q = 4 * N_diploid
return float(r * q)
def ET(i, n, m, N_diploid, tau):
'''Starting with n lineages in a population of size N_diploid,
expected time when there are i lineages conditional on there
being m lineages at time tau in the past.'''
if tau == float("inf"):
if m != 1 or i == 1:
return 0.0
return 2 * N_diploid / float(nChoose2(i)) * g(n, m, N_diploid, tau)
if n == m:
return tau * (i == n) * g(n, m, N_diploid, tau)
if m == i:
return formula1(n, m, N_diploid, tau)
elif n == i:
return formula2(n, m, N_diploid, tau)
else:
return formula3(i, n, m, N_diploid, tau)
def p_n_k(i, n, k):
if k == 1:
return int(i == n)
else:
#return scipy.misc.comb(n-i-1,k-2) / scipy.misc.comb(n-1,k-1)
return math.exp(log_binom(n - i - 1, k - 2) - log_binom(n - 1, k - 1))
def nChoose2(n):
return (n * (n-1)) / 2
def log_binom(n, k):
if k < 0 or k > n:
return -float('inf')
return log_factorial(n) - log_factorial(n - k) - log_factorial(k)
def log_urn_prob(n_parent_derived, n_parent_ancestral, n_child_derived, n_child_ancestral):
n_parent = n_parent_derived + n_parent_ancestral
n_child = n_child_derived + n_child_ancestral
if n_child_derived >= n_parent_derived and n_parent_derived > 0 and n_child_ancestral >= n_parent_ancestral and n_parent_ancestral > 0:
return log_binom(n_child_derived - 1, n_parent_derived - 1) + log_binom(n_child_ancestral - 1, n_parent_ancestral - 1) - log_binom(n_child-1, n_parent-1)
elif n_child_derived == n_parent_derived == 0 or n_child_ancestral == n_parent_ancestral == 0:
return 0.0
else:
return float("-inf")
|
jackkamm/momi
|
momi/huachen_eqs.py
|
Python
|
gpl-3.0
| 11,877
|
#!/usr/bin/env python
people = 30
cars = 40
trucks = 15
if cars > people:
print("We should take the cars.")
elif cars < people:
print("We should not take the cars")
else:
print("We can't decide.")
if trucks > cars:
print("That's too many trucks.")
elif trucks < cars:
print("Maybe we coudl take the trucks.")
else:
print("We still can't decide.")
if people > trucks:
print("Alright, let's just take the trucks.")
else:
print("Fine, let's stay home then.")
|
davvi/Hardway3
|
ex30.py
|
Python
|
mit
| 492
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import gpg
import os.path
import sys
del absolute_import, division, unicode_literals
# Copyright (C) 2018 Ben McGinnes <ben@gnupg.org>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License and the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License and the GNU
# Lesser General Public along with this program; if not, see
# <https://www.gnu.org/licenses/>.
print("""
This script imports one or more public keys from a single file.
""")
c = gpg.Context(armor=True)
if len(sys.argv) >= 3:
keyfile = sys.argv[1]
homedir = sys.argv[2]
elif len(sys.argv) == 2:
keyfile = sys.argv[1]
homedir = input("Enter the GPG configuration directory path (optional): ")
else:
keyfile = input("Enter the path and filename to import the key(s) from: ")
homedir = input("Enter the GPG configuration directory path (optional): ")
if homedir.startswith("~"):
if os.path.exists(os.path.expanduser(homedir)) is True:
c.home_dir = os.path.expanduser(homedir)
else:
pass
elif os.path.exists(homedir) is True:
c.home_dir = homedir
else:
pass
if os.path.isfile(keyfile) is True:
with open(keyfile, "rb") as f:
incoming = f.read()
result = c.key_import(incoming)
else:
result = None
if result is not None and hasattr(result, "considered") is False:
print(result)
elif result is not None and hasattr(result, "considered") is True:
num_keys = len(result.imports)
new_revs = result.new_revocations
new_sigs = result.new_signatures
new_subs = result.new_sub_keys
new_uids = result.new_user_ids
new_scrt = result.secret_imported
nochange = result.unchanged
print("""
The total number of keys considered for import was: {0}
Number of keys revoked: {1}
Number of new signatures: {2}
Number of new subkeys: {3}
Number of new user IDs: {4}
Number of new secret keys: {5}
Number of unchanged keys: {6}
The key IDs for all considered keys were:
""".format(num_keys, new_revs, new_sigs, new_subs, new_uids, new_scrt,
nochange))
for i in range(num_keys):
print(result.imports[i].fpr)
print("")
elif result is None:
print("You must specify a key file to import.")
|
gpg/gpgme
|
lang/python/examples/howto/import-key.py
|
Python
|
lgpl-2.1
| 3,060
|
#
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# This module defines notification-related functions and constants, for use
# with the client-side virtualization code.
#
import sys
import time
from up2date_client import up2dateAuth
from up2date_client import up2dateErrors
from up2date_client import rhnserver
from up2date_client import up2dateLog
from virtualization.errors import NotRegistered
log = up2dateLog.initLog()
###############################################################################
# Constants
###############################################################################
class EventType:
EXISTS = 'exists'
REMOVED = 'removed'
CRAWL_BEGAN = 'crawl_began'
CRAWL_ENDED = 'crawl_ended'
class TargetType:
SYSTEM = 'system'
DOMAIN = 'domain'
LOG_MSG = 'log_message'
###############################################################################
# Plan Class
###############################################################################
class Plan:
def __init__(self):
self.__items = []
def add(self, event, target = None, properties = {}):
"""
Creates a new plan item and adds it to the list.
"""
self.__items.append(self.__make_item(event, target, properties))
def execute(self):
"""
Sends all items in the plan to the satellite.
"""
systemid = up2dateAuth.getSystemId()
if systemid is None:
raise NotRegistered("System ID not found.")
server = rhnserver.RhnServer()
try:
server.registration.virt_notify(systemid, self.__items)
except up2dateErrors.CommunicationError:
e = sys.exc_info()[1]
log.trace_me()
log.log_me(e)
def __make_item(self, event, target, properties):
"""
Creates a new plan item.
"""
# Get the current time.
current_time = int(time.time())
return ( current_time, event, target, properties )
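# A minimal usage sketch (illustrative only; the domain properties are
# assumptions): queue a couple of events, then send them in one call.
def _example_plan():
    plan = Plan()
    plan.add(EventType.CRAWL_BEGAN, TargetType.SYSTEM)
    plan.add(EventType.EXISTS, TargetType.DOMAIN, {'name': 'guest01'})
    plan.execute()  # raises NotRegistered if no system ID is available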
|
mcalmer/spacewalk
|
client/tools/rhn-virtualization/virtualization/notification.py
|
Python
|
gpl-2.0
| 2,598
|
from django.shortcuts import resolve_url
from django.test import TestCase
from InternetSemLimites.core.models import Provider, State
class TestGet(TestCase):
def setUp(self):
sc, *_ = State.objects.get_or_create(abbr='SC', name='Santa Catarina')
go, *_ = State.objects.get_or_create(abbr='GO', name='Goiás')
sp, *_ = State.objects.get_or_create(abbr='SP', name='São Paulo')
props_published = {'name': 'Xpto',
'url': 'http://xp.to',
'source': 'http://twitter.com/xpto',
'category': Provider.FAME,
'other': 'Lorem ipsum',
'status': Provider.PUBLISHED}
props_refused = {'name': 'Xpto',
'url': 'http://xp.to',
'source': 'http://twitter.com/xpto',
'category': Provider.FAME,
'other': 'Lorem ipsum',
'status': Provider.REFUSED}
provider_published = Provider.objects.create(**props_published)
provider_refused = Provider.objects.create(**props_refused)
provider_published.coverage.set([sc, go])
provider_refused.coverage.set([sp])
self.resp = self.client.get(resolve_url('markdown:fame'))
def test_get(self):
self.assertEqual(200, self.resp.status_code)
def test_type(self):
self.assertEqual('text/markdown; charset=UTF-8', self.resp['Content-Type'])
def test_template(self):
self.assertTemplateUsed(self.resp, 'markdown/fame.md')
def test_contents(self):
contents = ['Xpto', 'Goiás', 'Lorem', 'http://xp.to', 'twitter.com']
for content in contents:
with self.subTest():
self.assertContains(self.resp, content)
self.assertNotContains(self.resp, 'São Paulo')
|
InternetSemLimites/PublicAPI
|
InternetSemLimites/markdown/tests/test_readme_view.py
|
Python
|
mit
| 1,892
|
import click
from arrow.cli import pass_context, json_loads
from arrow.decorators import custom_exception, list_output
@click.command('get_group_creator')
@click.argument("group", type=str)
@pass_context
@custom_exception
@list_output
def cli(ctx, group):
"""Get the group's creator
Output:
creator userId
"""
return ctx.gi.groups.get_group_creator(group)
|
galaxy-genome-annotation/python-apollo
|
arrow/commands/groups/get_group_creator.py
|
Python
|
mit
| 376
|
# -*- coding: utf-8 -*-
# vim: autoindent shiftwidth=4 expandtab textwidth=120 tabstop=4 softtabstop=4
###############################################################################
# OpenLP - Open Source Lyrics Projection #
# --------------------------------------------------------------------------- #
# Copyright (c) 2008-2013 Raoul Snyman #
# Portions copyright (c) 2008-2013 Tim Bentley, Gerald Britton, Jonathan #
# Corwin, Samuel Findlay, Michael Gorven, Scott Guerrieri, Matthias Hub, #
# Meinert Jordan, Armin Köhler, Erik Lundin, Edwin Lunando, Brian T. Meyer. #
# Joshua Miller, Stevan Pettit, Andreas Preikschat, Mattias Põldaru, #
# Christian Richter, Philip Ridout, Simon Scudder, Jeffrey Smith, #
# Maikel Stuivenberg, Martin Thompson, Jon Tibble, Dave Warnock, #
# Frode Woldsund, Martin Zibricky, Patrick Zimmermann #
# --------------------------------------------------------------------------- #
# This program is free software; you can redistribute it and/or modify it #
# under the terms of the GNU General Public License as published by the Free #
# Software Foundation; version 2 of the License. #
# #
# This program is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for #
# more details. #
# #
# You should have received a copy of the GNU General Public License along #
# with this program; if not, write to the Free Software Foundation, Inc., 59 #
# Temple Place, Suite 330, Boston, MA 02111-1307 USA #
###############################################################################
"""
Provide the theme XML and handling functions for OpenLP v2 themes.
"""
import os
import re
import logging
from xml.dom.minidom import Document
from lxml import etree, objectify
from openlp.core.lib import str_to_bool, ScreenList
log = logging.getLogger(__name__)
BLANK_THEME_XML = \
'''<?xml version="1.0" encoding="utf-8"?>
<theme version="1.0">
<name> </name>
<background type="image">
<filename></filename>
<borderColor>#000000</borderColor>
</background>
<background type="gradient">
<startColor>#000000</startColor>
<endColor>#000000</endColor>
<direction>vertical</direction>
</background>
<background type="solid">
<color>#000000</color>
</background>
<font type="main">
<name>Arial</name>
<color>#FFFFFF</color>
<size>40</size>
<bold>False</bold>
<italics>False</italics>
<line_adjustment>0</line_adjustment>
<shadow shadowColor="#000000" shadowSize="5">True</shadow>
<outline outlineColor="#000000" outlineSize="2">False</outline>
<location override="False" x="10" y="10" width="1004" height="690"/>
</font>
<font type="footer">
<name>Arial</name>
<color>#FFFFFF</color>
<size>12</size>
<bold>False</bold>
<italics>False</italics>
<line_adjustment>0</line_adjustment>
<shadow shadowColor="#000000" shadowSize="5">True</shadow>
<outline outlineColor="#000000" outlineSize="2">False</outline>
<location override="False" x="10" y="690" width="1004" height="78"/>
</font>
<display>
<horizontalAlign>0</horizontalAlign>
<verticalAlign>0</verticalAlign>
<slideTransition>False</slideTransition>
</display>
</theme>
'''
class ThemeLevel(object):
"""
Provides an enumeration for the level a theme applies to
"""
Global = 1
Service = 2
Song = 3
class BackgroundType(object):
"""
Type enumeration for backgrounds.
"""
Solid = 0
Gradient = 1
Image = 2
Transparent = 3
@staticmethod
def to_string(background_type):
"""
Return a string representation of a background type.
"""
if background_type == BackgroundType.Solid:
return u'solid'
elif background_type == BackgroundType.Gradient:
return u'gradient'
elif background_type == BackgroundType.Image:
return u'image'
elif background_type == BackgroundType.Transparent:
return u'transparent'
@staticmethod
def from_string(type_string):
"""
Return a background type for the given string.
"""
if type_string == u'solid':
return BackgroundType.Solid
elif type_string == u'gradient':
return BackgroundType.Gradient
elif type_string == u'image':
return BackgroundType.Image
elif type_string == u'transparent':
return BackgroundType.Transparent
class BackgroundGradientType(object):
"""
Type enumeration for background gradients.
"""
Horizontal = 0
Vertical = 1
Circular = 2
LeftTop = 3
LeftBottom = 4
@staticmethod
def to_string(gradient_type):
"""
Return a string representation of a background gradient type.
"""
if gradient_type == BackgroundGradientType.Horizontal:
return u'horizontal'
elif gradient_type == BackgroundGradientType.Vertical:
return u'vertical'
elif gradient_type == BackgroundGradientType.Circular:
return u'circular'
elif gradient_type == BackgroundGradientType.LeftTop:
return u'leftTop'
elif gradient_type == BackgroundGradientType.LeftBottom:
return u'leftBottom'
@staticmethod
def from_string(type_string):
"""
Return a background gradient type for the given string.
"""
if type_string == u'horizontal':
return BackgroundGradientType.Horizontal
elif type_string == u'vertical':
return BackgroundGradientType.Vertical
elif type_string == u'circular':
return BackgroundGradientType.Circular
elif type_string == u'leftTop':
return BackgroundGradientType.LeftTop
elif type_string == u'leftBottom':
return BackgroundGradientType.LeftBottom
class HorizontalType(object):
"""
Type enumeration for horizontal alignment.
"""
Left = 0
Right = 1
Center = 2
Justify = 3
Names = [u'left', u'right', u'center', u'justify']
class VerticalType(object):
"""
Type enumeration for vertical alignment.
"""
Top = 0
Middle = 1
Bottom = 2
Names = [u'top', u'middle', u'bottom']
BOOLEAN_LIST = [u'bold', u'italics', u'override', u'outline', u'shadow',
u'slide_transition']
INTEGER_LIST = [u'size', u'line_adjustment', u'x', u'height', u'y',
u'width', u'shadow_size', u'outline_size', u'horizontal_align',
u'vertical_align', u'wrap_style']
class ThemeXML(object):
"""
A class to encapsulate the Theme XML.
"""
FIRST_CAMEL_REGEX = re.compile(u'(.)([A-Z][a-z]+)')
SECOND_CAMEL_REGEX = re.compile(u'([a-z0-9])([A-Z])')
def __init__(self):
"""
Initialise the theme object.
"""
# Create the minidom document
self.theme_xml = Document()
self.parse_xml(BLANK_THEME_XML)
def extend_image_filename(self, path):
"""
Add the path name to the image name so the background can be rendered.
``path``
The path name to be added.
"""
if self.background_type == u'image':
if self.background_filename and path:
self.theme_name = self.theme_name.strip()
self.background_filename = self.background_filename.strip()
self.background_filename = os.path.join(path, self.theme_name,
self.background_filename)
def _new_document(self, name):
"""
Create a new theme XML document.
"""
self.theme_xml = Document()
self.theme = self.theme_xml.createElement(u'theme')
self.theme_xml.appendChild(self.theme)
self.theme.setAttribute(u'version', u'2.0')
self.name = self.theme_xml.createElement(u'name')
text_node = self.theme_xml.createTextNode(name)
self.name.appendChild(text_node)
self.theme.appendChild(self.name)
def add_background_transparent(self):
"""
Add a transparent background.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'transparent')
self.theme.appendChild(background)
def add_background_solid(self, bkcolor):
"""
Add a Solid background.
``bkcolor``
The color of the background.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'solid')
self.theme.appendChild(background)
self.child_element(background, u'color', unicode(bkcolor))
def add_background_gradient(self, startcolor, endcolor, direction):
"""
Add a gradient background.
``startcolor``
The gradient's starting colour.
``endcolor``
The gradient's ending colour.
``direction``
The direction of the gradient.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'gradient')
self.theme.appendChild(background)
# Create startColor element
self.child_element(background, u'startColor', unicode(startcolor))
# Create endColor element
self.child_element(background, u'endColor', unicode(endcolor))
# Create direction element
self.child_element(background, u'direction', unicode(direction))
def add_background_image(self, filename, borderColor):
"""
Add an image background.
``filename``
The file name of the image.
``borderColor``
The colour of the border drawn around the image.
"""
background = self.theme_xml.createElement(u'background')
background.setAttribute(u'type', u'image')
self.theme.appendChild(background)
# Create Filename element
self.child_element(background, u'filename', filename)
# Create endColor element
self.child_element(background, u'borderColor', unicode(borderColor))
def add_font(self, name, color, size, override, fonttype=u'main',
bold=u'False', italics=u'False', line_adjustment=0,
xpos=0, ypos=0, width=0, height=0, outline=u'False',
outline_color=u'#ffffff', outline_pixel=2, shadow=u'False',
shadow_color=u'#ffffff', shadow_pixel=5):
"""
Add a Font.
``name``
The name of the font.
``color``
The colour of the font.
``size``
The size of the font.
``override``
Whether or not to override the default positioning of the theme.
``fonttype``
The type of font, ``main`` or ``footer``. Defaults to ``main``.
``bold``
Whether the font is bold. Defaults to ``False``.
``italics``
Whether the font renders in italics. Defaults to ``False``.
``xpos``
The X position of the text block.
``ypos``
The Y position of the text block.
``width``
The width of the text block.
``height``
The height of the text block.
``outline``
Whether or not to show an outline.
``outline_color``
The colour of the outline.
``outline_pixel``
How big the outline is.
``shadow``
Whether or not to show a shadow.
``shadow_color``
The colour of the shadow.
``shadow_pixel``
How big the shadow is.
"""
background = self.theme_xml.createElement(u'font')
background.setAttribute(u'type', fonttype)
self.theme.appendChild(background)
# Create Font name element
self.child_element(background, u'name', name)
# Create Font color element
self.child_element(background, u'color', unicode(color))
# Create Proportion name element
self.child_element(background, u'size', unicode(size))
# Create weight name element
self.child_element(background, u'bold', unicode(bold))
# Create italics name element
self.child_element(background, u'italics', unicode(italics))
# Create indentation name element
self.child_element(background, u'line_adjustment', unicode(line_adjustment))
# Create Location element
element = self.theme_xml.createElement(u'location')
element.setAttribute(u'override', unicode(override))
element.setAttribute(u'x', unicode(xpos))
element.setAttribute(u'y', unicode(ypos))
element.setAttribute(u'width', unicode(width))
element.setAttribute(u'height', unicode(height))
background.appendChild(element)
# Shadow
element = self.theme_xml.createElement(u'shadow')
element.setAttribute(u'shadowColor', unicode(shadow_color))
element.setAttribute(u'shadowSize', unicode(shadow_pixel))
value = self.theme_xml.createTextNode(unicode(shadow))
element.appendChild(value)
background.appendChild(element)
# Outline
element = self.theme_xml.createElement(u'outline')
element.setAttribute(u'outlineColor', unicode(outline_color))
element.setAttribute(u'outlineSize', unicode(outline_pixel))
value = self.theme_xml.createTextNode(unicode(outline))
element.appendChild(value)
background.appendChild(element)
def add_display(self, horizontal, vertical, transition):
"""
Add a Display options.
``horizontal``
The horizontal alignment of the text.
``vertical``
The vertical alignment of the text.
``transition``
Whether the slide transition is active.
"""
background = self.theme_xml.createElement(u'display')
self.theme.appendChild(background)
# Horizontal alignment
element = self.theme_xml.createElement(u'horizontalAlign')
value = self.theme_xml.createTextNode(unicode(horizontal))
element.appendChild(value)
background.appendChild(element)
# Vertical alignment
element = self.theme_xml.createElement(u'verticalAlign')
value = self.theme_xml.createTextNode(unicode(vertical))
element.appendChild(value)
background.appendChild(element)
# Slide Transition
element = self.theme_xml.createElement(u'slideTransition')
value = self.theme_xml.createTextNode(unicode(transition))
element.appendChild(value)
background.appendChild(element)
def child_element(self, element, tag, value):
"""
Generic child element creator.
"""
child = self.theme_xml.createElement(tag)
child.appendChild(self.theme_xml.createTextNode(value))
element.appendChild(child)
return child
def set_default_header_footer(self):
"""
Set the header and footer size into the current primary screen.
10 px on each side is removed to allow for a border.
"""
current_screen = ScreenList().current
self.font_main_y = 0
self.font_main_width = current_screen[u'size'].width() - 20
self.font_main_height = current_screen[u'size'].height() * 9 / 10
self.font_footer_width = current_screen[u'size'].width() - 20
self.font_footer_y = current_screen[u'size'].height() * 9 / 10
self.font_footer_height = current_screen[u'size'].height() / 10
def dump_xml(self):
"""
Dump the XML to file used for debugging
"""
return self.theme_xml.toprettyxml(indent=u' ')
def extract_xml(self):
"""
Return the XML string.
"""
self._build_xml_from_attrs()
return self.theme_xml.toxml(u'utf-8').decode(u'utf-8')
def extract_formatted_xml(self):
"""
Pull out the XML string formatted for human consumption
"""
self._build_xml_from_attrs()
return self.theme_xml.toprettyxml(indent=u' ', newl=u'\n', encoding=u'utf-8')
def parse(self, xml):
"""
Read in an XML string and parse it.
``xml``
The XML string to parse.
"""
self.parse_xml(unicode(xml))
def parse_xml(self, xml):
"""
Parse an XML string.
``xml``
The XML string to parse.
"""
# remove encoding string
line = xml.find(u'?>')
if line != -1:
xml = xml[line + 2:]
try:
theme_xml = objectify.fromstring(xml)
except etree.XMLSyntaxError:
log.exception(u'Invalid xml %s', xml)
return
xml_iter = theme_xml.getiterator()
for element in xml_iter:
master = u''
if element.tag == u'background':
if element.attrib:
for attr in element.attrib:
self._create_attr(element.tag, attr, element.attrib[attr])
parent = element.getparent()
if parent is not None:
if parent.tag == u'font':
master = parent.tag + u'_' + parent.attrib[u'type']
# set up Outline and Shadow Tags and move to font_main
if parent.tag == u'display':
if element.tag.startswith(u'shadow') or element.tag.startswith(u'outline'):
self._create_attr(u'font_main', element.tag, element.text)
master = parent.tag
if parent.tag == u'background':
master = parent.tag
if master:
self._create_attr(master, element.tag, element.text)
if element.attrib:
for attr in element.attrib:
base_element = attr
# correction for the shadow and outline tags
if element.tag == u'shadow' or element.tag == u'outline':
if not attr.startswith(element.tag):
base_element = element.tag + u'_' + attr
self._create_attr(master, base_element, element.attrib[attr])
else:
if element.tag == u'name':
self._create_attr(u'theme', element.tag, element.text)
def _translate_tags(self, master, element, value):
"""
Clean up XML removing and redefining tags
"""
master = master.strip().lstrip()
element = element.strip().lstrip()
value = unicode(value).strip().lstrip()
if master == u'display':
if element == u'wrapStyle':
return True, None, None, None
if element.startswith(u'shadow') or element.startswith(u'outline'):
master = u'font_main'
# fix bold font
if element == u'weight':
element = u'bold'
if value == u'Normal':
value = False
else:
value = True
if element == u'proportion':
element = u'size'
return False, master, element, value
def _create_attr(self, master, element, value):
"""
Create the attributes with the correct data types and name format
"""
reject, master, element, value = self._translate_tags(master, element, value)
if reject:
return
field = self._de_hump(element)
tag = master + u'_' + field
if field in BOOLEAN_LIST:
setattr(self, tag, str_to_bool(value))
elif field in INTEGER_LIST:
setattr(self, tag, int(value))
else:
# make string value unicode
if not isinstance(value, unicode):
value = unicode(str(value), u'utf-8')
# None means an empty string, so let's have one.
if value == u'None':
value = u''
setattr(self, tag, unicode(value).strip().lstrip())
def __str__(self):
"""
Return a string representation of this object.
"""
theme_strings = []
for key in dir(self):
if key[0:1] != u'_':
theme_strings.append(u'%30s: %s' % (key, getattr(self, key)))
return u'\n'.join(theme_strings)
def _de_hump(self, name):
"""
Convert a CamelCase string to a lowercase, underscore-separated string
"""
sub_name = ThemeXML.FIRST_CAMEL_REGEX.sub(r'\1_\2', name)
return ThemeXML.SECOND_CAMEL_REGEX.sub(r'\1_\2', sub_name).lower()
def _build_xml_from_attrs(self):
"""
Build the XML from the variables in the object
"""
self._new_document(self.theme_name)
if self.background_type == BackgroundType.to_string(BackgroundType.Solid):
self.add_background_solid(self.background_color)
elif self.background_type == BackgroundType.to_string(BackgroundType.Gradient):
self.add_background_gradient(
self.background_start_color,
self.background_end_color,
self.background_direction
)
elif self.background_type == BackgroundType.to_string(BackgroundType.Image):
filename = os.path.split(self.background_filename)[1]
self.add_background_image(filename, self.background_border_color)
elif self.background_type == BackgroundType.to_string(BackgroundType.Transparent):
self.add_background_transparent()
self.add_font(
self.font_main_name,
self.font_main_color,
self.font_main_size,
self.font_main_override, u'main',
self.font_main_bold,
self.font_main_italics,
self.font_main_line_adjustment,
self.font_main_x,
self.font_main_y,
self.font_main_width,
self.font_main_height,
self.font_main_outline,
self.font_main_outline_color,
self.font_main_outline_size,
self.font_main_shadow,
self.font_main_shadow_color,
self.font_main_shadow_size
)
self.add_font(
self.font_footer_name,
self.font_footer_color,
self.font_footer_size,
self.font_footer_override, u'footer',
self.font_footer_bold,
self.font_footer_italics,
0, # line adjustment
self.font_footer_x,
self.font_footer_y,
self.font_footer_width,
self.font_footer_height,
self.font_footer_outline,
self.font_footer_outline_color,
self.font_footer_outline_size,
self.font_footer_shadow,
self.font_footer_shadow_color,
self.font_footer_shadow_size
)
self.add_display(
self.display_horizontal_align,
self.display_vertical_align,
self.display_slide_transition
)
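# Illustration only: _de_hump() above relies on two class-level regexes
# (FIRST_CAMEL_REGEX and SECOND_CAMEL_REGEX) that are defined outside this
# excerpt. A minimal sketch of the conversion, assuming the common two-pass
# CamelCase-to-underscore idiom (the actual patterns on ThemeXML may differ):
#
#     import re
#     FIRST_CAMEL_REGEX = re.compile(r'(.)([A-Z][a-z]+)')
#     SECOND_CAMEL_REGEX = re.compile(r'([a-z0-9])([A-Z])')
#
#     def de_hump(name):
#         sub_name = FIRST_CAMEL_REGEX.sub(r'\1_\2', name)
#         return SECOND_CAMEL_REGEX.sub(r'\1_\2', sub_name).lower()
#
#     de_hump(u'fontMainShadowColor')  # -> u'font_main_shadow_color'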
| marmyshev/transitions | openlp/core/lib/theme.py | Python | gpl-2.0 | 23,608 |
# -*- coding: utf-8 -*-
"""
Tests for auth manager PKI access to postgres.
This is an integration test for QGIS Desktop Auth Manager postgres provider that
checks if QGIS can use a stored auth manager auth configuration to access
a PKI protected postgres.
Configuration from the environment:
* QGIS_POSTGRES_SERVER_PORT (default: 55432)
* QGIS_POSTGRES_EXECUTABLE_PATH (default: /usr/lib/postgresql/9.4/bin)
From build dir, run: ctest -R PyQgsAuthManagerPKIPostgresTest -V
or, if your PostgreSQL path differs from the default:
QGIS_POSTGRES_EXECUTABLE_PATH=/usr/lib/postgresql/<your_version_goes_here>/bin \
ctest -R PyQgsAuthManagerPKIPostgresTest -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
import os
import time
import signal
import stat
import subprocess
import tempfile
import glob
from shutil import rmtree
from utilities import unitTestDataPath
from qgis.core import (
QgsApplication,
QgsAuthManager,
QgsAuthMethodConfig,
QgsVectorLayer,
QgsDataSourceUri,
QgsWkbTypes,
)
from qgis.PyQt.QtNetwork import QSslCertificate
from qgis.PyQt.QtCore import QFile
from qgis.testing import (
start_app,
unittest,
)
__author__ = 'Alessandro Pasotti'
__date__ = '25/10/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
QGIS_POSTGRES_SERVER_PORT = os.environ.get('QGIS_POSTGRES_SERVER_PORT', '55432')
QGIS_POSTGRES_EXECUTABLE_PATH = os.environ.get('QGIS_POSTGRES_EXECUTABLE_PATH', '/usr/lib/postgresql/9.4/bin')
assert os.path.exists(QGIS_POSTGRES_EXECUTABLE_PATH)
QGIS_AUTH_DB_DIR_PATH = tempfile.mkdtemp()
# Postgres test path
QGIS_PG_TEST_PATH = tempfile.mkdtemp()
os.environ['QGIS_AUTH_DB_DIR_PATH'] = QGIS_AUTH_DB_DIR_PATH
qgis_app = start_app()
QGIS_POSTGRES_CONF_TEMPLATE = """
hba_file = '%(tempfolder)s/pg_hba.conf'
listen_addresses = '*'
port = %(port)s
max_connections = 100
unix_socket_directories = '%(tempfolder)s'
ssl = true
ssl_ciphers = 'DEFAULT:!LOW:!EXP:!MD5:@STRENGTH' # allowed SSL ciphers
ssl_cert_file = '%(server_cert)s'
ssl_key_file = '%(server_key)s'
ssl_ca_file = '%(sslrootcert_path)s'
password_encryption = on
"""
QGIS_POSTGRES_HBA_TEMPLATE = """
hostssl all all 0.0.0.0/0 cert clientcert=1
hostssl all all ::1/0 cert clientcert=1
host all all 127.0.0.1/32 trust
host all all ::1/32 trust
"""
class TestAuthManager(unittest.TestCase):
@classmethod
def setUpAuth(cls):
"""Run before all tests and set up authentication"""
authm = QgsApplication.authManager()
assert (authm.setMasterPassword('masterpassword', True))
cls.pg_conf = os.path.join(cls.tempfolder, 'postgresql.conf')
cls.pg_hba = os.path.join(cls.tempfolder, 'pg_hba.conf')
# Client side
cls.sslrootcert_path = os.path.join(cls.certsdata_path, 'chains_subissuer-issuer-root_issuer2-root2.pem')
cls.sslcert = os.path.join(cls.certsdata_path, 'gerardus_cert.pem')
cls.sslkey = os.path.join(cls.certsdata_path, 'gerardus_key.pem')
assert os.path.isfile(cls.sslcert)
assert os.path.isfile(cls.sslkey)
assert os.path.isfile(cls.sslrootcert_path)
os.chmod(cls.sslcert, stat.S_IRUSR)
os.chmod(cls.sslkey, stat.S_IRUSR)
os.chmod(cls.sslrootcert_path, stat.S_IRUSR)
cls.auth_config = QgsAuthMethodConfig("PKI-Paths")
cls.auth_config.setConfig('certpath', cls.sslcert)
cls.auth_config.setConfig('keypath', cls.sslkey)
cls.auth_config.setName('test_pki_auth_config')
cls.username = 'Gerardus'
cls.sslrootcert = QSslCertificate.fromPath(cls.sslrootcert_path)
assert cls.sslrootcert is not None
authm.storeCertAuthorities(cls.sslrootcert)
authm.rebuildCaCertsCache()
authm.rebuildTrustedCaCertsCache()
authm.rebuildCertTrustCache()
assert (authm.storeAuthenticationConfig(cls.auth_config)[0])
assert cls.auth_config.isValid()
# Server side
cls.server_cert = os.path.join(cls.certsdata_path, 'localhost_ssl_cert.pem')
cls.server_key = os.path.join(cls.certsdata_path, 'localhost_ssl_key.pem')
cls.server_rootcert = cls.sslrootcert_path
os.chmod(cls.server_cert, stat.S_IRUSR)
os.chmod(cls.server_key, stat.S_IRUSR)
os.chmod(cls.server_rootcert, stat.S_IRUSR)
# Place conf in the data folder
with open(cls.pg_conf, 'w+') as f:
f.write(QGIS_POSTGRES_CONF_TEMPLATE % {
'port': cls.port,
'tempfolder': cls.tempfolder,
'server_cert': cls.server_cert,
'server_key': cls.server_key,
'sslrootcert_path': cls.sslrootcert_path,
})
with open(cls.pg_hba, 'w+') as f:
f.write(QGIS_POSTGRES_HBA_TEMPLATE)
@classmethod
def setUpClass(cls):
"""Run before all tests:
Creates an auth configuration"""
cls.port = QGIS_POSTGRES_SERVER_PORT
cls.dbname = 'test_pki'
cls.tempfolder = QGIS_PG_TEST_PATH
cls.certsdata_path = os.path.join(unitTestDataPath('auth_system'), 'certs_keys')
cls.hostname = 'localhost'
cls.data_path = os.path.join(cls.tempfolder, 'data')
os.mkdir(cls.data_path)
cls.setUpAuth()
subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'initdb'), '-D', cls.data_path])
cls.server = subprocess.Popen([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'postgres'), '-D',
cls.data_path, '-c',
"config_file=%s" % cls.pg_conf],
env=os.environ,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait max 10 secs for the server to start
end = time.time() + 10
while True:
line = cls.server.stderr.readline()
print(line)
if line.find(b"database system is ready to accept") != -1:
break
if time.time() > end:
raise Exception("Timeout connecting to PostgreSQL")
# Create a DB
subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'createdb'), '-h', 'localhost', '-p', cls.port, 'test_pki'])
# Inject test SQL from test path
test_sql = os.path.join(unitTestDataPath('provider'), 'testdata_pg.sql')
subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'psql'), '-h', 'localhost', '-p', cls.port, '-f', test_sql, cls.dbname])
# Create a role
subprocess.check_call([os.path.join(QGIS_POSTGRES_EXECUTABLE_PATH, 'psql'), '-h', 'localhost', '-p', cls.port, '-c', 'CREATE ROLE "%s" WITH SUPERUSER LOGIN' % cls.username, cls.dbname])
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
cls.server.terminate()
os.kill(cls.server.pid, signal.SIGABRT)
del cls.server
time.sleep(2)
rmtree(QGIS_AUTH_DB_DIR_PATH)
rmtree(cls.tempfolder)
def setUp(self):
"""Run before each test."""
pass
def tearDown(self):
"""Run after each test."""
pass
@classmethod
def _getPostGISLayer(cls, type_name, layer_name=None, authcfg=None):
"""
PG layer factory
"""
if layer_name is None:
layer_name = 'pg_' + type_name
uri = QgsDataSourceUri()
uri.setWkbType(QgsWkbTypes.Point)
uri.setConnection("localhost", cls.port, cls.dbname, "", "", QgsDataSourceUri.SslVerifyFull, authcfg)
uri.setKeyColumn('pk')
uri.setSrid('EPSG:4326')
uri.setDataSource('qgis_test', 'someData', "geom", "", "pk")
# Note: do not expand here!
layer = QgsVectorLayer(uri.uri(False), layer_name, 'postgres')
return layer
def testValidAuthAccess(self):
"""
Access the protected layer with valid credentials
"""
pg_layer = self._getPostGISLayer('testlayer_èé', authcfg=self.auth_config.id())
self.assertTrue(pg_layer.isValid())
def testInvalidAuthAccess(self):
"""
Access the protected layer with invalid credentials
"""
pg_layer = self._getPostGISLayer('testlayer_èé')
self.assertFalse(pg_layer.isValid())
def testRemoveTemporaryCerts(self):
"""
Check that no temporary certs remain after connecting with the
postgres provider
"""
def cleanTempPki():
pkies = glob.glob(os.path.join(tempfile.gettempdir(), 'tmp*_{*}.pem'))
for fn in pkies:
f = QFile(fn)
f.setPermissions(QFile.WriteOwner)
f.remove()
# remove any temp pki files in the temporary path to check that no
# other pki files remain after connection
cleanTempPki()
# connect using postgres provider
pg_layer = self._getPostGISLayer('testlayer_èé', authcfg=self.auth_config.id())
self.assertTrue(pg_layer.isValid())
# check that no certs remained
pkies = glob.glob(os.path.join(tempfile.gettempdir(), 'tmp*_{*}.pem'))
self.assertEqual(len(pkies), 0)
if __name__ == '__main__':
unittest.main()
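# Illustration only, not part of the test module: the server start-up wait in
# setUpClass() above can be read as a small "wait for marker with timeout"
# helper. A minimal sketch under that assumption:
#
#     def wait_for_marker(proc, marker, timeout=10):
#         """Read proc.stderr until marker appears or timeout seconds pass."""
#         end = time.time() + timeout
#         while True:
#             line = proc.stderr.readline()
#             if marker in line:
#                 return
#             if time.time() > end:
#                 raise Exception("Timeout waiting for %r" % marker)
#
#     # e.g. wait_for_marker(cls.server, b"database system is ready to accept")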
| geopython/QGIS | tests/src/python/test_authmanager_pki_postgres.py | Python | gpl-2.0 | 9,727 |
#!/usr/bin/python
# This script looks up every pair team that exists.
# It then, for each pair team
# (4) creates a repo for the team (if it doesn't already exist)
# (5) populates it, but only if it was JUST created.
import getpass
import argparse
import os
import sys
from github_acadwf import addPyGithubToPath
from github_acadwf import updatePairsForLab
from github_acadwf import getenvOrDie
from github_acadwf import getCSVFromURL
addPyGithubToPath()
from github import Github
from github import GithubException
from github_acadwf import makeUserDict
from github_acadwf import getUserList
sys.path.append("./PyGithub")
GHA_GITHUB_ORG = getenvOrDie("GHA_GITHUB_ORG",
"Error: please set GHA_GITHUB_ORG to name of github organization for the course, e.g. UCSB-CS56-W14")
GHA_WORKDIR = getenvOrDie('GHA_WORKDIR',
"Error: please set GHA_WORKDIR to a writeable scratch directory")
GHA_STARTPOINT_DIR = getenvOrDie('GHA_STARTPOINT_DIR',
"Error: please set GHA_STARTPOINT_DIR to a readable directory")
# Now try to get the Google Spreadsheet Data
parser = argparse.ArgumentParser(description='Update lab for pairs')
parser.add_argument('lab',metavar='labxx',
help="which lab (e.g. lab00, lab01, etc.)")
parser.add_argument('-u','--githubUsername',
help="github username, default is current OS user",
default=getpass.getuser())
parser.add_argument('-t','--teamPrefix',
help="prefix of teams to create",
default="")
args = parser.parse_args()
if not os.access(GHA_WORKDIR, os.W_OK):
print(GHA_WORKDIR + " is not a writable directory.")
sys.exit(1)
pw = getpass.getpass()
g = Github(args.githubUsername, pw, user_agent="PyGithub")
org= g.get_organization(GHA_GITHUB_ORG)
updatePairsForLab(g,org,args.lab,GHA_WORKDIR, GHA_STARTPOINT_DIR, args.teamPrefix)
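# For reference, a minimal sketch of what a fail-fast getenvOrDie() helper
# could look like. The real implementation lives in github_acadwf and is not
# shown here, so this is an assumption, not the actual code:
#
#     def getenvOrDie(name, errorMessage):
#         value = os.environ.get(name)
#         if not value:
#             print(errorMessage)
#             sys.exit(1)
#         return value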
| UCSB-CS-Using-GitHub-In-Courses/github-acad-scripts | updatePairsForLab.py | Python | mit | 2,129 |
# -*- coding: utf-8 -*-
""" Auxiliary functions for light curve file handling.
Contains functions to extract Kepler PDCSAP and user-provided K2SFF light
curves.
"""
import numpy as np
from astropy.table import Table
from astropy.io import fits, ascii
import warnings
from astropy.utils.exceptions import AstropyUserWarning
def open_fits(filename):
""" Open a light curve file in the usual Kepler FITS format and extract
the PDCSAP light curve.
Parameters
----------
filename : str
file name of the FITS file containing the light curve data
Returns
-------
EPICno : int
EPIC number ('KEPLERID' in the hdu header)
photometry : Astropy table
Columns are time in BJD - 2454833, PDCSAP flux, PDCSAP flux error
Example
-------
>>> filename = 'tests/ktwo205919993-c03_llc.fits'
>>> EPICno, photometry = open_fits(filename)
"""
try:
hdulist = fits.open(filename)
except IOError:
warnings.warn("Could not open FITS file.", AstropyUserWarning)
return None
EPICno = hdulist[1].header['KEPLERID']
tbdata = hdulist[1].data
hdulist.close()
# extract light curve data from hdu
time = tbdata['TIME']
flux = tbdata['PDCSAP_FLUX']
flux_err = tbdata['PDCSAP_FLUX_ERR']
photometry = Table([time, flux, flux_err], names = ('TIME', 'FLUX','FLUX_ERR'))
# remove nans
photometry = photometry[~np.isnan(photometry['FLUX'])]
return EPICno, photometry
def open_csv(filename):
""" Open a light curve file in csv format and extract from it flux time
series.
Parameters
----------
filename : str
file name of the ascii file containing the photometry
Returns
-------
filename : str
The filename serves as a unique identifier for the object
photometry : Astropy table
Columns are named after the file header and contain time, flux
"""
photometry = ascii.read(filename, format='csv')
return filename, photometry
def open_k2sff(filename):
""" Extract a light curve from a 'K2SFF' ascii file. The default aperture
light curve data of this product are not strictly 'comma-separated' and
lead to crashes when opened by standard Astropy ascii I/O functions.
Parameters
----------
filename : str
file name of the ascii file containing the photometry
Returns
-------
filename : str
The filename serves as a unique identifier for the object
photometry : Astropy table
Columns contain time, flux
Example
-------
>>> filename = 'tests/220132548'
>>> filename, photometry = open_k2sff(filename)
"""
with open(filename, 'r') as infile:
lines = infile.readlines()
phot = np.zeros([len(lines) - 1, 2])
for i, line in enumerate(lines[1:]):
# strip trailing comma and '\n' and save to a table
line = line.rstrip(',\n')
line = line.split(',')
phot[i][:] = line
photometry = Table(phot, names = ('TIME', 'FLUX'))
# remove nans
photometry = photometry[~np.isnan(photometry['FLUX'])]
return filename.split('/')[-1], photometry
if __name__ == "__main__":
import doctest
doctest.testmod()
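# Usage sketch (not part of the module): dispatch to the right reader by file
# extension. The '.fits'/'.csv' mapping is an assumption about how callers
# might distinguish the three formats handled above:
#
#     def open_lightcurve(filename):
#         if filename.endswith('.fits'):
#             return open_fits(filename)
#         elif filename.endswith('.csv'):
#             return open_csv(filename)
#         else:
#             return open_k2sff(filename)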
| matiscke/lcps | lcps/lcps_io.py | Python | mit | 3,351 |
from django.conf.urls import url
from apps.comment import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^captcha/$', views.captcha, name='captcha'),
url(r'^(\d+)/$', views.comment, name='comment'),
]
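# The third pattern captures one positional group, so views.comment must accept
# it as an extra argument. A hypothetical signature (the real view in
# apps.comment.views is not shown here):
#
#     def comment(request, article_id):
#         ...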
| blackholll/loonblog | apps/comment/urls.py | Python | mit | 234 |
import importlib
import os
import re
import socket
import sys
from django.utils.termcolors import colorize
def log(s):
sys.stdout.write(colorize(s, fg='cyan') + '\n')
# The normal scenario is that we use the hostname, but let's make it
# overridable, this is useful for dev and debugging.
IDEASCUBE_HOSTNAME = socket.gethostname() # Store it for later use.
IDEASCUBE_ID = os.environ.get('IDEASCUBE_ID', IDEASCUBE_HOSTNAME)
IDEASCUBE_ID = re.sub('[^\w_]', '', IDEASCUBE_ID)
log('IDEASCUBE_ID={}'.format(IDEASCUBE_ID))
# Every box will have some edge specific needs, such as a specific user model,
# we manage this with per box settings, but we want those specific settings
# to be versionned, for two reasons: easier to debug when there is no hidden
# local config, and easier to manage code upgrade.
_SETTINGS_PACKAGE = os.environ.get('IDEASCUBE_SETTINGS_PACKAGE', 'ideascube')
_SETTINGS_MODULE = '.conf.' + IDEASCUBE_ID
try:
sub = importlib.import_module(_SETTINGS_MODULE, package=_SETTINGS_PACKAGE)
except (ImportError, SystemError):
# No specific config for this box
log('Could not import settings from %s%s'
% (_SETTINGS_PACKAGE, _SETTINGS_MODULE))
from .conf import base as sub
log('Importing settings from %s' % sub.__name__)
ldict = locals()
for k in sub.__dict__:
if k.isupper() and not (k.startswith('__') or k.endswith('__')):
ldict[k] = sub.__dict__[k]
USER_DATA_FIELDS = []
for section, fields in USER_FORM_FIELDS: # pragma: no flakes
USER_DATA_FIELDS.extend(fields)
# Allow server settings to only define STORAGE_ROOT without needing to
# redefine all ROOTS like settings.
BACKUPED_ROOT = ldict.get('BACKUPED_ROOT') or os.path.join(STORAGE_ROOT, 'main') # pragma: no flakes
MEDIA_ROOT = ldict.get('MEDIA_ROOT') or os.path.join(BACKUPED_ROOT, 'media') # noqa
STATIC_ROOT = ldict.get('STATIC_ROOT') or os.path.join(STORAGE_ROOT, 'static') # pragma: no flakes
CATALOG_CACHE_ROOT = (
ldict.get('CATALOG_CACHE_ROOT') or '/var/cache/ideascube/catalog')
CATALOG_STORAGE_ROOT = (
ldict.get('CATALOG_STORAGE_ROOT')
or os.path.join(BACKUPED_ROOT, 'catalog'))
if not getattr(ldict, 'DATABASES', None):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BACKUPED_ROOT, 'default.sqlite'),
},
'transient': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(STORAGE_ROOT, 'transient.sqlite'), # pragma: no flakes
}
}
FILE_UPLOAD_PERMISSIONS = 0o644
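# A minimal sketch of a per-box settings module as described above, assuming a
# box id of "mybox" (i.e. a hypothetical file ideascube/conf/mybox.py). Only
# STORAGE_ROOT is overridden; the derived *_ROOT values are then filled in by
# the fallbacks above:
#
#     # ideascube/conf/mybox.py (hypothetical)
#     from .base import *  # noqa
#     STORAGE_ROOT = '/data/ideascube/storage'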
| ideascube/ideascube | ideascube/settings.py | Python | agpl-3.0 | 2,561 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Gruik coded by GuiguiAbloc
# http://blog.guiguiabloc.fr
# http://api.domogeek.fr
#
import web, sys, time
import json,hashlib,socket
from datetime import datetime,date,timedelta
import urllib, urllib2
from Daemon import Daemon
from xml.dom.minidom import parseString
import Holiday
import ClassTempo
import ClassSchoolCalendar
import ClassVigilance
import ClassGeoLocation
import ClassDawnDusk
import ClassWeather
import ClassEJP
# timeout in seconds
timeout = 10
socket.setdefaulttimeout(timeout)
school = ClassSchoolCalendar.schoolcalendar()
dayrequest = Holiday.jourferie()
temporequest = ClassTempo.EDFTempo()
vigilancerequest = ClassVigilance.vigilance()
geolocationrequest = ClassGeoLocation.geolocation()
dawnduskrequest = ClassDawnDusk.sunriseClass()
weatherrequest = ClassWeather.weather()
ejprequest = ClassEJP.EDFejp()
##########
# CONFIG #
##########
listenip = "0.0.0.0"
listenport = "80"
localapiurl= "http://api.domogeek.fr"
googleapikey = ''
bingmapapikey = ''
geonameskey = ''
worldweatheronlineapikey = ''
redis_host = "127.0.0.1"
redis_port = 6379
##############
# END CONFIG #
##############
##############
# Test REDIS #
##############
try:
import redis
except:
print "No Redis module : https://pypi.python.org/pypi/redis/"
sys.exit(1)
rc= redis.Redis(host=redis_host, port=redis_port)
rc.set("test", "ok")
rc.expire("test" ,10)
value = rc.get("test")
if value is None:
print "Could not connect to Redis " + redis_host + " port " + redis_port
web.config.debug = False
urls = (
'/holiday/(.*)', 'holiday',
'/tempoedf/(.*)', 'tempoedf',
'/ejpedf/(.*)', 'ejpedf',
'/schoolholiday/(.*)', 'schoolholiday',
'/weekend/(.*)', 'weekend',
'/holidayall/(.*)', 'holidayall',
'/vigilance/(.*)', 'vigilance',
'/geolocation/(.*)', 'geolocation',
'/sun/(.*)', 'dawndusk',
'/weather/(.*)', 'weather',
'/season(.*)', 'season',
'/myip(.*)', 'myip',
'/feastedsaint/(.*)', 'feastedsaint',
'/', 'index'
)
app = web.application(urls, globals())
class index:
def GET(self):
# redirect to the static file ...
raise web.seeother('/static/index.html')
"""
@api {get} /holiday/:date/:responsetype Holiday Status Request
@apiName GetHoliday
@apiGroup Domogeek
@apiDescription Ask to know if :date is a holiday
@apiParam {String} now Ask for today.
@apiParam {String} tomorrow Ask for tomorrow.
@apiParam {String} all Ask for all entry.
@apiParam {Datetime} D-M-YYYY Ask for specific date.
@apiParam {String} [responsetype] Specify Response Type (raw by default or specify json, only for single element).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
Jour de Noel
HTTP/1.1 200 OK
no
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/holiday/now
curl http://api.domogeek.fr/holiday/now/json
curl http://api.domogeek.fr/holiday/all
curl http://api.domogeek.fr/holiday/25-12-2014/json
"""
class holiday:
def GET(self,uri):
request = uri.split('/')
if request == ['']:
web.badrequest()
return "Incorrect request : /holiday/{now|tomorrow|date(D-M-YYYY)}\n"
try:
format = request[1]
except:
format = None
if request[0] == "now":
datenow = datetime.now()
year = datenow.year
month = datenow.month
day = datenow.day
result = dayrequest.estferie([day,month,year])
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"holiday": result})
else:
return result
if request[0] == "tomorrow":
datenow = datetime.now()
datetomorrow = datenow + timedelta(days=1)
year = datetomorrow.year
month = datetomorrow.month
day = datetomorrow.day
result = dayrequest.estferie([day,month,year])
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"holiday": result})
else:
return result
if request[0] == "all":
datenow = datetime.now()
year = datenow.year
listvalue = []
F, J, L = dayrequest.joursferies(year,1,'/')
for i in xrange(0,len(F)):
result = F[i], "%10s" % (J[i]), L[i]
listvalue.append(result)
response = json.dumps(listvalue)
return response
if request[0] != "now" and request[0] != "all" and request[0] != "tomorrow":
try:
daterequest = request[0]
result = daterequest.split('-')
except:
web.badrequest()
return "Incorrect date format : D-M-YYYY\n"
try:
day = int(result[0])
month = int(result[1])
year = int(result[2])
except:
web.badrequest()
return "Incorrect date format : D-M-YYYY\n"
if day > 31 or month > 12:
web.badrequest()
return "Incorrect date format : D-M-YYYY\n"
result = dayrequest.estferie([day,month,year])
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"holiday": result})
else:
return result
"""
@api {get} /weekend/:daterequest/:responsetype Week-end Status Request
@apiName GetWeekend
@apiGroup Domogeek
@apiDescription Ask to know if :daterequest is a week-end day
@apiParam {String} daterequest Ask for specific date {now | tomorrow | D-M-YYYY}.
@apiParam {String} [responsetype] Specify Response Type (raw by default or specify json, only for single element).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
True
HTTP/1.1 200 OK
False
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/weekend/now
curl http://api.domogeek.fr/weekend/tomorrow
curl http://api.domogeek.fr/weekend/now/json
curl http://api.domogeek.fr/weekend/16-07-2014/json
"""
class weekend:
def GET(self,uri):
request = uri.split('/')
if request == ['']:
web.badrequest()
return "Incorrect request : /weekend/{now|tomorrow|date(D-M-YYYY)}\n"
try:
format = request[1]
except:
format = None
if request[0] == "now":
datenow = datetime.now()
daynow = datetime.now().weekday()
day = datenow.day
if daynow == 5 or daynow == 6:
result = "True"
else:
result = "False"
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"weekend": result})
else:
return result
if request[0] == "tomorrow":
today = date.today()
datetomorrow = today + timedelta(days=1)
day = datetomorrow.weekday()
if day == 5 or day == 6:
result = "True"
else:
result = "False"
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"weekend": result})
else:
return result
if request[0] != "now" and request[0] != "tomorrow":
try:
daterequest = request[0]
day,month,year = daterequest.split('-')
except:
web.badrequest()
return "Incorrect date format : D-M-YYYY\n"
try:
int(day)
int(month)
int(year)
except:
web.badrequest()
return "Incorrect date format : D-M-YYYY\n"
requestday = date(int(year),int(month),int(day)).weekday()
if requestday == 5 or requestday == 6:
result = "True"
else:
result = "False"
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"weekend": result})
else:
return result
"""
@api {get} /holidayall/:zone/:daterequest All Holidays Status Request
@apiName GetHolidayall
@apiGroup Domogeek
@apiDescription Ask to know if :daterequest is a holiday, school holiday and week-end day
@apiParam {String} zone School Zone (A, B or C).
@apiParam {String} daterequest Ask for specific date {now | tomorrow | D-M-YYYY}.
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{"holiday": "False", "weekend": "False", "schoolholiday": "Vacances de printemps - Zone A"}
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/holidayall/A/now
curl http://api.domogeek.fr/holidayall/A/tomorrow
curl http://api.domogeek.fr/holidayall/B/25-02-2014
"""
class holidayall:
def GET(self,uri):
request = uri.split('/')
if request == ['']:
web.badrequest()
return "Incorrect request : /holidayall/{zone}/{now|tomorrow|date(D-M-YYYY)}\n"
try:
zone = request[0]
except:
return "Incorrect request : /holidayall/{zone}/{now|tomorrow|date(D-M-YYYY)}\n"
try:
zoneok = str(zone.upper())
except:
return "Wrong Zone (must be A, B or C)"
if len(zoneok) > 1:
return "Wrong Zone (must be A, B or C)"
if zoneok not in ["A","B","C"]:
return "Incorrect request : /holidayall/{zone}/{now|tomorrow|date(D-M-YYYY)}\n"
try:
daterequest = request[1]
except:
return "Incorrect request : /holidayall/{zone}/{now|tomorrow|date(D-M-YYYY)}\n"
if request[1] == "now":
try:
responseholiday = urllib2.urlopen(localapiurl+'/holiday/now')
responseschoolholiday = urllib2.urlopen(localapiurl+'/schoolholiday/'+zoneok+'/now')
responseweekend = urllib2.urlopen(localapiurl+'/weekend/now')
resultholiday = responseholiday.read()
resultschoolholiday = responseschoolholiday.read()
resultschoolholidays = resultschoolholiday.decode('utf-8')
resultweekend = responseweekend.read()
except:
return "no data available"
web.header('Content-Type', 'application/json')
return json.dumps({"holiday": resultholiday, "schoolholiday": resultschoolholidays, "weekend": resultweekend}, ensure_ascii=False).encode('utf8')
if request[1] == "tomorrow":
try:
responseholiday = urllib2.urlopen(localapiurl+'/holiday/tomorrow')
responseschoolholiday = urllib2.urlopen(localapiurl+'/schoolholiday/'+zoneok+'/tomorrow')
responseweekend = urllib2.urlopen(localapiurl+'/weekend/tomorrow')
resultholiday = responseholiday.read()
resultschoolholiday = responseschoolholiday.read()
resultschoolholidays = resultschoolholiday.decode('utf-8')
resultweekend = responseweekend.read()
except:
return "no data available"
web.header('Content-Type', 'application/json')
return json.dumps({"holiday": resultholiday, "schoolholiday": resultschoolholidays, "weekend": resultweekend}, ensure_ascii=False).encode('utf8')
if request[1] != "now" and request[1] != "tomorrow":
try:
daterequest = request[1]
day,month,year = daterequest.split('-')
except:
web.badrequest()
return "Incorrect date format : D-M-YYYY\n"
try:
int(day)
int(month)
int(year)
except:
web.badrequest()
return "Incorrect date format : D-M-YYYY\n"
try:
responseholiday = urllib2.urlopen(localapiurl+'/holiday/'+daterequest)
responseschoolholiday = urllib2.urlopen(localapiurl+'/schoolholiday/'+zoneok+'/'+daterequest)
responseweekend = urllib2.urlopen(localapiurl+'/weekend/'+daterequest)
resultholiday = responseholiday.read()
resultschoolholiday = responseschoolholiday.read()
resultschoolholidays = resultschoolholiday.decode('utf-8')
resultweekend = responseweekend.read()
except:
return "no data available"
web.header('Content-Type', 'application/json')
return json.dumps({"holiday": resultholiday, "schoolholiday": resultschoolholidays, "weekend": resultweekend}, ensure_ascii=False).encode('utf8')
"""
@api {get} /tempoedf/:date/:responsetype Tempo EDF color Request
@apiName GetTempo
@apiGroup Domogeek
@apiDescription Ask the EDF Tempo color
@apiParam {String} now Ask for today.
@apiParam {String} tomorrow Ask for tomorrow.
@apiParam {String} [responsetype] Specify Response Type (raw by default or specify json, only for single element).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
Content-Type: application/json
Transfer-Encoding: chunked
Date: Thu, 03 Jul 2014 17:16:47 GMT
Server: localhost
{"tempocolor": "bleu"}
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/tempoedf/now
curl http://api.domogeek.fr/tempoedf/now/json
curl http://api.domogeek.fr/tempoedf/tomorrow
curl http://api.domogeek.fr/tempoedf/tomorrow/json
"""
class tempoedf:
def GET(self,uri):
request = uri.split('/')
if request == ['']:
web.badrequest()
return "Incorrect request : /tempoedf/{now | tomorrow}\n"
try:
format = request[1]
except:
format = None
if request[0] == "now":
try:
rediskeytemponow = hashlib.md5("temponow").hexdigest()
gettemponow = rc.get(rediskeytemponow)
if gettemponow is None:
result = temporequest.TempoToday()
rediskeytemponow = hashlib.md5("temponow").hexdigest()
rc.set(rediskeytemponow, result, 1800)
rc.expire(rediskeytemponow ,1800)
print "SET TEMPO NOW IN REDIS"
else:
result = gettemponow
print "FOUND TEMPO NOW IN REDIS"
except:
result = temporequest.TempoToday()
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"tempocolor": result})
else:
return result
if request[0] == "tomorrow":
try:
rediskeytempotomorrow = hashlib.md5("tempotomorrow").hexdigest()
gettempotomorrow = rc.get(rediskeytempotomorrow)
if gettempotomorrow is None:
result = temporequest.TempoTomorrow()
rediskeytempotomorrow = hashlib.md5("tempotomorrow").hexdigest()
rc.set(rediskeytempotomorrow, result, 1800)
rc.expire(rediskeytempotomorrow ,1800)
print "SET TEMPO TOMORROW IN REDIS"
else:
result = gettempotomorrow
print "FOUND TEMPO TOMORROW IN REDIS"
except:
result = temporequest.TempoTomorrow()
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"tempocolor": result})
else:
return result
web.badrequest()
return "Incorrect request : /tempoedf/{now | tomorrow}\n"
"""
@api {get} /schoolholiday/:zone/:daterequest/:responsetype School Holiday Status Request
@apiName GetSchoolHoliday
@apiGroup Domogeek
@apiDescription Ask to know if :daterequest is a school holiday (UTF-8 response)
@apiParam {String} zone School Zone (A, B or C).
@apiParam {String} daterequest Ask for specific date {now | all | D-M-YYYY}.
@apiParam {String} [responsetype] Specify Response Type (raw by default or specify json, only for single element).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
Vacances de la Toussaint
HTTP/1.1 200 OK
False
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/schoolholiday/A/now
curl http://api.domogeek.fr/schoolholiday/A/now/json
curl http://api.domogeek.fr/schoolholiday/A/all
curl http://api.domogeek.fr/schoolholiday/A/25-12-2014/json
"""
class schoolholiday:
def GET(self,uri):
request = uri.split('/')
if request == ['']:
web.badrequest()
return "Incorrect request : /schoolholiday/{zone}/{now|tomorrow|all|date(D-M-YYYY)}\n"
try:
zone = request[0]
except:
return "Incorrect request : /schoolholiday/{zone}/{now|tomorrow|all|date(D-M-YYYY)}\n"
try:
zoneok = str(zone.upper())
except:
return "Wrong Zone (must be A, B or C)"
if len(zoneok) > 1:
return "Wrong Zone (must be A, B or C)"
if zoneok not in ["A","B","C"]:
return "Incorrect request : /schoolholiday/{zone}/{now|tomorrow|all|date(D-M-YYYY)}\n"
try:
daterequest = request[1]
except:
return "Incorrect request : /schoolholiday/{zone}/{now|tomorrow|all|date(D-M-YYYY)}\n"
try:
format = request[2]
except:
format = None
datenow = datetime.now()
year = datenow.year
month = datenow.month
day = datenow.day
if daterequest == "now":
try:
rediskeyschoolholidaynow = hashlib.md5("schoolholidaynow"+zoneok).hexdigest()
getschoolholidaynow = rc.get(rediskeyschoolholidaynow)
if getschoolholidaynow is None:
result = school.isschoolcalendar(zoneok,day,month,year)
rediskeyschoolholidaynow = hashlib.md5("schoolholidaynow"+zoneok).hexdigest()
rc.set(rediskeyschoolholidaynow, result, 1800)
rc.expire(rediskeyschoolholidaynow ,1800)
print "SET SCHOOL HOLIDAY "+zoneok+ " NOW IN REDIS"
else:
result = getschoolholidaynow
print "FOUND SCHOOL HOLIDAY "+zoneok+" NOW IN REDIS"
except:
result = school.isschoolcalendar(zoneok,day,month,year)
if result == None or result == "None":
result = "False"
try:
description = result.decode('utf-8')
except:
description = result
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"schoolholiday": description}, ensure_ascii=False).encode('utf8')
else:
return description
if daterequest == "tomorrow":
datenow = datetime.now()
datetomorrow = datenow + timedelta(days=1)
yeartomorrow = datetomorrow.year
monthtomorrow = datetomorrow.month
daytomorrow = datetomorrow.day
try:
rediskeyschoolholidaytomorrow = hashlib.md5("schoolholidaytomorrow"+zoneok).hexdigest()
getschoolholidaytomorrow = rc.get(rediskeyschoolholidaytomorrow)
if getschoolholidaytomorrow is None:
result = school.isschoolcalendar(zoneok,daytomorrow,monthtomorrow,yeartomorrow)
rediskeyschoolholidaytomorrow = hashlib.md5("schoolholidaytomorrow"+zoneok).hexdigest()
rc.set(rediskeyschoolholidaytomorrow, result, 1800)
rc.expire(rediskeyschoolholidaytomorrow ,1800)
print "SET SCHOOL HOLIDAY "+zoneok+ " TOMORROW IN REDIS"
else:
result = getschoolholidaytomorrow
print "FOUND SCHOOL HOLIDAY "+zoneok+" TOMORROW IN REDIS"
except:
result = school.isschoolcalendar(zoneok,daytomorrow,monthtomorrow,yeartomorrow)
if result == None or result == "None":
result = "False"
try:
description = result.decode('utf-8')
except:
description = result
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"schoolholiday": description}, ensure_ascii=False).encode('utf8')
else:
return description
if daterequest == "all":
result = school.getschoolcalendar(zone)
try:
description = result.decode('unicode_escape')
except:
description = result
web.header('Content-Type', 'application/json')
return description
if daterequest != "now" and daterequest != "all" and daterequest != "tomorrow":
try:
result = daterequest.split('-')
except:
web.badrequest()
return "Incorrect date format : D-M-YYYY\n"
try:
day = int(result[0])
month = int(result[1])
year = int(result[2])
except:
web.badrequest()
return "Incorrect date format : D-M-YYYY\n"
if day > 31 or month > 12:
web.badrequest()
return "Incorrect date format : D-M-YYYY\n"
result = school.isschoolcalendar(zoneok,day,month,year)
if result == None :
result = "False"
try:
description = result.decode('utf-8')
except:
description = result
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"schoolholiday": description}, ensure_ascii=False).encode('utf8')
else:
return description
"""
@api {get} /vigilance/:department/:vigilancerequest/:responsetype Vigilance MeteoFrance
@apiName GetVigilance
@apiGroup Domogeek
@apiDescription Ask Vigilance MeteoFrance for :department
@apiParam {String} department Department number (France Metropolitan).
@apiParam {String} vigilancerequest Vigilance request {color|risk|flood|all}.
@apiParam {String} [responsetype] Specify Response Type (raw by default or specify json, only for single element).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{"vigilanceflood": "jaune", "vigilancecolor": "orange", "vigilancerisk": "orages"}
HTTP/1.1 200 OK
vert
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/vigilance/29/color
curl http://api.domogeek.fr/vigilance/29/color/json
curl http://api.domogeek.fr/vigilance/29/risk/json
curl http://api.domogeek.fr/vigilance/29/all
"""
class vigilance:
def GET(self,uri):
request = uri.split('/')
if request == ['']:
web.badrequest()
return "Incorrect request : /vigilance/{department}/{color|risk|flood|all}\n"
try:
dep = request[0]
except:
web.badrequest()
return "Incorrect request : /vigilance/{department}/{color|risk|flood|all}\n"
try:
vigilancequery = request[1]
except:
web.badrequest()
return "Incorrect request : /vigilance/{department}/{color|risk|flood|all}\n"
try:
format = request[2]
except:
format = None
if len(dep) > 2:
web.badrequest()
return "Incorrect request : /vigilance/{department number}/{color|risk|flood|all}\n"
if vigilancequery not in ["color","risk","flood", "all"]:
web.badrequest()
return "Incorrect request : /vigilance/{department}/{color|risk|flood|all}\n"
if dep == "92" or dep == "93" or dep == "94":
dep = "75"
if dep == "20":
dep = "2A"
try:
rediskeyvigilance = hashlib.md5(dep+"vigilance").hexdigest()
getvigilance = rc.get(rediskeyvigilance)
if getvigilance is None:
result = vigilancerequest.getvigilance(dep)
rediskeyvigilance = hashlib.md5(dep+"vigilance").hexdigest()
rc.set(rediskeyvigilance, result, 1800)
rc.expire(rediskeyvigilance ,1800)
print "SET VIGILANCE "+dep+" IN REDIS"
else:
tr1 = getvigilance.replace("(","")
tr2 = tr1.replace(")","")
tr3 = tr2.replace("'","")
tr4 = tr3.replace(" ","")
result = tr4.split(',')
print "FOUND VIGILANCE "+dep+" IN REDIS"
except:
result = vigilancerequest.getvigilance(dep)
color = result[0]
risk = result[1]
flood = result[2]
if vigilancequery == "color":
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"vigilancecolor": color})
else:
return color
if vigilancequery == "risk":
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"vigilancerisk": risk})
else:
return risk
if vigilancequery == "flood":
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"vigilanceflood": flood})
else:
return flood
if vigilancequery == "all":
web.header('Content-Type', 'application/json')
return json.dumps({"vigilancecolor": color, "vigilancerisk": risk, "vigilanceflood": flood})
"""
@api {get} /geolocation/:city City Geolocation
@apiName GetGeolocation
@apiGroup Domogeek
@apiDescription Ask geolocation (latitude/longitude) :city
@apiParam {String} city City name (avoid accents, no spaces; no guarantee it works outside Metropolitan France).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{"latitude": 48.390394000000001, "longitude": -4.4860759999999997}
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/geolocation/brest
"""
class geolocation:
def GET(self,uri):
checkgoogle = False
checkbing = False
checkgeonames = False
inredis = False
request = uri.split('/')
if request == ['']:
web.badrequest()
return "Incorrect request : /geolocation/{city}\n"
try:
city = request[0]
except:
return "Incorrect request : /geolocation/{city}\n"
try:
rediskey = hashlib.md5(city).hexdigest()
getlocation = rc.get(rediskey)
if getlocation is None:
pass
else:
print "FOUND LOCATION IN REDIS !!!"
inredis = "ok"
tr1 = getlocation.replace("(","")
tr2 = tr1.replace(")","")
data = tr2.split(',')
web.header('Content-Type', 'application/json')
return json.dumps({"latitude": float(data[0]), "longitude": float(data[1])})
except:
pass
if googleapikey == '' or inredis == "ok":
pass
else:
try:
data = geolocationrequest.geogoogle(city, googleapikey)
checkgoogle = True
rediskey = hashlib.md5(city).hexdigest()
rc.set(rediskey, (data[0], data[1]))
web.header('Content-Type', 'application/json')
return json.dumps({"latitude": data[0], "longitude": data[1]})
except:
print "NO VALUE FROM GOOGLE"
if bingmapapikey == '' or inredis == "ok":
pass
else:
if checkgoogle:
pass
else:
try:
data = geolocationrequest.geobing(city, bingmapapikey)
except:
print "NO VALUE FROM BING"
data = False
if not data :
print "NO BING"
else:
checkbing = True
rediskey = hashlib.md5(city).hexdigest()
rc.set(rediskey, (data[0], data[1]))
web.header('Content-Type', 'application/json')
return json.dumps({"latitude": data[0], "longitude": data[1]})
if geonameskey == '' or inredis == "ok":
pass
else:
if checkbing:
pass
else:
try:
data = geolocationrequest.geonames(city, geonameskey)
except:
print "NO VALUE FROM GEONAMES"
data = False
if not data :
print "NO VALUE FROM GEONAMES"
else:
checkgeonames = True
rediskey = hashlib.md5(city).hexdigest()
rc.set(rediskey, (data[0], data[1]))
web.header('Content-Type', 'application/json')
return json.dumps({"latitude": data[0], "longitude": data[1]})
if not checkgoogle and not checkbing and not checkgeonames and not inredis:
return "NO GEOLOCATION DATA AVAILABLE\n"
"""
@api {get} /sun/:city/:sunrequest/:date/:responsetype Sun Status Request
@apiName GetSun
@apiGroup Domogeek
@apiDescription Ask to know sunrise, sunset, zenith, day duration for :date in :city (France)
@apiParam {String} city City name (avoid accents, no space, France Metropolitan).
@apiParam {String} sunrequest Ask for {sunrise | sunset | zenith | dayduration | all}.
@apiParam {String} date Date request {now | tomorrow}.
@apiParam {String} [responsetype] Specify Response Type (raw by default or specify json, only for single element).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{"sunset": "20:59"}
HTTP/1.1 200 OK
{"dayduration": "15:06", "sunset": "21:18", "zenith": "13:44", "sunrise": "6:11"}
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/sun/brest/all/now
curl http://api.domogeek.fr/sun/bastia/sunset/now/json
curl http://api.domogeek.fr/sun/strasbourg/sunrise/tomorrow
"""
class dawndusk:
def GET(self,uri):
getutc = float(time.strftime("%z")[:3])
request = uri.split('/')
if request == ['']:
web.badrequest()
return "Incorrect request : /sun/city/{sunrise|sunset|zenith|dayduration|all}/{now|tomorrow}\n"
try:
city = request[0]
except:
web.badrequest()
return "Incorrect request : /sun/city/{sunrise|sunset|zenith|dayduration|all}/{now|tomorrow}\n"
if len(city) < 1:
web.badrequest()
return "Incorrect request : /sun/city/{sunrise|sunset|zenith|dayduration|all}/{now|tomorrow}\n"
try:
print str(city)
except UnicodeEncodeError:
web.badrequest()
return "Incorrect city format : /sun/city/{sunrise|sunset|zenith|dayduration|all}/{now|tomorrow}\n"
try:
dawnduskrequestelement = request[1]
except:
web.badrequest()
return "Incorrect request : /sun/city/{sunrise|sunset|zenith|dayduration|all}/{now|tomorrow}\n"
try:
daterequest = request[2]
except:
web.badrequest()
return "Incorrect request : /sun/city/{sunrise|sunset|zenith|dayduration|all}/{now|tomorrow}\n"
try:
format = request[3]
except:
format = None
if dawnduskrequestelement not in ["sunrise", "sunset", "zenith", "dayduration", "all"]:
return "Incorrect request : /sun/city/{sunrise|sunset|zenith|dayduration|all}/{now|tomorrow}\n"
try:
rediskey = hashlib.md5(city).hexdigest()
getlocation = rc.get(rediskey)
if getlocation is None:
print "NO KEY IN REDIS"
responsegeolocation = urllib2.urlopen(localapiurl+'/geolocation/'+city)
resultgeolocation = json.load(responsegeolocation)
latitude = resultgeolocation["latitude"]
longitude = resultgeolocation["longitude"]
else:
print "FOUND LOCATION IN REDIS !!!"
tr1 = getlocation.replace("(","")
tr2 = tr1.replace(")","")
data = tr2.split(',')
latitude = float(data[0])
longitude = float(data[1])
except:
return "no data available"
if request[2] == "now":
today=date.today()
elif request[2] == "tomorrow":
today = date.today() + timedelta(days=1)
else:
return "Incorrect request : /sun/city/{sunrise|sunset|zenith|dayduration|all}/{now|tomorrow}\n"
dawnduskrequest.setNumericalDate(today.day,today.month,today.year)
dawnduskrequest.setLocation(latitude, longitude)
dawnduskrequest.calculateWithUTC(getutc)
sunrise = dawnduskrequest.sunriseTime
zenith = dawnduskrequest.meridianTime
sunset = dawnduskrequest.sunsetTime
dayduration = dawnduskrequest.durationTime
if request[2] == "now" and dawnduskrequestelement == "all" :
web.header('Content-Type', 'application/json')
return json.dumps({"sunrise": sunrise, "zenith": zenith, "sunset": sunset, "dayduration": dayduration})
if request[2] == "now" and dawnduskrequestelement == "sunrise" :
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"sunrise": sunrise})
else:
return sunrise
if request[2] == "now" and dawnduskrequestelement == "sunset" :
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"sunset": sunset})
else:
return sunset
if request[2] == "now" and dawnduskrequestelement == "zenith" :
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"zenith": zenith})
else:
return zenith
if request[2] == "now" and dawnduskrequestelement == "dayduration" :
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"dayduration": dayduration})
else:
return dayduration
if request[2] == "tomorrow" and dawnduskrequestelement == "all" :
web.header('Content-Type', 'application/json')
return json.dumps({"sunrise": sunrise, "zenith": zenith, "sunset": sunset, "dayduration": dayduration})
if request[2] == "tomorrow" and dawnduskrequestelement == "sunrise" :
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"sunrise": sunrise})
else:
return sunrise
if request[2] == "tomorrow" and dawnduskrequestelement == "sunset" :
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"sunset": sunset})
else:
return sunset
if request[2] == "tomorrow" and dawnduskrequestelement == "zenith" :
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"zenith": zenith})
else:
return zenith
if request[2] == "tomorrow" and dawnduskrequestelement == "dayduration" :
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"dayduration": dayduration})
else:
return dayduration
"""
@api {get} /weather/:city/:weatherrequest/:date/:responsetype Weather Status Request
@apiName GetWeather
@apiGroup Domogeek
@apiDescription Ask for weather (temperature, humidity, pressure, windspeed...) for :date in :city (France)
@apiParam {String} city City name (avoid accents, no space, France Metropolitan).
@apiParam {String} weatherrequest Ask for {temperature|humidity|pressure|windspeed|weather|rain|all}.
@apiParam {String} date Date request {today | tomorrow}.
@apiParam {String} [responsetype] Specify Response Type (raw by default or specify json, only for single element).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{u'min': 15.039999999999999, u'max': 20.34, u'eve': 19.989999999999998, u'morn': 20.34, u'night': 15.039999999999999, u'day': 20.34}
HTTP/1.1 200 OK
{"pressure": 1031.0799999999999}
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/weather/brest/all/today
curl http://api.domogeek.fr/weather/brest/pressure/today/json
curl http://api.domogeek.fr/weather/brest/weather/tomorrow
curl http://api.domogeek.fr/weather/brest/rain/today
"""
class weather:
def GET(self,uri):
request = uri.split('/')
if request == ['']:
web.badrequest()
return "Incorrect request : /weather/city/{temperature|humidity|pressure|weather|windspeed|rain|all}/{today|tomorrow}\n"
try:
city = request[0]
except:
return "Incorrect request : /weather/city/{temperature|humidity|pressure|weather|windspeed|rain|all}/{today|tomorrow}\n"
try:
weatherrequestelement = request[1]
except:
return "Incorrect request : /weather/city/{temperature|humidity|pressure|weather|windspeed|rain|all}/{today|tomorrow}\n"
try:
daterequest = request[2]
except:
return "Incorrect request : /weather/city/{temperature|humidity|pressure|weather|windspeed|rain|all}/{today|tomorrow}\n"
try:
format = request[3]
except:
format = None
if weatherrequestelement not in ["temperature", "humidity", "pressure", "weather", "windspeed", "rain", "all"]:
return "Incorrect request : /weather/city/{temperature|humidity|pressure|weather|windspeed|rain|all}/{today|tomorrow}\n"
try:
rediskey = hashlib.md5(city).hexdigest()
getlocation = rc.get(rediskey)
if getlocation is None:
print "NO KEY IN REDIS"
responsegeolocation = urllib2.urlopen(localapiurl+'/geolocation/'+city)
resultgeolocation = json.load(responsegeolocation)
latitude = resultgeolocation["latitude"]
longitude = resultgeolocation["longitude"]
else:
print "FOUND LOCATION IN REDIS !!!"
tr1 = getlocation.replace("(","")
tr2 = tr1.replace(")","")
data = tr2.split(',')
latitude = float(data[0])
longitude = float(data[1])
except:
return "no data available"
if request[2] == "today":
todayweather = weatherrequest.todayopenweathermap(latitude, longitude, weatherrequestelement)
datenow = datetime.now()
datetoday = datenow.strftime('%Y-%m-%d')
try:
rediskeytodayrain = hashlib.md5(str(latitude)+str(longitude)+str(datetoday)).hexdigest()
gettodayrain = rc.get(rediskeytodayrain)
if gettodayrain is None:
todayrain = weatherrequest.getrain(latitude, longitude, worldweatheronlineapikey, datetoday)
rediskeytodayrain = hashlib.md5(str(latitude)+str(longitude)+str(datetoday)).hexdigest()
rc.set(rediskeytodayrain, todayrain)
rc.expire(rediskeytodayrain, 3600)
print "SET RAIN IN REDIS"
else:
todayrain = gettodayrain
print "FOUND RAIN IN REDIS"
except:
todayrain = weatherrequest.getrain(latitude, longitude, worldweatheronlineapikey, datetoday)
if weatherrequestelement != "all" or weatherrequestelement != "temperature" or weatherrequestelement != "weather":
if format == "json":
web.header('Content-Type', 'application/json')
if weatherrequestelement == "humidity":
return json.dumps({"humidity": todayweather})
if weatherrequestelement == "pressure":
return json.dumps({"pressure": todayweather})
if weatherrequestelement == "windspeed":
return json.dumps({"windspeed": todayweather})
if weatherrequestelement == "rain":
return json.dumps({"rain": todayrain})
else:
if weatherrequestelement == "rain":
return todayrain
else:
return todayweather
else:
return todayweather
if request[2] == "tomorrow":
tomorrowweather = weatherrequest.tomorrowopenweathermap(latitude, longitude, weatherrequestelement)
datenow = datetime.now()
tomorrow = datenow + timedelta(days=1)
datetomorrow = tomorrow.strftime('%Y-%m-%d')
try:
rediskeytomorrowrain = hashlib.md5(str(latitude)+str(longitude)+str(datetomorrow)).hexdigest()
gettomorrowrain = rc.get(rediskeytomorrowrain)
if gettomorrowrain is None:
tomorrowrain = weatherrequest.getrain(latitude, longitude, worldweatheronlineapikey, datetomorrow)
rediskeytomorrowrain = hashlib.md5(str(latitude)+str(longitude)+str(datetomorrow)).hexdigest()
rc.set(rediskeytomorrowrain, tomorrowrain)
rc.expire(rediskeytomorrowrain, 3600)
print "SET RAIN IN REDIS"
else:
tomorrowrain = gettomorrowrain
print "FOUND RAIN IN REDIS"
except:
tomorrowrain = weatherrequest.getrain(latitude, longitude, worldweatheronlineapikey, datetomorrow)
if weatherrequestelement != "all" or weatherrequestelement != "temperature" or weatherrequestelement != "weather":
if format == "json":
web.header('Content-Type', 'application/json')
if weatherrequestelement == "humidity":
return json.dumps({"humidity": tomorrowweather})
if weatherrequestelement == "pressure":
return json.dumps({"pressure": tomorrowweather})
if weatherrequestelement == "windspeed":
return json.dumps({"windspeed": tomorrowweather})
if weatherrequestelement == "rain":
return json.dumps({"rain": tomorrowrain})
else:
if weatherrequestelement == "rain":
return tomorrowrain
else:
return tomorrowweather
else:
return tomorrowweather
"""
@api {get} /myip/:responsetype Display Public IP
@apiName GetMyPublicIP
@apiGroup Domogeek
@apiDescription Display your public IP
@apiParam {String} [responsetype] Specify Response Type (raw by default or specify json, only for single element).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{"myip": "1.1.1.1"}
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/myip
curl http://api.domogeek.fr/myip/json
"""
class myip:
def GET(self,uri,ip=None):
try:
request = uri.split('/')
except:
pass
try:
format = request[1]
print format
except:
format = None
ip = web.ctx.env.get('HTTP_X_FORWARDED_FOR', web.ctx.get('ip', ''))
for ip in ip.split(','):
ip = ip.strip()
try:
socket.inet_aton(ip)
if format == "json":
web.header('Cache-control', 'public,max-age=0')
web.header('Content-Type', 'application/json')
return json.dumps({"myip": ip})
else:
web.header('Cache-control', 'public,max-age=0')
return ip
except socket.error:
web.badrequest()
pass
"""
@api {get} /season/:responsetype Display Current Season
@apiName GetSeason
@apiGroup Domogeek
@apiDescription Display current season
@apiParam {String} [responsetype] Specify Response Type (raw by default or specify json, only for single element).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{"season": "winter"}
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/season
curl http://api.domogeek.fr/season/json
"""
class season:
def GET(self,uri):
try:
request = uri.split('/')
except:
pass
try:
format = request[1]
except:
format = None
today = datetime.today().timetuple().tm_yday
spring = range(80, 172)
summer = range(172, 264)
autumn = range(264, 355)
if today in spring:
season = 'spring'
elif today in summer:
season = 'summer'
elif today in autumn:
season = 'autumn'
else:
season = 'winter'
if format == "json":
web.header('Content-Type', 'application/json')
return json.dumps({"season": season})
else:
return season
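# Worked example of the day-of-year boundaries used above (approximate
# astronomical seasons): days 80-171 -> spring, 172-263 -> summer,
# 264-354 -> autumn, everything else -> winter. For instance, 1 August is
# day 213 of a non-leap year, which falls in range(172, 264) and is therefore
# reported as "summer".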
"""
@api {get} /ejpedf/:zone/:date/:responsetype EJP EDF Status Request
@apiName GetEJP
@apiGroup Domogeek
@apiDescription Ask for EJP EDF Status
@apiParam {String} zone Specify Zone Request {nord|sud|ouest|paca}
@apiParam {String} date Ask for today or tomorrow {today|tomorrow}
@apiParam {String} [responsetype] Specify Response Type (raw by default or specify json, only for single element).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
{"ejp": "False"}
Return "True" : EJP day
Return "False": No EJP day
Return "ND" : Non Specified
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/ejpedf/nord/today
curl http://api.domogeek.fr/ejpedf/sud/tomorrow
curl http://api.domogeek.fr/ejpedf/paca/today/json
"""
class ejpedf:
    def GET(self, uri):
        usage = "Incorrect request : /ejpedf/{nord|sud|ouest|paca}/{today|tomorrow}\n"
        request = uri.split('/')
        if request == ['']:
            web.badrequest()
            return usage
        try:
            zone = request[0]
            daterequest = request[1]
        except IndexError:
            web.badrequest()
            return usage
        try:
            format = request[2]
        except IndexError:
            format = None
        zoneok = zone.lower()
        if zoneok not in ["nord", "sud", "paca", "ouest"]:
            web.badrequest()
            return usage
        if daterequest == "today":
            try:
                rediskeyejptoday = hashlib.md5("ejptoday"+zoneok).hexdigest()
                getejptoday = rc.get(rediskeyejptoday)
                if getejptoday is None:
                    result = ejprequest.EJPToday(zoneok)
                    rc.set(rediskeyejptoday, result)
                    rc.expire(rediskeyejptoday, 1800)
                    print "SET EJP "+zoneok+" TODAY IN REDIS"
                else:
                    result = getejptoday
                    print "FOUND EJP "+zoneok+" TODAY IN REDIS"
            except:
                # Redis unavailable: fall back to a direct request
                result = ejprequest.EJPToday(zoneok)
            if format == "json":
                web.header('Content-Type', 'application/json')
                return json.dumps({"ejp": result})
            else:
                return result
        if daterequest == "tomorrow":
            try:
                rediskeyejptomorrow = hashlib.md5("ejptomorrow"+zoneok).hexdigest()
                getejptomorrow = rc.get(rediskeyejptomorrow)
                if getejptomorrow is None:
                    result = ejprequest.EJPTomorrow(zoneok)
                    rc.set(rediskeyejptomorrow, result)
                    rc.expire(rediskeyejptomorrow, 1800)
                    print "SET EJP "+zoneok+" TOMORROW IN REDIS"
                else:
                    result = getejptomorrow
                    print "FOUND EJP "+zoneok+" TOMORROW IN REDIS"
            except:
                # Redis unavailable: fall back to a direct request
                result = ejprequest.EJPTomorrow(zoneok)
            if format == "json":
                web.header('Content-Type', 'application/json')
                return json.dumps({"ejp": result})
            else:
                return result
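# Illustrative sketch (not part of the original handler): both branches above
# follow the same cache-aside pattern -- derive a stable key from the request,
# try Redis first, and on a miss fetch the value and store it with a
# 30-minute TTL. A generic version, assuming a redis-py style client `rc`:
def _cached(rc, key_material, fetch, ttl=1800):
    """Return the cached value for key_material, or fetch() and cache it."""
    key = hashlib.md5(key_material).hexdigest()
    value = rc.get(key)
    if value is None:
        value = fetch()
        rc.set(key, value)
        rc.expire(key, ttl)
    return value
# e.g. _cached(rc, "ejptoday" + "nord", lambda: ejprequest.EJPToday("nord"))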
"""
@api {get} /feastedsaint/:date/:responsetype Feasted Day of Saint Request
@apiName GetFeastedSaintDay
@apiGroup Domogeek
@apiDescription Ask to know feasted Saint for :date or date for :name
@apiParam {String} now Ask for today.
@apiParam {String} tomorrow Ask for tomorrow.
@apiParam {String} name Search feasted saint day for name.
@apiParam {Datetime} D-M Ask for specific date.
@apiParam {String} [responsetype] Specify Response Type (raw by default or specify json, only for single element).
@apiSuccessExample Success-Response:
HTTP/1.1 200 OK
Guillaume
10-1
@apiErrorExample Error-Response:
HTTP/1.1 400 Bad Request
400 Bad Request
@apiExample Example usage:
curl http://api.domogeek.fr/feastedsaint/guillaume
curl http://api.domogeek.fr/feastedsaint/now
curl http://api.domogeek.fr/feastedsaint/now/json
curl http://api.domogeek.fr/feastedsaint/1-5
curl http://api.domogeek.fr/feastedsaint/2-12/json
"""
class feastedsaint:
    def GET(self, uri):
        request = uri.split('/')
        if request == ['']:
            web.badrequest()
            return "Incorrect request : /feastedsaint/{now|tomorrow|date(D-M)|name}\n"
        try:
            format = request[1]
        except IndexError:
            format = None
        if request[0] == "now":
            datenow = datetime.now()
            todayrequest = str(datenow.day)+"-"+str(datenow.month)
            rediskeyfeastedsaint = hashlib.md5(todayrequest+"feastedsaint").hexdigest()
            result = rc.get(rediskeyfeastedsaint)
            if format == "json":
                web.header('Content-Type', 'application/json')
                return json.dumps({"feastedsaint": result})
            else:
                return result
        if request[0] == "tomorrow":
            datetomorrow = datetime.now() + timedelta(days=1)
            todayrequest = str(datetomorrow.day)+"-"+str(datetomorrow.month)
            rediskeyfeastedsaint = hashlib.md5(todayrequest+"feastedsaint").hexdigest()
            result = rc.get(rediskeyfeastedsaint)
            if format == "json":
                web.header('Content-Type', 'application/json')
                return json.dumps({"feastedsaint": result})
            else:
                return result
        # neither "now" nor "tomorrow": try a D-M date, then fall back to a name search
        result = request[0].split('-')
        try:
            day = int(result[0])
            month = int(result[1])
        except (ValueError, IndexError):
            try:
                namesearch = request[0].lower()
                rediskeynamefeastedsaint = hashlib.md5(namesearch+"feastedsaint").hexdigest()
                result = rc.get(rediskeynamefeastedsaint)
                if result is None:
                    result = "no name found or incorrect date format"
                if format == "json":
                    web.header('Content-Type', 'application/json')
                    return json.dumps({"feastedsaint": result})
                else:
                    return result
            except:
                web.badrequest()
                return "Incorrect date format : D-M\n"
        if day < 1 or day > 31 or month < 1 or month > 12:
            web.badrequest()
            return "Incorrect date format : D-M\n"
        todayrequest = str(day)+"-"+str(month)
        rediskeyfeastedsaint = hashlib.md5(todayrequest+"feastedsaint").hexdigest()
        result = rc.get(rediskeyfeastedsaint)
        if format == "json":
            web.header('Content-Type', 'application/json')
            return json.dumps({"feastedsaint": result})
        else:
            return result
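# Illustrative usage (not part of the original handler): a client only needs
# the path shapes documented above. With the Python 2 standard library, which
# the rest of this file assumes:
#
#   import urllib2
#   urllib2.urlopen('http://api.domogeek.fr/feastedsaint/now/json').read()
#   # -> '{"feastedsaint": "Guillaume"}'  (the value depends on the date)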
class MyDaemon(Daemon):
def run(self):
app.run()
if __name__ == "__main__":
service = MyDaemon('/tmp/apidomogeek.pid')
if len(sys.argv) == 2:
if 'start' == sys.argv[1]:
sys.argv[1] = listenip+':'+listenport
service.start()
elif 'stop' == sys.argv[1]:
service.stop()
elif 'restart' == sys.argv[1]:
service.restart()
elif 'console' == sys.argv[1]:
sys.argv[1] = listenip+':'+listenport
service.console()
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
else:
print "usage: %s start|stop|restart|console" % sys.argv[0]
sys.exit(2)
| guiguiabloc/api-domogeek | apidomogeek.py | Python | gpl-2.0 | 51,860 |
from setuptools import setup, find_packages
setup(
name = "dwpwg",
use_scm_version = True,
author = "Raspberry Aether",
author_email = "raspberryaether@riseup.net",
description = "(d)ice(w)are (p)ass(w)ord (g)enerator",
keywords = ("diceware password passwords passphrase passphrases " +
"security bitcoins monero crypto cryptocurrency " +
"key keys keyphrase keyphrases encryption nsa " +
"surveillance privacy private secret secrecy " +
"generate generator dice d20 brainwallet wallet").split(),
url = "https://github.com/raspberryaether/dwpwg",
packages = find_packages(),
include_package_data=True,
entry_points = {
'console_scripts': [
'dwpwg = dwpwg.pwgen:main'
]
},
classifiers = [
"Development Status :: 3 - Alpha",
"Environment :: Console",
"License :: OSI Approved :: GNU General Public License v2 (GPLv2)",
"Natural Language :: English",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Topic :: Security",
"Topic :: Security :: Cryptography",
"Topic :: Utilities"
],
install_requires = [
"pyparsing"
],
setup_requires = [
"setuptools_scm"
]
)
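# Illustrative note (not part of the original setup.py): the console_scripts
# entry point above makes installation generate a `dwpwg` executable that
# imports dwpwg/pwgen.py and calls its main(). A minimal module satisfying
# that contract could look like this (hypothetical sketch, not the real
# implementation):
#
#   # dwpwg/pwgen.py
#   def main():
#       print("generated diceware passphrase goes here")
#
# After `pip install .`, running `dwpwg` on the command line invokes main().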
| raspberryaether/dwpwg | setup.py | Python | gpl-2.0 | 1,336 |
# -*- coding: utf-8 -*-
"""
Plex Server
Who is watching what?
"""
import os
from plexapi.server import PlexServer
from plexapi.exceptions import NotFound, Unauthorized
from plexapi.myplex import MyPlexAccount
from pkm import log, utils, SHAREDIR
from pkm.decorators import never_raise, threaded_method
from pkm.exceptions import ValidationError
from pkm.plugin import BasePlugin, BaseConfig
from pkm.filters import register_filter
from plexapi.video import Episode
NAME = 'Plex Server'
class Plugin(BasePlugin):
DEFAULT_INTERVAL = 60
@threaded_method
def enable(self):
try:
self.plex = fetch_plex_instance(self.pkmeter)
super(Plugin, self).enable()
except NotFound:
log.warning('Plex server not available.')
return self.disable()
@never_raise
def update(self):
self.data.update(plex_dict(self.plex))
self.data['videos'] = []
for video in self.plex.sessions():
vinfo = {
'user': video.usernames[0],
'type': video.type,
'thumb': video.thumbUrl,
'year': video.year,
'duration': video.duration,
'viewoffset': video.viewOffset,
'percent': round((video.viewOffset / video.duration) * 100),
'player': video.players[0].device if video.players else 'NA',
'state': video.players[0].state if video.players else 'NA',
'title': self._video_title(video), # keep this last
}
if vinfo['state'] != 'paused':
self.data['videos'].append(vinfo)
super(Plugin, self).update()
def _plex_address(self):
return 'http://%s:%s' % (self.plex.address, self.plex.port)
def _video_title(self, video):
if video.type == Episode.TYPE:
return '%s s%se%s' % (video.grandparentTitle, video.seasonNumber, video.index)
return video.title
class Config(BaseConfig):
TEMPLATE = os.path.join(SHAREDIR, 'templates', 'plexserver_config.html')
FIELDS = utils.Bunch(BaseConfig.FIELDS, host={}, username={'save_to_keyring':True},
password={'save_to_keyring':True})
def validate_password(self, field, value):
if not value:
return value
try:
MyPlexAccount.signin(self.fields.username.value, value)
return value
except Unauthorized:
raise ValidationError('Invalid username or password.')
def validate_host(self, field, value):
if not value:
return value
try:
username = self.fields.username.value
password = self.fields.password.value
fetch_plex_instance(self.pkmeter, username, password, value)
return value
except Unauthorized:
raise ValidationError('Invalid username or password.')
except NotFound:
raise ValidationError('Server host or name not found.')
except:
raise ValidationError('Invalid server.')
def fetch_plex_instance(pkmeter, username=None, password=None, host=None):
username = username or pkmeter.config.get('plexserver', 'username', from_keyring=True)
password = password or pkmeter.config.get('plexserver', 'password', from_keyring=True)
host = host or pkmeter.config.get('plexserver', 'host', '')
if username:
log.info('Logging into MyPlex with user %s', username)
user = MyPlexAccount.signin(username, password)
return user.resource(host).connect()
log.info('Connecting to Plex host: %s', host)
return PlexServer(host)
def plex_dict(plex):
data = {}
data['baseurl'] = plex._baseurl
data['friendlyName'] = plex.friendlyName
data['machineIdentifier'] = plex.machineIdentifier
data['myPlex'] = plex.myPlex
data['myPlexMappingState'] = plex.myPlexMappingState
data['myPlexSigninState'] = plex.myPlexSigninState
data['myPlexSubscription'] = plex.myPlexSubscription
data['myPlexUsername'] = plex.myPlexUsername
data['platform'] = plex.platform
data['platformVersion'] = plex.platformVersion
data['updatedAt'] = plex.updatedAt
data['version'] = plex.version
return data
@register_filter()
def plexserver_length(value):
hours = int(value / 3600000)
minutes = int((value - (hours * 3600000)) / 60000)
return '%s:%02d' % (hours, minutes)
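# Illustrative usage of the filter above (not part of the original plugin):
# plexserver_length() formats a millisecond duration as H:MM, e.g.
#   plexserver_length(5400000)   # -> '1:30'  (90 minutes)
#   plexserver_length(45000000)  # -> '12:30' (750 minutes)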
| mjs7231/pkmeter | pkm/plugins/plexserver.py | Python | bsd-3-clause | 4,435 |
import numpy as np
import os
import pandas as pd
import rpc
import sys
from sklearn_cifar_container import SklearnCifarContainer
from sklearn.metrics import accuracy_score
classes = [
'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
'ship', 'truck'
]
positive_class = classes.index('airplane')
negative_class = classes.index('bird')
def load_cifar(cifar_location, cifar_filename="train.data", norm=False):
cifar_path = cifar_location + "/" + cifar_filename
print("Source file: %s" % cifar_path)
df = pd.read_csv(cifar_path, sep=",", header=None)
data = df.values
print("Number of image files: %d" % len(data))
y = data[:, 0]
X = data[:, 1:]
Z = X
if norm:
mu = np.mean(X.T, 0)
sigma = np.var(X.T, 0)
Z = (X.T - mu) / np.array([np.sqrt(z) if z > 0 else 1. for z in sigma])
Z = Z.T
return (Z, y)
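# Illustrative check of the norm=True branch above (not part of the original
# file): because mu and sigma are taken over axis 0 of X.T, each *sample*
# (row of X) is standardized across its features, with zero-variance samples
# left unscaled by the `else 1.` guard:
#
#   X = np.array([[1., 3.], [2., 2.]])
#   mu = np.mean(X.T, 0)     # per-sample means     -> [2., 2.]
#   sigma = np.var(X.T, 0)   # per-sample variances -> [1., 0.]
#   guard = np.array([np.sqrt(z) if z > 0 else 1. for z in sigma])
#   ((X.T - mu) / guard).T   # -> [[-1., 1.], [0., 0.]]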
def filter_data(X, y):
X_train, y_train = [], []
for (example, label) in zip(X, y):
if label == positive_class:
X_train.append(example)
y_train.append(1.0)
elif label == negative_class:
X_train.append(example)
y_train.append(-1.0)
X_train = np.array(X_train)
y_train = np.array(y_train)
return X_train, y_train
if __name__ == '__main__':
model_path = os.environ["CLIPPER_MODEL_PATH"]
pkl_names = [
l for l in os.listdir(model_path) if os.path.splitext(l)[1] == ".pkl"
]
assert len(pkl_names) == 1
pkl_path = os.path.join(model_path, pkl_names[0])
print(pkl_path)
model = SklearnCifarContainer(pkl_path)
X_test, y_test = load_cifar('data', 'test.data')
X_test, y_test = filter_data(X_test, y_test)
y_test[np.where(y_test == -1)] = 0
preds = model.predict_ints(X_test)
print("Test accuracy: %f" % accuracy_score(y_test, preds))
| dubeyabhi07/clipper | containers/python/test_sklearn_cifar_container.py | Python | apache-2.0 | 1,871 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import print_function
"""Script to checkout a sprout appliance
Usage:
sprout.py checkout
"""
import click
import os
import signal
import sys
import time
import yaml
from cfme.test_framework.sprout.client import AuthException
from cfme.test_framework.sprout.plugin import SproutManager, SproutProvisioningRequest
from cfme.utils.path import conf_path
@click.group(help='Functions for interacting with sprout')
def main():
pass
@main.command('checkout', help='Checkout appliance and start keepalive daemon')
@click.option('--appliances', envvar="SPROUT_APPLIANCES", default=1,
help='How many appliances to provision')
@click.option('--timeout', default=60, help='How many minutes is the lease timeout')
@click.option('--provision-timeout', default=60,
help='How many minutes to wait for appliances provisioned')
@click.option('--group', required=True, envvar='SPROUT_GROUP', help='Which stream to use')
@click.option('--version', default=None, help='Which version to use')
@click.option('--date', default=None, help='Which date to use')
@click.option('--desc', default=None, envvar='SPROUT_DESC', help='Set description of the pool')
@click.option('--override-ram', default=0, help='Override RAM (MB). 0 means no override')
@click.option('--override-cpu', default=0,
help='Override CPU core count. 0 means no override')
@click.option('--populate-yaml', is_flag=True, default=False,
help="Populate the yaml with the appliance")
@click.option('--provider', default=None, help="Which provider to use")
@click.option('--provider-type', 'provider_type', default=None,
help='A provider type to select from')
@click.option('--template-type', 'template_type', default=None, help='A template type')
@click.option('--preconfigured/--notconfigured', default=True,
help='Whether the appliance is configured')
@click.option('--user-key', 'sprout_user_key', default=None,
help='Key for sprout user in credentials yaml, '
'alternatively set SPROUT_USER and SPROUT_PASSWORD env vars')
def checkout(appliances, timeout, provision_timeout, group, version, date, desc,
override_ram, override_cpu, populate_yaml, provider, provider_type, template_type,
preconfigured, sprout_user_key):
"""checks out a sprout provisioning request, and returns it on exit"""
override_cpu = override_cpu or None
override_ram = override_ram or None
sr = SproutProvisioningRequest(group=group, count=appliances, version=version, date=date,
lease_time=timeout, provision_timeout=provision_timeout,
desc=desc, cpu=override_cpu, ram=override_ram, provider=provider,
provider_type=provider_type, template_type=template_type,
preconfigured=preconfigured)
print(sr)
sm = SproutManager(sprout_user_key=sprout_user_key)
def exit_gracefully(signum, frame):
sm.destroy_pool()
sys.exit(0)
signal.signal(signal.SIGINT, exit_gracefully)
signal.signal(signal.SIGTERM, exit_gracefully)
try:
appliance_data = sm.request_appliances(sr)
while not sm.check_fullfilled():
print("waiting...")
time.sleep(10)
sm.reset_timer()
for app in appliance_data:
print("{}: {}".format(app['name'], app['ip_address']))
if populate_yaml:
populate_config_from_appliances(appliance_data)
print("Appliance checked out, hit ctrl+c to checkin")
while True:
time.sleep(10)
except KeyboardInterrupt:
try:
sm.destroy_pool()
except Exception:
print("Error in pool destroy")
except AuthException:
print('\nERROR: Sprout client unauthenticated, please provide env vars or --user-key')
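# Illustrative sketch (not part of the original script): checkout() above
# wires SIGINT/SIGTERM to a cleanup handler so the sprout pool is returned
# even if the process is killed. The same pattern in isolation:
#
#   import signal, sys, time
#
#   def _cleanup(signum, frame):
#       # release external resources here (pool, locks, temp files, ...)
#       sys.exit(0)
#
#   signal.signal(signal.SIGINT, _cleanup)
#   signal.signal(signal.SIGTERM, _cleanup)
#   while True:
#       time.sleep(10)  # keepalive loop; exits via the handler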
def populate_config_from_appliances(appliance_data):
"""populates env.local.yaml with the appliances just obtained
args:
appliance_data: the data of the appliances as taken from sprout
"""
file_name = conf_path.join('env.local.yaml').strpath
if os.path.exists(file_name):
with open(file_name) as f:
y_data = yaml.load(f)
if not y_data:
y_data = {}
else:
y_data = {}
if y_data:
with open(conf_path.join('env.local.backup').strpath, 'w') as f:
yaml.dump(y_data, f, default_flow_style=False)
y_data['appliances'] = []
for app in appliance_data:
app_config = dict(
hostname=app['ip_address'],
ui_protocol="https",
version=str(app['template_version']),
)
y_data['appliances'].append(app_config)
with open(file_name, 'w') as f:
# Use safe dump to avoid !!python/unicode tags
yaml.safe_dump(y_data, f, default_flow_style=False)
if __name__ == "__main__":
main()
| anurag03/integration_tests | cfme/scripting/sprout.py | Python | gpl-2.0 | 5,048 |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from .utils import SubdueTestCase, TempSub
class TestCommandExecution(SubdueTestCase):
def test_simple_command(self):
with TempSub(self, name='simple', thin=False) as s:
s.create_subcommand('status', 'sh', '''echo "I am status"'''
).run_it(
).assertSucess(
).stdout.matches(r'^I am status$')
def test_two_level_command(self):
with TempSub(self, name='ex', thin=False) as sub:
sub.create_subcommand('server/status', 'sh', '''echo "I am server status"'''
).run_it(
).assertSucess(
).stdout.matches(r'^I am server status$')
def test_arguments(self):
with TempSub(self, name='ex', thin=False) as sub:
sub.create_subcommand('example/arguments', 'sh', '''echo "My arguments: $@"'''
).run_it('foo', 'bar', '-a', 'baz'
).assertSucess(
).stdout.matches(r'^My arguments: foo bar -a baz')
| jdevera/subdue | test/test_command_execution.py | Python | mit | 1,116 |
import sys
try:
from django.conf import settings
from django.test.utils import get_runner
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
}
},
ROOT_URLCONF="cbh_datastore_model.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"cbh_datastore_model",
],
SITE_ID=1,
MIDDLEWARE_CLASSES=(),
)
try:
import django
setup = django.setup
except AttributeError:
pass
else:
setup()
except ImportError:
import traceback
traceback.print_exc()
raise ImportError(
"To fix this error, run: pip install -r requirements-test.txt")
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(bool(failures))
if __name__ == '__main__':
run_tests(*sys.argv[1:])
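# Illustrative usage (not part of the original file): run the whole suite or
# a subset by passing test labels, as with Django's own test runner:
#
#   python runtests.py                     # runs the default 'tests' label
#   python runtests.py tests.SomeTestCase  # hypothetical label; any dotted
#                                          # path the runner accepts works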
| thesgc/cbh_datastore_model | runtests.py | Python | mit | 1,196 |
# -*- coding: utf-8 -*-
from __future__ import print_function
import warnings
from datetime import timedelta
import operator
import pytest
from string import ascii_lowercase
from numpy import nan
from numpy.random import randn
import numpy as np
from pandas.compat import lrange, PY35
from pandas import (compat, isna, notna, DataFrame, Series,
MultiIndex, date_range, Timestamp, Categorical,
_np_version_under1p12,
to_datetime, to_timedelta)
import pandas as pd
import pandas.core.nanops as nanops
import pandas.core.algorithms as algorithms
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tests.frame.common import TestData
class TestDataFrameAnalytics(TestData):
# ---------------------------------------------------------------------
# Correlation and covariance
@td.skip_if_no_scipy
def test_corr_pearson(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('pearson')
@td.skip_if_no_scipy
def test_corr_kendall(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('kendall')
@td.skip_if_no_scipy
def test_corr_spearman(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
self._check_method('spearman')
def _check_method(self, method='pearson', check_minp=False):
if not check_minp:
correls = self.frame.corr(method=method)
exp = self.frame['A'].corr(self.frame['C'], method=method)
tm.assert_almost_equal(correls['A']['C'], exp)
else:
result = self.frame.corr(min_periods=len(self.frame) - 8)
expected = self.frame.corr()
expected.loc['A', 'B'] = expected.loc['B', 'A'] = nan
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
def test_corr_non_numeric(self):
self.frame['A'][:5] = nan
self.frame['B'][5:10] = nan
# exclude non-numeric types
result = self.mixed_frame.corr()
expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].corr()
tm.assert_frame_equal(result, expected)
@td.skip_if_no_scipy
def test_corr_nooverlap(self):
# nothing in common
for meth in ['pearson', 'kendall', 'spearman']:
df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1.5, 1],
'C': [np.nan, np.nan, np.nan, np.nan,
np.nan, np.nan]})
rs = df.corr(meth)
assert isna(rs.loc['A', 'B'])
assert isna(rs.loc['B', 'A'])
assert rs.loc['A', 'A'] == 1
assert rs.loc['B', 'B'] == 1
assert isna(rs.loc['C', 'C'])
@td.skip_if_no_scipy
def test_corr_constant(self):
# constant --> all NA
for meth in ['pearson', 'spearman']:
df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],
'B': [np.nan, np.nan, np.nan, 1, 1, 1]})
rs = df.corr(meth)
assert isna(rs.values).all()
def test_corr_int(self):
# dtypes other than float64 #1761
df3 = DataFrame({"a": [1, 2, 3, 4], "b": [1, 2, 3, 4]})
df3.cov()
df3.corr()
@td.skip_if_no_scipy
def test_corr_int_and_boolean(self):
# when the dtypes of the pandas Series differ, the intermediate
# ndarray will have dtype=object, so it needs to be handled properly
df = DataFrame({"a": [True, False], "b": [1, 0]})
expected = DataFrame(np.ones((2, 2)), index=[
'a', 'b'], columns=['a', 'b'])
for meth in ['pearson', 'kendall', 'spearman']:
# RuntimeWarning
with warnings.catch_warnings(record=True):
result = df.corr(meth)
tm.assert_frame_equal(result, expected)
def test_corr_cov_independent_index_column(self):
# GH 14617
df = pd.DataFrame(np.random.randn(4 * 10).reshape(10, 4),
columns=list("abcd"))
for method in ['cov', 'corr']:
result = getattr(df, method)()
assert result.index is not result.columns
assert result.index.equals(result.columns)
def test_cov(self):
# min_periods no NAs (corner case)
expected = self.frame.cov()
result = self.frame.cov(min_periods=len(self.frame))
tm.assert_frame_equal(expected, result)
result = self.frame.cov(min_periods=len(self.frame) + 1)
assert isna(result.values).all()
# with NAs
frame = self.frame.copy()
frame['A'][:5] = nan
frame['B'][5:10] = nan
result = self.frame.cov(min_periods=len(self.frame) - 8)
expected = self.frame.cov()
expected.loc['A', 'B'] = np.nan
expected.loc['B', 'A'] = np.nan
tm.assert_frame_equal(result, expected)
# regular
self.frame['A'][:5] = nan
self.frame['B'][:10] = nan
cov = self.frame.cov()
tm.assert_almost_equal(cov['A']['C'],
self.frame['A'].cov(self.frame['C']))
# exclude non-numeric types
result = self.mixed_frame.cov()
expected = self.mixed_frame.loc[:, ['A', 'B', 'C', 'D']].cov()
tm.assert_frame_equal(result, expected)
# Single column frame
df = DataFrame(np.linspace(0.0, 1.0, 10))
result = df.cov()
expected = DataFrame(np.cov(df.values.T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
df.loc[0] = np.nan
result = df.cov()
expected = DataFrame(np.cov(df.values[1:].T).reshape((1, 1)),
index=df.columns, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_corrwith(self):
a = self.tsframe
noise = Series(randn(len(a)), index=a.index)
b = self.tsframe.add(noise, axis=0)
# make sure order does not matter
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
colcorr = a.corrwith(b, axis=0)
tm.assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
tm.assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))
dropped = a.corrwith(b, axis=0, drop=True)
tm.assert_almost_equal(dropped['A'], a['A'].corr(b['A']))
assert 'B' not in dropped
dropped = a.corrwith(b, axis=1, drop=True)
assert a.index[-1] not in dropped.index
# non time-series data
index = ['a', 'b', 'c', 'd', 'e']
columns = ['one', 'two', 'three', 'four']
df1 = DataFrame(randn(5, 4), index=index, columns=columns)
df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)
correls = df1.corrwith(df2, axis=1)
for row in index[:4]:
tm.assert_almost_equal(correls[row],
df1.loc[row].corr(df2.loc[row]))
def test_corrwith_with_objects(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame()
cols = ['A', 'B', 'C', 'D']
df1['obj'] = 'foo'
df2['obj'] = 'bar'
result = df1.corrwith(df2)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols])
tm.assert_series_equal(result, expected)
result = df1.corrwith(df2, axis=1)
expected = df1.loc[:, cols].corrwith(df2.loc[:, cols], axis=1)
tm.assert_series_equal(result, expected)
def test_corrwith_series(self):
result = self.tsframe.corrwith(self.tsframe['A'])
expected = self.tsframe.apply(self.tsframe['A'].corr)
tm.assert_series_equal(result, expected)
def test_corrwith_matches_corrcoef(self):
df1 = DataFrame(np.arange(10000), columns=['a'])
df2 = DataFrame(np.arange(10000) ** 2, columns=['a'])
c1 = df1.corrwith(df2)['a']
c2 = np.corrcoef(df1['a'], df2['a'])[0][1]
tm.assert_almost_equal(c1, c2)
assert c1 < 1
def test_corrwith_mixed_dtypes(self):
# GH 18570
df = pd.DataFrame({'a': [1, 4, 3, 2], 'b': [4, 6, 7, 3],
'c': ['a', 'b', 'c', 'd']})
s = pd.Series([0, 6, 7, 3])
result = df.corrwith(s)
corrs = [df['a'].corr(s), df['b'].corr(s)]
expected = pd.Series(data=corrs, index=['a', 'b'])
tm.assert_series_equal(result, expected)
def test_bool_describe_in_mixed_frame(self):
df = DataFrame({
'string_data': ['a', 'b', 'c', 'd', 'e'],
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
})
# Integer data are included in .describe() output,
# Boolean and string data are not.
result = df.describe()
expected = DataFrame({'int_data': [5, 30, df.int_data.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
# Top value is a boolean value that is False
result = df.describe(include=['bool'])
expected = DataFrame({'bool_data': [5, 2, False, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_bool_frame(self):
# GH 13891
df = pd.DataFrame({
'bool_data_1': [False, False, True, True],
'bool_data_2': [False, True, True, True]
})
result = df.describe()
expected = DataFrame({'bool_data_1': [4, 2, True, 2],
'bool_data_2': [4, 2, True, 3]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True, False],
'int_data': [0, 1, 2, 3, 4]
})
result = df.describe()
expected = DataFrame({'int_data': [5, 2, df.int_data.std(), 0, 1,
2, 3, 4]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({
'bool_data': [False, False, True, True],
'str_data': ['a', 'b', 'c', 'a']
})
result = df.describe()
expected = DataFrame({'bool_data': [4, 2, True, 2],
'str_data': [4, 3, 'a', 2]},
index=['count', 'unique', 'top', 'freq'])
tm.assert_frame_equal(result, expected)
def test_describe_categorical(self):
df = DataFrame({'value': np.random.randint(0, 10000, 100)})
labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=['value'], ascending=True)
df['value_group'] = pd.cut(df.value, range(0, 10500, 500),
right=False, labels=cat_labels)
cat = df
# Categoricals should not show up together with numerical columns
result = cat.describe()
assert len(result.columns) == 1
# In a frame, describe() for the cat should be the same as for string
# arrays (count, unique, top, freq)
cat = Categorical(["a", "b", "b", "b"], categories=['a', 'b', 'c'],
ordered=True)
s = Series(cat)
result = s.describe()
expected = Series([4, 2, "b", 3],
index=['count', 'unique', 'top', 'freq'])
tm.assert_series_equal(result, expected)
cat = Series(Categorical(["a", "b", "c", "c"]))
df3 = DataFrame({"cat": cat, "s": ["a", "b", "c", "c"]})
res = df3.describe()
tm.assert_numpy_array_equal(res["cat"].values, res["s"].values)
def test_describe_categorical_columns(self):
# GH 11558
columns = pd.CategoricalIndex(['int1', 'int2', 'obj'],
ordered=True, name='XXX')
df = DataFrame({'int1': [10, 20, 30, 40, 50],
'int2': [10, 20, 30, 40, 50],
'obj': ['A', 0, None, 'X', 1]},
columns=columns)
result = df.describe()
exp_columns = pd.CategoricalIndex(['int1', 'int2'],
categories=['int1', 'int2', 'obj'],
ordered=True, name='XXX')
expected = DataFrame({'int1': [5, 30, df.int1.std(),
10, 20, 30, 40, 50],
'int2': [5, 30, df.int2.std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'],
columns=exp_columns)
tm.assert_frame_equal(result, expected)
tm.assert_categorical_equal(result.columns.values,
expected.columns.values)
def test_describe_datetime_columns(self):
columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
freq='MS', tz='US/Eastern', name='XXX')
df = DataFrame({0: [10, 20, 30, 40, 50],
1: [10, 20, 30, 40, 50],
2: ['A', 0, None, 'X', 1]})
df.columns = columns
result = df.describe()
exp_columns = pd.DatetimeIndex(['2011-01-01', '2011-02-01'],
freq='MS', tz='US/Eastern', name='XXX')
expected = DataFrame({0: [5, 30, df.iloc[:, 0].std(),
10, 20, 30, 40, 50],
1: [5, 30, df.iloc[:, 1].std(),
10, 20, 30, 40, 50]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
expected.columns = exp_columns
tm.assert_frame_equal(result, expected)
assert result.columns.freq == 'MS'
assert result.columns.tz == expected.columns.tz
def test_describe_timedelta_values(self):
# GH 6145
t1 = pd.timedelta_range('1 days', freq='D', periods=5)
t2 = pd.timedelta_range('1 hours', freq='H', periods=5)
df = pd.DataFrame({'t1': t1, 't2': t2})
expected = DataFrame({'t1': [5, pd.Timedelta('3 days'),
df.iloc[:, 0].std(),
pd.Timedelta('1 days'),
pd.Timedelta('2 days'),
pd.Timedelta('3 days'),
pd.Timedelta('4 days'),
pd.Timedelta('5 days')],
't2': [5, pd.Timedelta('3 hours'),
df.iloc[:, 1].std(),
pd.Timedelta('1 hours'),
pd.Timedelta('2 hours'),
pd.Timedelta('3 hours'),
pd.Timedelta('4 hours'),
pd.Timedelta('5 hours')]},
index=['count', 'mean', 'std', 'min', '25%',
'50%', '75%', 'max'])
res = df.describe()
tm.assert_frame_equal(res, expected)
exp_repr = (" t1 t2\n"
"count 5 5\n"
"mean 3 days 00:00:00 0 days 03:00:00\n"
"std 1 days 13:56:50.394919 0 days 01:34:52.099788\n"
"min 1 days 00:00:00 0 days 01:00:00\n"
"25% 2 days 00:00:00 0 days 02:00:00\n"
"50% 3 days 00:00:00 0 days 03:00:00\n"
"75% 4 days 00:00:00 0 days 04:00:00\n"
"max 5 days 00:00:00 0 days 05:00:00")
assert repr(res) == exp_repr
def test_describe_tz_values(self, tz_naive_fixture):
# GH 21332
tz = tz_naive_fixture
s1 = Series(range(5))
start = Timestamp(2018, 1, 1)
end = Timestamp(2018, 1, 5)
s2 = Series(date_range(start, end, tz=tz))
df = pd.DataFrame({'s1': s1, 's2': s2})
expected = DataFrame({'s1': [5, np.nan, np.nan, np.nan, np.nan, np.nan,
2, 1.581139, 0, 1, 2, 3, 4],
's2': [5, 5, s2.value_counts().index[0], 1,
start.tz_localize(tz),
end.tz_localize(tz), np.nan, np.nan,
np.nan, np.nan, np.nan, np.nan, np.nan]},
index=['count', 'unique', 'top', 'freq', 'first',
'last', 'mean', 'std', 'min', '25%', '50%',
'75%', 'max']
)
res = df.describe(include='all')
tm.assert_frame_equal(res, expected)
def test_reduce_mixed_frame(self):
# GH 6806
df = DataFrame({
'bool_data': [True, True, False, False, False],
'int_data': [10, 20, 30, 40, 50],
'string_data': ['a', 'b', 'c', 'd', 'e'],
})
df.reindex(columns=['bool_data', 'int_data', 'string_data'])
test = df.sum(axis=0)
tm.assert_numpy_array_equal(test.values,
np.array([2, 150, 'abcde'], dtype=object))
tm.assert_series_equal(test, df.T.sum(axis=1))
def test_count(self):
f = lambda s: notna(s).sum()
self._check_stat_op('count', f,
has_skipna=False,
has_numeric_only=True,
check_dtype=False,
check_dates=True)
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH #423
df = DataFrame(index=lrange(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=lrange(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_nunique(self):
f = lambda s: len(algorithms.unique1d(s.dropna()))
self._check_stat_op('nunique', f, has_skipna=False,
check_dtype=False, check_dates=True)
df = DataFrame({'A': [1, 1, 1],
'B': [1, 2, 3],
'C': [1, np.nan, 3]})
tm.assert_series_equal(df.nunique(), Series({'A': 1, 'B': 3, 'C': 2}))
tm.assert_series_equal(df.nunique(dropna=False),
Series({'A': 1, 'B': 3, 'C': 3}))
tm.assert_series_equal(df.nunique(axis=1), Series({0: 1, 1: 2, 2: 2}))
tm.assert_series_equal(df.nunique(axis=1, dropna=False),
Series({0: 1, 1: 3, 2: 2}))
def test_sum(self):
self._check_stat_op('sum', np.sum, has_numeric_only=True,
skipna_alternative=np.nansum)
# mixed types (with upcasting happening)
self._check_stat_op('sum', np.sum,
frame=self.mixed_float.astype('float32'),
has_numeric_only=True, check_dtype=False,
check_less_precise=True)
@pytest.mark.parametrize(
"method", ['sum', 'mean', 'prod', 'var',
'std', 'skew', 'min', 'max'])
def test_stat_operators_attempt_obj_array(self, method):
# GH #676
data = {
'a': [-0.00049987540199591344, -0.0016467257772919831,
0.00067695870775883013],
'b': [-0, -0, 0.0],
'c': [0.00031111847529610595, 0.0014902627951905339,
-0.00094099200035979691]
}
df1 = DataFrame(data, index=['foo', 'bar', 'baz'],
dtype='O')
df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],
2: [np.nan, 4]}, dtype=object)
for df in [df1, df2]:
assert df.values.dtype == np.object_
result = getattr(df, method)(1)
expected = getattr(df.astype('f8'), method)(1)
if method in ['sum', 'prod']:
tm.assert_series_equal(result, expected)
def test_mean(self):
self._check_stat_op('mean', np.mean, check_dates=True)
def test_product(self):
self._check_stat_op('product', np.prod)
def test_median(self):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, check_dates=True)
def test_min(self):
with warnings.catch_warnings(record=True):
self._check_stat_op('min', np.min, check_dates=True)
self._check_stat_op('min', np.min, frame=self.intframe)
def test_cummin(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cummin = self.tsframe.cummin()
expected = self.tsframe.apply(Series.cummin)
tm.assert_frame_equal(cummin, expected)
# axis = 1
cummin = self.tsframe.cummin(axis=1)
expected = self.tsframe.apply(Series.cummin, axis=1)
tm.assert_frame_equal(cummin, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummin() # noqa
# fix issue
cummin_xs = self.tsframe.cummin(axis=1)
assert np.shape(cummin_xs) == np.shape(self.tsframe)
def test_cummax(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cummax = self.tsframe.cummax()
expected = self.tsframe.apply(Series.cummax)
tm.assert_frame_equal(cummax, expected)
# axis = 1
cummax = self.tsframe.cummax(axis=1)
expected = self.tsframe.apply(Series.cummax, axis=1)
tm.assert_frame_equal(cummax, expected)
# it works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cummax() # noqa
# fix issue
cummax_xs = self.tsframe.cummax(axis=1)
assert np.shape(cummax_xs) == np.shape(self.tsframe)
def test_max(self):
with warnings.catch_warnings(record=True):
self._check_stat_op('max', np.max, check_dates=True)
self._check_stat_op('max', np.max, frame=self.intframe)
def test_mad(self):
f = lambda x: np.abs(x - x.mean()).mean()
self._check_stat_op('mad', f)
def test_var_std(self):
alt = lambda x: np.var(x, ddof=1)
self._check_stat_op('var', alt)
alt = lambda x: np.std(x, ddof=1)
self._check_stat_op('std', alt)
result = self.tsframe.std(ddof=4)
expected = self.tsframe.apply(lambda x: x.std(ddof=4))
tm.assert_almost_equal(result, expected)
result = self.tsframe.var(ddof=4)
expected = self.tsframe.apply(lambda x: x.var(ddof=4))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nanvar(arr, axis=0)
assert not (result < 0).any()
@pytest.mark.parametrize(
"meth", ['sem', 'var', 'std'])
def test_numeric_only_flag(self, meth):
# GH #9201
df1 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a number in str format
df1.loc[0, 'foo'] = '100'
df2 = DataFrame(np.random.randn(5, 3), columns=['foo', 'bar', 'baz'])
# set one entry to a non-number str
df2.loc[0, 'foo'] = 'a'
result = getattr(df1, meth)(axis=1, numeric_only=True)
expected = getattr(df1[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
result = getattr(df2, meth)(axis=1, numeric_only=True)
expected = getattr(df2[['bar', 'baz']], meth)(axis=1)
tm.assert_series_equal(expected, result)
# df1 has all numbers, df2 has a letter inside
pytest.raises(TypeError, lambda: getattr(df1, meth)(
axis=1, numeric_only=False))
pytest.raises(TypeError, lambda: getattr(df2, meth)(
axis=1, numeric_only=False))
def test_mixed_ops(self):
# GH 16116
df = DataFrame({'int': [1, 2, 3, 4],
'float': [1., 2., 3., 4.],
'str': ['a', 'b', 'c', 'd']})
for op in ['mean', 'std', 'var', 'skew',
'kurt', 'sem']:
result = getattr(df, op)()
assert len(result) == 2
with pd.option_context('use_bottleneck', False):
result = getattr(df, op)()
assert len(result) == 2
def test_cumsum(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cumsum = self.tsframe.cumsum()
expected = self.tsframe.apply(Series.cumsum)
tm.assert_frame_equal(cumsum, expected)
# axis = 1
cumsum = self.tsframe.cumsum(axis=1)
expected = self.tsframe.apply(Series.cumsum, axis=1)
tm.assert_frame_equal(cumsum, expected)
# works
df = DataFrame({'A': np.arange(20)}, index=np.arange(20))
result = df.cumsum() # noqa
# fix issue
cumsum_xs = self.tsframe.cumsum(axis=1)
assert np.shape(cumsum_xs) == np.shape(self.tsframe)
def test_cumprod(self):
self.tsframe.loc[5:10, 0] = nan
self.tsframe.loc[10:15, 1] = nan
self.tsframe.loc[15:, 2] = nan
# axis = 0
cumprod = self.tsframe.cumprod()
expected = self.tsframe.apply(Series.cumprod)
tm.assert_frame_equal(cumprod, expected)
# axis = 1
cumprod = self.tsframe.cumprod(axis=1)
expected = self.tsframe.apply(Series.cumprod, axis=1)
tm.assert_frame_equal(cumprod, expected)
# fix issue
cumprod_xs = self.tsframe.cumprod(axis=1)
assert np.shape(cumprod_xs) == np.shape(self.tsframe)
# ints
df = self.tsframe.fillna(0).astype(int)
df.cumprod(0)
df.cumprod(1)
# ints32
df = self.tsframe.fillna(0).astype(np.int32)
df.cumprod(0)
df.cumprod(1)
def test_sem(self):
alt = lambda x: np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
result = self.tsframe.sem(ddof=4)
expected = self.tsframe.apply(
lambda x: x.std(ddof=4) / np.sqrt(len(x)))
tm.assert_almost_equal(result, expected)
arr = np.repeat(np.random.random((1, 1000)), 1000, 0)
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
with pd.option_context('use_bottleneck', False):
result = nanops.nansem(arr, axis=0)
assert not (result < 0).any()
@td.skip_if_no_scipy
def test_skew(self):
from scipy.stats import skew
def alt(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', alt)
@td.skip_if_no_scipy
def test_kurt(self):
from scipy.stats import kurtosis
def alt(x):
if len(x) < 4:
return np.nan
return kurtosis(x, bias=False)
self._check_stat_op('kurt', alt)
index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],
labels=[[0, 0, 0, 0, 0, 0],
[0, 1, 2, 0, 1, 2],
[0, 1, 0, 1, 0, 1]])
df = DataFrame(np.random.randn(6, 3), index=index)
kurt = df.kurt()
kurt2 = df.kurt(level=0).xs('bar')
tm.assert_series_equal(kurt, kurt2, check_names=False)
assert kurt.name is None
assert kurt2.name == 'bar'
def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,
has_numeric_only=False, check_dtype=True,
check_dates=False, check_less_precise=False,
skipna_alternative=None):
if frame is None:
frame = self.frame
# set some NAs
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
f = getattr(frame, name)
if check_dates:
df = DataFrame({'b': date_range('1/1/2001', periods=2)})
_f = getattr(df, name)
result = _f()
assert isinstance(result, Series)
df['a'] = lrange(len(df))
result = getattr(df, name)()
assert isinstance(result, Series)
assert len(result)
if has_skipna:
def wrapper(x):
return alternative(x.values)
skipna_wrapper = tm._make_skipna_wrapper(alternative,
skipna_alternative)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
# HACK: win32
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False,
check_less_precise=check_less_precise)
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper),
check_dtype=check_dtype,
check_less_precise=check_less_precise)
if name in ['sum', 'prod']:
exp = frame.apply(skipna_wrapper, axis=1)
tm.assert_series_equal(result1, exp, check_dtype=False,
check_less_precise=check_less_precise)
# check dtypes
if check_dtype:
lcd_dtype = frame.values.dtype
assert lcd_dtype == result0.dtype
assert lcd_dtype == result1.dtype
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
tm.assert_raises_regex(ValueError, 'No axis named 2', f, axis=2)
# make sure works on mixed-type frame
getattr(self.mixed_frame, name)(axis=0)
getattr(self.mixed_frame, name)(axis=1)
if has_numeric_only:
getattr(self.mixed_frame, name)(axis=0, numeric_only=True)
getattr(self.mixed_frame, name)(axis=1, numeric_only=True)
getattr(self.frame, name)(axis=0, numeric_only=False)
getattr(self.frame, name)(axis=1, numeric_only=False)
# all NA case
if has_skipna:
all_na = self.frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if name in ['sum', 'prod']:
unit = int(name == 'prod')
expected = pd.Series(unit, index=r0.index, dtype=r0.dtype)
tm.assert_series_equal(r0, expected)
expected = pd.Series(unit, index=r1.index, dtype=r1.dtype)
tm.assert_series_equal(r1, expected)
@pytest.mark.parametrize("dropna, expected", [
(True, {'A': [12],
'B': [10.0],
'C': [1.0],
'D': ['a'],
'E': Categorical(['a'], categories=['a']),
'F': to_datetime(['2000-1-2']),
'G': to_timedelta(['1 days'])}),
(False, {'A': [12],
'B': [10.0],
'C': [np.nan],
'D': np.array([np.nan], dtype=object),
'E': Categorical([np.nan], categories=['a']),
'F': [pd.NaT],
'G': to_timedelta([pd.NaT])}),
(True, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical(['a', np.nan, np.nan, np.nan],
categories=['a']),
'L': to_datetime(['2000-1-2', 'NaT', 'NaT', 'NaT']),
'M': to_timedelta(['1 days', 'nan', 'nan', 'nan']),
'N': [0, 1, 2, 3]}),
(False, {'H': [8, 9, np.nan, np.nan],
'I': [8, 9, np.nan, np.nan],
'J': [1, np.nan, np.nan, np.nan],
'K': Categorical([np.nan, 'a', np.nan, np.nan],
categories=['a']),
'L': to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
'M': to_timedelta(['nan', '1 days', 'nan', 'nan']),
'N': [0, 1, 2, 3]})
])
def test_mode_dropna(self, dropna, expected):
df = DataFrame({"A": [12, 12, 19, 11],
"B": [10, 10, np.nan, 3],
"C": [1, np.nan, np.nan, np.nan],
"D": [np.nan, np.nan, 'a', np.nan],
"E": Categorical([np.nan, np.nan, 'a', np.nan]),
"F": to_datetime(['NaT', '2000-1-2', 'NaT', 'NaT']),
"G": to_timedelta(['1 days', 'nan', 'nan', 'nan']),
"H": [8, 8, 9, 9],
"I": [9, 9, 8, 8],
"J": [1, 1, np.nan, np.nan],
"K": Categorical(['a', np.nan, 'a', np.nan]),
"L": to_datetime(['2000-1-2', '2000-1-2',
'NaT', 'NaT']),
"M": to_timedelta(['1 days', 'nan',
'1 days', 'nan']),
"N": np.arange(4, dtype='int64')})
result = df[sorted(list(expected.keys()))].mode(dropna=dropna)
expected = DataFrame(expected)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not compat.PY3, reason="only PY3")
def test_mode_sortwarning(self):
# Check for the warning that is raised when the mode
# results cannot be sorted
df = DataFrame({"A": [np.nan, np.nan, 'a', 'a']})
expected = DataFrame({'A': ['a', np.nan]})
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
result = df.mode(dropna=False)
result = result.sort_values(by='A').reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_operators_timedelta64(self):
from datetime import timedelta
df = DataFrame(dict(A=date_range('2012-1-1', periods=3, freq='D'),
B=date_range('2012-1-2', periods=3, freq='D'),
C=Timestamp('20120101') -
timedelta(minutes=5, seconds=5)))
diffs = DataFrame(dict(A=df['A'] - df['C'],
B=df['A'] - df['B']))
# min
result = diffs.min()
assert result[0] == diffs.loc[0, 'A']
assert result[1] == diffs.loc[0, 'B']
result = diffs.min(axis=1)
assert (result == diffs.loc[0, 'B']).all()
# max
result = diffs.max()
assert result[0] == diffs.loc[2, 'A']
assert result[1] == diffs.loc[2, 'B']
result = diffs.max(axis=1)
assert (result == diffs['A']).all()
# abs
result = diffs.abs()
result2 = abs(diffs)
expected = DataFrame(dict(A=df['A'] - df['C'],
B=df['B'] - df['A']))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# mixed frame
mixed = diffs.copy()
mixed['C'] = 'foo'
mixed['D'] = 1
mixed['E'] = 1.
mixed['F'] = Timestamp('20130101')
# results in an object array
from pandas.core.tools.timedeltas import (
_coerce_scalar_to_timedelta_type as _coerce)
result = mixed.min()
expected = Series([_coerce(timedelta(seconds=5 * 60 + 5)),
_coerce(timedelta(days=-1)),
'foo', 1, 1.0,
Timestamp('20130101')],
index=mixed.columns)
tm.assert_series_equal(result, expected)
# excludes numeric
result = mixed.min(axis=1)
expected = Series([1, 1, 1.], index=[0, 1, 2])
tm.assert_series_equal(result, expected)
# works when only those columns are selected
result = mixed[['A', 'B']].min(1)
expected = Series([timedelta(days=-1)] * 3)
tm.assert_series_equal(result, expected)
result = mixed[['A', 'B']].min()
expected = Series([timedelta(seconds=5 * 60 + 5),
timedelta(days=-1)], index=['A', 'B'])
tm.assert_series_equal(result, expected)
# GH 3106
df = DataFrame({'time': date_range('20130102', periods=5),
'time2': date_range('20130105', periods=5)})
df['off1'] = df['time2'] - df['time']
assert df['off1'].dtype == 'timedelta64[ns]'
df['off2'] = df['time'] - df['time2']
df._consolidate_inplace()
assert df['off1'].dtype == 'timedelta64[ns]'
assert df['off2'].dtype == 'timedelta64[ns]'
def test_sum_corner(self):
axis0 = self.empty.sum(0)
axis1 = self.empty.sum(1)
assert isinstance(axis0, Series)
assert isinstance(axis1, Series)
assert len(axis0) == 0
assert len(axis1) == 0
@pytest.mark.parametrize('method, unit', [
('sum', 0),
('prod', 1),
])
def test_sum_prod_nanops(self, method, unit):
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [unit, unit],
"b": [unit, np.nan],
"c": [np.nan, np.nan]})
# The default
result = getattr(df, method)()
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
# min_count=1
result = getattr(df, method)(min_count=1)
expected = pd.Series([unit, unit, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = getattr(df, method)(min_count=0)
expected = pd.Series([unit, unit, unit], index=idx, dtype='float64')
tm.assert_series_equal(result, expected)
result = getattr(df.iloc[1:], method)(min_count=1)
expected = pd.Series([unit, np.nan, np.nan], index=idx)
tm.assert_series_equal(result, expected)
# min_count > 1
df = pd.DataFrame({"A": [unit] * 10, "B": [unit] * 5 + [np.nan] * 5})
result = getattr(df, method)(min_count=5)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
result = getattr(df, method)(min_count=6)
expected = pd.Series(result, index=['A', 'B'])
tm.assert_series_equal(result, expected)
def test_sum_nanops_timedelta(self):
# prod isn't defined on timedeltas
idx = ['a', 'b', 'c']
df = pd.DataFrame({"a": [0, 0],
"b": [0, np.nan],
"c": [np.nan, np.nan]})
df2 = df.apply(pd.to_timedelta)
# 0 by default
result = df2.sum()
expected = pd.Series([0, 0, 0], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
# min_count=0
result = df2.sum(min_count=0)
tm.assert_series_equal(result, expected)
# min_count=1
result = df2.sum(min_count=1)
expected = pd.Series([0, 0, np.nan], dtype='m8[ns]', index=idx)
tm.assert_series_equal(result, expected)
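# Note on the min_count semantics exercised above (illustrative): with the
# default min_count=0, sum/prod of an all-NaN column returns the operation's
# identity (0 for sum, 1 for prod); with min_count=1 it returns NaN instead.
# For example, pd.Series([np.nan]).sum() == 0.0 while
# pd.Series([np.nan]).sum(min_count=1) is NaN.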
def test_sum_object(self):
values = self.frame.values.astype(int)
frame = DataFrame(values, index=self.frame.index,
columns=self.frame.columns)
deltas = frame * timedelta(1)
deltas.sum()
def test_sum_bool(self):
# ensure this works, bug report
bools = np.isnan(self.frame)
bools.sum(1)
bools.sum(0)
def test_mean_corner(self):
# unit test when have object data
the_mean = self.mixed_frame.mean(axis=0)
the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
assert len(the_mean.index) < len(self.mixed_frame.columns)
# xs sum mixed type, just want to know it works...
the_mean = self.mixed_frame.mean(axis=1)
the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)
tm.assert_index_equal(the_sum.index, the_mean.index)
# take mean of boolean column
self.frame['bool'] = self.frame['A'] > 0
means = self.frame.mean(0)
assert means['bool'] == self.frame['bool'].values.mean()
def test_stats_mixed_type(self):
# don't blow up
self.mixed_frame.std(1)
self.mixed_frame.var(1)
self.mixed_frame.mean(1)
self.mixed_frame.skew(1)
def test_median_corner(self):
def wrapper(x):
if isna(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper, frame=self.intframe,
check_dtype=False, check_dates=True)
# Miscellanea
def test_count_objects(self):
dm = DataFrame(self.mixed_frame._series)
df = DataFrame(self.mixed_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
def test_cumsum_corner(self):
dm = DataFrame(np.arange(20).reshape(4, 5),
index=lrange(4), columns=lrange(5))
# ?(wesm)
result = dm.cumsum() # noqa
def test_sum_bools(self):
df = DataFrame(index=lrange(1), columns=lrange(10))
bools = isna(df)
assert bools.sum(axis=1)[0] == 10
# Index of max / min
def test_idxmin(self):
frame = self.frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmin(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmin, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
pytest.raises(ValueError, frame.idxmin, axis=2)
def test_idxmax(self):
frame = self.frame
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
for skipna in [True, False]:
for axis in [0, 1]:
for df in [frame, self.intframe]:
result = df.idxmax(axis=axis, skipna=skipna)
expected = df.apply(Series.idxmax, axis=axis,
skipna=skipna)
tm.assert_series_equal(result, expected)
pytest.raises(ValueError, frame.idxmax, axis=2)
# ----------------------------------------------------------------------
# Logical reductions
def test_any_all(self):
self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)
self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)
def test_any_all_extra(self):
df = DataFrame({
'A': [True, False, False],
'B': [True, True, False],
'C': [True, True, True],
}, index=['a', 'b', 'c'])
result = df[['A', 'B']].any(1)
expected = Series([True, True, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df[['A', 'B']].any(1, bool_only=True)
tm.assert_series_equal(result, expected)
result = df.all(1)
expected = Series([True, False, False], index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
result = df.all(1, bool_only=True)
tm.assert_series_equal(result, expected)
# Axis is None
result = df.all(axis=None).item()
assert result is False
result = df.any(axis=None).item()
assert result is True
result = df[['C']].all(axis=None).item()
assert result is True
# skip pathological failure cases
# class CantNonzero(object):
# def __nonzero__(self):
# raise ValueError
# df[4] = CantNonzero()
# it works!
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
# df[4][4] = np.nan
# df.any(1)
# df.all(1)
# df.any(1, bool_only=True)
# df.all(1, bool_only=True)
@pytest.mark.parametrize('func, data, expected', [
(np.any, {}, False),
(np.all, {}, True),
(np.any, {'A': []}, False),
(np.all, {'A': []}, True),
(np.any, {'A': [False, False]}, False),
(np.all, {'A': [False, False]}, False),
(np.any, {'A': [True, False]}, True),
(np.all, {'A': [True, False]}, False),
(np.any, {'A': [True, True]}, True),
(np.all, {'A': [True, True]}, True),
(np.any, {'A': [False], 'B': [False]}, False),
(np.all, {'A': [False], 'B': [False]}, False),
(np.any, {'A': [False, False], 'B': [False, True]}, True),
(np.all, {'A': [False, False], 'B': [False, True]}, False),
# other types
(np.all, {'A': pd.Series([0.0, 1.0], dtype='float')}, False),
(np.any, {'A': pd.Series([0.0, 1.0], dtype='float')}, True),
(np.all, {'A': pd.Series([0, 1], dtype=int)}, False),
(np.any, {'A': pd.Series([0, 1], dtype=int)}, True),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='M8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='M8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([0, 1], dtype='m8[ns]')}, False,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([0, 1], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.all, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
pytest.param(np.any, {'A': pd.Series([1, 2], dtype='m8[ns]')}, True,
marks=[td.skip_if_np_lt_115]),
(np.all, {'A': pd.Series([0, 1], dtype='category')}, False),
(np.any, {'A': pd.Series([0, 1], dtype='category')}, True),
(np.all, {'A': pd.Series([1, 2], dtype='category')}, True),
(np.any, {'A': pd.Series([1, 2], dtype='category')}, True),
# # Mix
# GH-21484
# (np.all, {'A': pd.Series([10, 20], dtype='M8[ns]'),
# 'B': pd.Series([10, 20], dtype='m8[ns]')}, True),
])
def test_any_all_np_func(self, func, data, expected):
# https://github.com/pandas-dev/pandas/issues/19976
data = DataFrame(data)
result = func(data)
assert isinstance(result, np.bool_)
assert result.item() is expected
# method version
result = getattr(DataFrame(data), func.__name__)(axis=None)
assert isinstance(result, np.bool_)
assert result.item() is expected
def test_any_all_object(self):
# https://github.com/pandas-dev/pandas/issues/19976
result = np.all(DataFrame(columns=['a', 'b'])).item()
assert result is True
result = np.any(DataFrame(columns=['a', 'b'])).item()
assert result is False
@pytest.mark.parametrize('method', ['any', 'all'])
def test_any_all_level_axis_none_raises(self, method):
df = DataFrame(
{"A": 1},
index=MultiIndex.from_product([['A', 'B'], ['a', 'b']],
names=['out', 'in'])
)
xpr = "Must specify 'axis' when aggregating by level."
with tm.assert_raises_regex(ValueError, xpr):
getattr(df, method)(axis=None, level='out')
def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,
has_bool_only=False):
if frame is None:
frame = self.frame > 0
# set some NAs
frame = DataFrame(frame.values.astype(object), frame.index,
frame.columns)
frame.loc[5:10] = np.nan
frame.loc[15:20, -2:] = np.nan
f = getattr(frame, name)
if has_skipna:
def skipna_wrapper(x):
nona = x.dropna().values
return alternative(nona)
def wrapper(x):
return alternative(x.values)
result0 = f(axis=0, skipna=False)
result1 = f(axis=1, skipna=False)
tm.assert_series_equal(result0, frame.apply(wrapper))
tm.assert_series_equal(result1, frame.apply(wrapper, axis=1),
check_dtype=False) # HACK: win32
else:
skipna_wrapper = alternative
wrapper = alternative
result0 = f(axis=0)
result1 = f(axis=1)
tm.assert_series_equal(result0, frame.apply(skipna_wrapper))
tm.assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),
check_dtype=False)
# result = f(axis=1)
# comp = frame.apply(alternative, axis=1).reindex(result.index)
# assert_series_equal(result, comp)
# bad axis
pytest.raises(ValueError, f, axis=2)
# make sure works on mixed-type frame
mixed = self.mixed_frame
mixed['_bool_'] = np.random.randn(len(mixed)) > 0
getattr(mixed, name)(axis=0)
getattr(mixed, name)(axis=1)
class NonzeroFail(object):
def __nonzero__(self):
raise ValueError
mixed['_nonzero_fail_'] = NonzeroFail()
if has_bool_only:
getattr(mixed, name)(axis=0, bool_only=True)
getattr(mixed, name)(axis=1, bool_only=True)
getattr(frame, name)(axis=0, bool_only=False)
getattr(frame, name)(axis=1, bool_only=False)
# all NA case
if has_skipna:
all_na = frame * np.NaN
r0 = getattr(all_na, name)(axis=0)
r1 = getattr(all_na, name)(axis=1)
if name == 'any':
assert not r0.any()
assert not r1.any()
else:
assert r0.all()
assert r1.all()
# ----------------------------------------------------------------------
# Isin
def test_isin(self):
# GH #4211
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
other = ['a', 'b', 'c']
result = df.isin(other)
expected = DataFrame([df.loc[s].isin(other) for s in df.index])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
expected = DataFrame(False, df.index, df.columns)
result = df.isin(empty)
tm.assert_frame_equal(result, expected)
def test_isin_dict(self):
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
d = {'A': ['a']}
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
# non unique columns
df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})
df.columns = ['A', 'A']
expected = DataFrame(False, df.index, df.columns)
expected.loc[0, 'A'] = True
result = df.isin(d)
tm.assert_frame_equal(result, expected)
def test_isin_with_string_scalar(self):
# GH4763
df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],
'ids2': ['a', 'n', 'c', 'n']},
index=['foo', 'bar', 'baz', 'qux'])
with pytest.raises(TypeError):
df.isin('a')
with pytest.raises(TypeError):
df.isin('aaa')
def test_isin_df(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})
expected = DataFrame(False, df1.index, df1.columns)
result = df1.isin(df2)
expected['A'].loc[[1, 3]] = True
expected['B'].loc[[0, 2]] = True
tm.assert_frame_equal(result, expected)
# partial overlapping columns
df2.columns = ['A', 'C']
result = df1.isin(df2)
expected['B'] = False
tm.assert_frame_equal(result, expected)
def test_isin_tuples(self):
# GH16394
df = pd.DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
df['C'] = list(zip(df['A'], df['B']))
result = df['C'].isin([(1, 'a')])
tm.assert_series_equal(result,
Series([True, False, False], name="C"))
def test_isin_df_dupe_values(self):
df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})
# just cols duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['B', 'B'])
with pytest.raises(ValueError):
df1.isin(df2)
# just index duped
df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],
columns=['A', 'B'], index=[0, 0, 1, 1])
with pytest.raises(ValueError):
df1.isin(df2)
# cols and index:
df2.columns = ['B', 'B']
with pytest.raises(ValueError):
df1.isin(df2)
def test_isin_dupe_self(self):
other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})
df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A', 'A'])
result = df.isin(other)
expected = DataFrame(False, index=df.index, columns=df.columns)
expected.loc[0] = True
expected.iloc[1, 1] = True
tm.assert_frame_equal(result, expected)
def test_isin_against_series(self):
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},
index=['a', 'b', 'c', 'd'])
s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])
expected = DataFrame(False, index=df.index, columns=df.columns)
expected['A'].loc['a'] = True
expected.loc['d'] = True
result = df.isin(s)
tm.assert_frame_equal(result, expected)
def test_isin_multiIndex(self):
idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),
(0, 'b', 'bar'), (0, 'b', 'baz'),
(2, 'a', 'foo'), (2, 'a', 'bar'),
(2, 'c', 'bar'), (2, 'c', 'baz'),
(1, 'b', 'foo'), (1, 'b', 'bar'),
(1, 'c', 'bar'), (1, 'c', 'baz')])
df1 = DataFrame({'A': np.ones(12),
'B': np.zeros(12)}, index=idx)
df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})
# against regular index
expected = DataFrame(False, index=df1.index, columns=df1.columns)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
df2.index = idx
expected = df2.values.astype(np.bool)
expected[:, 1] = ~expected[:, 1]
expected = DataFrame(expected, columns=['A', 'B'], index=idx)
result = df1.isin(df2)
tm.assert_frame_equal(result, expected)
def test_isin_empty_datetimelike(self):
# GH 15473
df1_ts = DataFrame({'date':
pd.to_datetime(['2014-01-01', '2014-01-02'])})
df1_td = DataFrame({'date':
[pd.Timedelta(1, 's'), pd.Timedelta(2, 's')]})
df2 = DataFrame({'date': []})
df3 = DataFrame()
expected = DataFrame({'date': [False, False]})
result = df1_ts.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_ts.isin(df3)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df2)
tm.assert_frame_equal(result, expected)
result = df1_td.isin(df3)
tm.assert_frame_equal(result, expected)
# ----------------------------------------------------------------------
# Row deduplication
def test_drop_duplicates(self):
df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.loc[[6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.loc[[]]
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates(np.array(['AAA', 'B']))
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'])
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep='last')
expected = df.loc[[0, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AAA', 'B'), keep=False)
expected = df.loc[[0]]
tm.assert_frame_equal(result, expected)
# consider everything
df2 = df.loc[:, ['AAA', 'B', 'C']]
result = df2.drop_duplicates()
# in this case only
expected = df2.drop_duplicates(['AAA', 'B'])
tm.assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep='last')
expected = df2.drop_duplicates(['AAA', 'B'], keep='last')
tm.assert_frame_equal(result, expected)
result = df2.drop_duplicates(keep=False)
expected = df2.drop_duplicates(['AAA', 'B'], keep=False)
tm.assert_frame_equal(result, expected)
# integers
result = df.drop_duplicates('C')
expected = df.iloc[[0, 2]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[-2, -1]]
tm.assert_frame_equal(result, expected)
df['E'] = df['C'].astype('int8')
result = df.drop_duplicates('E')
expected = df.iloc[[0, 2]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('E', keep='last')
expected = df.iloc[[-2, -1]]
tm.assert_frame_equal(result, expected)
# GH 11376
df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
'y': [0, 6, 5, 5, 9, 1, 2]})
expected = df.loc[df.index != 3]
tm.assert_frame_equal(df.drop_duplicates(), expected)
df = pd.DataFrame([[1, 0], [0, 2]])
tm.assert_frame_equal(df.drop_duplicates(), df)
df = pd.DataFrame([[-2, 0], [0, -4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
x = np.iinfo(np.int64).max / 3 * 2
df = pd.DataFrame([[-x, x], [0, x + 4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
df = pd.DataFrame([[-x, x], [x, x + 4]])
tm.assert_frame_equal(df.drop_duplicates(), df)
# GH 11864
df = pd.DataFrame([i] * 9 for i in range(16))
df = df.append([[1] + [0] * 8], ignore_index=True)
for keep in ['first', 'last', False]:
assert df.duplicated(keep=keep).sum() == 0
@pytest.mark.parametrize('subset', ['a', ['a'], ['a', 'B']])
def test_duplicated_with_misspelled_column_name(self, subset):
# GH 19730
df = pd.DataFrame({'A': [0, 0, 1],
'B': [0, 0, 1],
'C': [0, 0, 1]})
with pytest.raises(KeyError):
df.duplicated(subset)
with pytest.raises(KeyError):
df.drop_duplicates(subset)
@pytest.mark.slow
def test_duplicated_do_not_fail_on_wide_dataframes(self):
# gh-21524
# Given the wide dataframe with a lot of columns
# with different (important!) values
data = {'col_{0:02d}'.format(i): np.random.randint(0, 1000, 30000)
for i in range(100)}
df = pd.DataFrame(data).T
result = df.duplicated()
# Then duplicates produce the bool pd.Series as a result
# and don't fail during calculation.
        # Actual values don't matter here, though usually
# it's all False in this case
assert isinstance(result, pd.Series)
assert result.dtype == np.bool
def test_drop_duplicates_with_duplicate_column_names(self):
# GH17836
df = DataFrame([
[1, 2, 5],
[3, 4, 6],
[3, 4, 7]
], columns=['a', 'a', 'b'])
result0 = df.drop_duplicates()
tm.assert_frame_equal(result0, df)
result1 = df.drop_duplicates('a')
expected1 = df[:2]
tm.assert_frame_equal(result1, expected1)
def test_drop_duplicates_for_take_all(self):
df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
'foo', 'bar', 'qux', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates('AAA')
expected = df.iloc[[0, 1, 2, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep='last')
expected = df.iloc[[2, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('AAA', keep=False)
expected = df.iloc[[2, 6]]
tm.assert_frame_equal(result, expected)
# multiple columns
result = df.drop_duplicates(['AAA', 'B'])
expected = df.iloc[[0, 1, 2, 3, 4, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep='last')
expected = df.iloc[[0, 1, 2, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['AAA', 'B'], keep=False)
expected = df.iloc[[0, 1, 2, 6]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_tuple(self):
df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
result = df.drop_duplicates(('AA', 'AB'))
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep='last')
expected = df.loc[[6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(('AA', 'AB'), keep=False)
expected = df.loc[[]] # empty df
assert len(result) == 0
tm.assert_frame_equal(result, expected)
# multi column
expected = df.loc[[0, 1, 2, 3]]
result = df.drop_duplicates((('AA', 'AB'), 'B'))
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_NA(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('A')
expected = df.loc[[0, 2, 3]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.loc[[1, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.loc[[]] # empty df
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
result = df.drop_duplicates(['A', 'B'])
expected = df.loc[[0, 2, 3, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep='last')
expected = df.loc[[1, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['A', 'B'], keep=False)
expected = df.loc[[6]]
tm.assert_frame_equal(result, expected)
# nan
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],
'D': lrange(8)})
# single column
result = df.drop_duplicates('C')
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.loc[[3, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.loc[[]] # empty df
tm.assert_frame_equal(result, expected)
assert len(result) == 0
# multi column
result = df.drop_duplicates(['C', 'B'])
expected = df.loc[[0, 1, 2, 4]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep='last')
expected = df.loc[[1, 3, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates(['C', 'B'], keep=False)
expected = df.loc[[1]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_NA_for_take_all(self):
# none
df = DataFrame({'A': [None, None, 'foo', 'bar',
'foo', 'baz', 'bar', 'qux'],
'C': [1.0, np.nan, np.nan, np.nan, 1., 2., 3, 1.]})
# single column
result = df.drop_duplicates('A')
expected = df.iloc[[0, 2, 3, 5, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep='last')
expected = df.iloc[[1, 4, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('A', keep=False)
expected = df.iloc[[5, 7]]
tm.assert_frame_equal(result, expected)
# nan
# single column
result = df.drop_duplicates('C')
expected = df.iloc[[0, 1, 5, 6]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep='last')
expected = df.iloc[[3, 5, 6, 7]]
tm.assert_frame_equal(result, expected)
result = df.drop_duplicates('C', keep=False)
expected = df.iloc[[5, 6]]
tm.assert_frame_equal(result, expected)
def test_drop_duplicates_inplace(self):
orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'bar', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': [1, 1, 2, 2, 2, 2, 1, 2],
'D': lrange(8)})
# single column
df = orig.copy()
df.drop_duplicates('A', inplace=True)
expected = orig[:2]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep='last', inplace=True)
expected = orig.loc[[6, 7]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates('A', keep=False, inplace=True)
expected = orig.loc[[]]
result = df
tm.assert_frame_equal(result, expected)
assert len(df) == 0
# multi column
df = orig.copy()
df.drop_duplicates(['A', 'B'], inplace=True)
expected = orig.loc[[0, 1, 2, 3]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep='last', inplace=True)
expected = orig.loc[[0, 5, 6, 7]]
result = df
tm.assert_frame_equal(result, expected)
df = orig.copy()
df.drop_duplicates(['A', 'B'], keep=False, inplace=True)
expected = orig.loc[[0]]
result = df
tm.assert_frame_equal(result, expected)
# consider everything
orig2 = orig.loc[:, ['A', 'B', 'C']].copy()
df2 = orig2.copy()
df2.drop_duplicates(inplace=True)
# in this case only
expected = orig2.drop_duplicates(['A', 'B'])
result = df2
tm.assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep='last', inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep='last')
result = df2
tm.assert_frame_equal(result, expected)
df2 = orig2.copy()
df2.drop_duplicates(keep=False, inplace=True)
expected = orig2.drop_duplicates(['A', 'B'], keep=False)
result = df2
tm.assert_frame_equal(result, expected)
# Rounding
def test_round(self):
# GH 2665
# Test that rounding an empty DataFrame does nothing
df = DataFrame()
tm.assert_frame_equal(df, df.round())
# Here's the test frame we'll be working with
df = DataFrame({'col1': [1.123, 2.123, 3.123],
'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(df.round(), expected_rounded)
# Round with an integer
decimals = 2
expected_rounded = DataFrame({'col1': [1.12, 2.12, 3.12],
'col2': [1.23, 2.23, 3.23]})
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# This should also work with np.round (since np.round dispatches to
# df.round)
tm.assert_frame_equal(np.round(df, decimals), expected_rounded)
# Round with a list
round_list = [1, 2]
with pytest.raises(TypeError):
df.round(round_list)
# Round with a dictionary
expected_rounded = DataFrame(
{'col1': [1.1, 2.1, 3.1], 'col2': [1.23, 2.23, 3.23]})
round_dict = {'col1': 1, 'col2': 2}
tm.assert_frame_equal(df.round(round_dict), expected_rounded)
# Incomplete dict
expected_partially_rounded = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.2, 2.2, 3.2]})
partial_round_dict = {'col2': 1}
tm.assert_frame_equal(df.round(partial_round_dict),
expected_partially_rounded)
# Dict with unknown elements
wrong_round_dict = {'col3': 2, 'col2': 1}
tm.assert_frame_equal(df.round(wrong_round_dict),
expected_partially_rounded)
# float input to `decimals`
non_int_round_dict = {'col1': 1, 'col2': 0.5}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
# String input
non_int_round_dict = {'col1': 1, 'col2': 'foo'}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# List input
non_int_round_dict = {'col1': 1, 'col2': [1, 2]}
with pytest.raises(TypeError):
df.round(non_int_round_dict)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Non integer Series inputs
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
non_int_round_Series = Series(non_int_round_dict)
with pytest.raises(TypeError):
df.round(non_int_round_Series)
# Negative numbers
negative_round_dict = {'col1': -1, 'col2': -2}
big_df = df * 100
expected_neg_rounded = DataFrame(
{'col1': [110., 210, 310], 'col2': [100., 200, 300]})
tm.assert_frame_equal(big_df.round(negative_round_dict),
expected_neg_rounded)
# nan in Series round
nan_round_Series = Series({'col1': nan, 'col2': 1})
# TODO(wesm): unused?
expected_nan_round = DataFrame({ # noqa
'col1': [1.123, 2.123, 3.123],
'col2': [1.2, 2.2, 3.2]})
with pytest.raises(TypeError):
df.round(nan_round_Series)
# Make sure this doesn't break existing Series.round
tm.assert_series_equal(df['col1'].round(1), expected_rounded['col1'])
# named columns
# GH 11986
decimals = 2
expected_rounded = DataFrame(
{'col1': [1.12, 2.12, 3.12], 'col2': [1.23, 2.23, 3.23]})
df.columns.name = "cols"
expected_rounded.columns.name = "cols"
tm.assert_frame_equal(df.round(decimals), expected_rounded)
# interaction of named columns & series
tm.assert_series_equal(df['col1'].round(decimals),
expected_rounded['col1'])
tm.assert_series_equal(df.round(decimals)['col1'],
expected_rounded['col1'])
def test_numpy_round(self):
# See gh-12600
df = DataFrame([[1.53, 1.36], [0.06, 7.01]])
out = np.round(df, decimals=0)
expected = DataFrame([[2., 1.], [0., 7.]])
tm.assert_frame_equal(out, expected)
msg = "the 'out' parameter is not supported"
with tm.assert_raises_regex(ValueError, msg):
np.round(df, decimals=0, out=df)
def test_round_mixed_type(self):
# GH11885
df = DataFrame({'col1': [1.1, 2.2, 3.3, 4.4],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
round_0 = DataFrame({'col1': [1., 2., 3., 4.],
'col2': ['1', 'a', 'c', 'f'],
'col3': date_range('20111111', periods=4)})
tm.assert_frame_equal(df.round(), round_0)
tm.assert_frame_equal(df.round(1), df)
tm.assert_frame_equal(df.round({'col1': 1}), df)
tm.assert_frame_equal(df.round({'col1': 0}), round_0)
tm.assert_frame_equal(df.round({'col1': 0, 'col2': 1}), round_0)
tm.assert_frame_equal(df.round({'col3': 1}), df)
def test_round_issue(self):
# GH11611
df = pd.DataFrame(np.random.random([3, 3]), columns=['A', 'B', 'C'],
index=['first', 'second', 'third'])
dfs = pd.concat((df, df), axis=1)
rounded = dfs.round()
tm.assert_index_equal(rounded.index, dfs.index)
decimals = pd.Series([1, 0, 2], index=['A', 'B', 'A'])
pytest.raises(ValueError, df.round, decimals)
def test_built_in_round(self):
if not compat.PY3:
pytest.skip("build in round cannot be overridden "
"prior to Python 3")
# GH11763
# Here's the test frame we'll be working with
df = DataFrame(
{'col1': [1.123, 2.123, 3.123], 'col2': [1.234, 2.234, 3.234]})
# Default round to integer (i.e. decimals=0)
expected_rounded = DataFrame(
{'col1': [1., 2., 3.], 'col2': [1., 2., 3.]})
tm.assert_frame_equal(round(df), expected_rounded)
def test_pct_change(self):
# GH 11150
        pnl = DataFrame([np.arange(0, 40, 10), np.arange(0, 40, 10),
                         np.arange(0, 40, 10)]).astype(np.float64)
pnl.iat[1, 0] = np.nan
pnl.iat[1, 1] = np.nan
pnl.iat[2, 3] = 60
for axis in range(2):
expected = pnl.ffill(axis=axis) / pnl.ffill(axis=axis).shift(
axis=axis) - 1
result = pnl.pct_change(axis=axis, fill_method='pad')
tm.assert_frame_equal(result, expected)
# Clip
def test_clip(self):
median = self.frame.median().median()
original = self.frame.copy()
capped = self.frame.clip_upper(median)
assert not (capped.values > median).any()
floored = self.frame.clip_lower(median)
assert not (floored.values < median).any()
double = self.frame.clip(upper=median, lower=median)
assert not (double.values != median).any()
# Verify that self.frame was not changed inplace
assert (self.frame.values == original.values).all()
def test_inplace_clip(self):
# GH #15388
median = self.frame.median().median()
frame_copy = self.frame.copy()
frame_copy.clip_upper(median, inplace=True)
assert not (frame_copy.values > median).any()
frame_copy = self.frame.copy()
frame_copy.clip_lower(median, inplace=True)
assert not (frame_copy.values < median).any()
frame_copy = self.frame.copy()
frame_copy.clip(upper=median, lower=median, inplace=True)
assert not (frame_copy.values != median).any()
def test_dataframe_clip(self):
# GH #2747
df = DataFrame(np.random.randn(1000, 2))
for lb, ub in [(-1, 1), (1, -1)]:
clipped_df = df.clip(lb, ub)
lb, ub = min(lb, ub), max(ub, lb)
lb_mask = df.values <= lb
ub_mask = df.values >= ub
mask = ~lb_mask & ~ub_mask
assert (clipped_df.values[lb_mask] == lb).all()
assert (clipped_df.values[ub_mask] == ub).all()
assert (clipped_df.values[mask] == df.values[mask]).all()
def test_clip_mixed_numeric(self):
# TODO(jreback)
# clip on mixed integer or floats
# with integer clippers coerces to float
df = DataFrame({'A': [1, 2, 3],
'B': [1., np.nan, 3.]})
result = df.clip(1, 2)
expected = DataFrame({'A': [1, 2, 2.],
'B': [1., np.nan, 2.]})
tm.assert_frame_equal(result, expected, check_like=True)
@pytest.mark.parametrize("inplace", [True, False])
def test_clip_against_series(self, inplace):
# GH #6966
df = DataFrame(np.random.randn(1000, 2))
lb = Series(np.random.randn(1000))
ub = lb + 1
original = df.copy()
clipped_df = df.clip(lb, ub, axis=0, inplace=inplace)
if inplace:
clipped_df = df
for i in range(2):
lb_mask = original.iloc[:, i] <= lb
ub_mask = original.iloc[:, i] >= ub
mask = ~lb_mask & ~ub_mask
result = clipped_df.loc[lb_mask, i]
tm.assert_series_equal(result, lb[lb_mask], check_names=False)
assert result.name == i
result = clipped_df.loc[ub_mask, i]
tm.assert_series_equal(result, ub[ub_mask], check_names=False)
assert result.name == i
tm.assert_series_equal(clipped_df.loc[mask, i], df.loc[mask, i])
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("lower", [[2, 3, 4], np.asarray([2, 3, 4])])
@pytest.mark.parametrize("axis,res", [
(0, [[2., 2., 3.], [4., 5., 6.], [7., 7., 7.]]),
(1, [[2., 3., 4.], [4., 5., 6.], [5., 6., 7.]])
])
def test_clip_against_list_like(self, inplace, lower, axis, res):
# GH #15390
original = self.simple.copy(deep=True)
result = original.clip(lower=lower, upper=[5, 6, 7],
axis=axis, inplace=inplace)
expected = pd.DataFrame(res,
columns=original.columns,
index=original.index)
if inplace:
result = original
tm.assert_frame_equal(result, expected, check_exact=True)
@pytest.mark.parametrize("axis", [0, 1, None])
def test_clip_against_frame(self, axis):
df = DataFrame(np.random.randn(1000, 2))
lb = DataFrame(np.random.randn(1000, 2))
ub = lb + 1
clipped_df = df.clip(lb, ub, axis=axis)
lb_mask = df <= lb
ub_mask = df >= ub
mask = ~lb_mask & ~ub_mask
tm.assert_frame_equal(clipped_df[lb_mask], lb[lb_mask])
tm.assert_frame_equal(clipped_df[ub_mask], ub[ub_mask])
tm.assert_frame_equal(clipped_df[mask], df[mask])
def test_clip_with_na_args(self):
"""Should process np.nan argument as None """
# GH # 17276
tm.assert_frame_equal(self.frame.clip(np.nan), self.frame)
tm.assert_frame_equal(self.frame.clip(upper=[1, 2, np.nan]),
self.frame)
tm.assert_frame_equal(self.frame.clip(lower=[1, np.nan, 3]),
self.frame)
tm.assert_frame_equal(self.frame.clip(upper=np.nan, lower=np.nan),
self.frame)
# Matrix-like
def test_dot(self):
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
result = a.dot(b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
# Check alignment
b1 = b.reindex(index=reversed(b.index))
        result = a.dot(b1)
tm.assert_frame_equal(result, expected)
# Check series argument
result = a.dot(b['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
result = a.dot(b1['one'])
tm.assert_series_equal(result, expected['one'], check_names=False)
assert result.name is None
# can pass correct-length arrays
row = a.iloc[0].values
result = a.dot(row)
exp = a.dot(a.iloc[0])
tm.assert_series_equal(result, exp)
with tm.assert_raises_regex(ValueError,
'Dot product shape mismatch'):
a.dot(row[:-1])
a = np.random.rand(1, 5)
b = np.random.rand(5, 1)
A = DataFrame(a)
# TODO(wesm): unused
B = DataFrame(b) # noqa
# it works
result = A.dot(b)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
with tm.assert_raises_regex(ValueError, 'aligned'):
df.dot(df2)
@pytest.mark.skipif(not PY35,
reason='matmul supported for Python>=3.5')
@pytest.mark.xfail(
_np_version_under1p12,
reason="unpredictable return types under numpy < 1.12")
def test_matmul(self):
# matmul test is for GH #10259
a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],
columns=['p', 'q', 'r', 's'])
b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],
columns=['one', 'two'])
# DataFrame @ DataFrame
result = operator.matmul(a, b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
# DataFrame @ Series
result = operator.matmul(a, b.one)
expected = Series(np.dot(a.values, b.one.values),
index=['a', 'b', 'c'])
tm.assert_series_equal(result, expected)
# np.array @ DataFrame
result = operator.matmul(a.values, b)
expected = np.dot(a.values, b.values)
tm.assert_almost_equal(result, expected)
# nested list @ DataFrame (__rmatmul__)
result = operator.matmul(a.values.tolist(), b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_almost_equal(result.values, expected.values)
# mixed dtype DataFrame @ DataFrame
a['q'] = a.q.round().astype(int)
result = operator.matmul(a, b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
# different dtypes DataFrame @ DataFrame
a = a.astype(int)
result = operator.matmul(a, b)
expected = DataFrame(np.dot(a.values, b.values),
index=['a', 'b', 'c'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
# unaligned
df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))
df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])
with tm.assert_raises_regex(ValueError, 'aligned'):
operator.matmul(df, df2)
@pytest.fixture
def df_duplicates():
return pd.DataFrame({'a': [1, 2, 3, 4, 4],
'b': [1, 1, 1, 1, 1],
'c': [0, 1, 2, 5, 4]},
index=[0, 0, 1, 1, 1])
@pytest.fixture
def df_strings():
return pd.DataFrame({'a': np.random.permutation(10),
'b': list(ascii_lowercase[:10]),
'c': np.random.permutation(10).astype('float64')})
@pytest.fixture
def df_main_dtypes():
return pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
class TestNLargestNSmallest(object):
dtype_error_msg_template = ("Column {column!r} has dtype {dtype}, cannot "
"use method {method!r} with this dtype")
# ----------------------------------------------------------------------
# Top / bottom
@pytest.mark.parametrize('order', [
['a'],
['c'],
['a', 'b'],
['a', 'c'],
['b', 'a'],
['b', 'c'],
['a', 'b', 'c'],
['c', 'a', 'b'],
['c', 'b', 'a'],
['b', 'c', 'a'],
['b', 'a', 'c'],
# dups!
['b', 'c', 'c']])
@pytest.mark.parametrize('n', range(1, 11))
def test_n(self, df_strings, nselect_method, n, order):
# GH10393
df = df_strings
if 'b' in order:
error_msg = self.dtype_error_msg_template.format(
column='b', method=nselect_method, dtype='object')
with tm.assert_raises_regex(TypeError, error_msg):
getattr(df, nselect_method)(n, order)
else:
ascending = nselect_method == 'nsmallest'
result = getattr(df, nselect_method)(n, order)
expected = df.sort_values(order, ascending=ascending).head(n)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('columns', [
('group', 'category_string'), ('group', 'string')])
def test_n_error(self, df_main_dtypes, nselect_method, columns):
df = df_main_dtypes
col = columns[1]
error_msg = self.dtype_error_msg_template.format(
column=col, method=nselect_method, dtype=df[col].dtype)
# escape some characters that may be in the repr
error_msg = (error_msg.replace('(', '\\(').replace(")", "\\)")
.replace("[", "\\[").replace("]", "\\]"))
with tm.assert_raises_regex(TypeError, error_msg):
getattr(df, nselect_method)(2, columns)
def test_n_all_dtypes(self, df_main_dtypes):
df = df_main_dtypes
df.nsmallest(2, list(set(df) - {'category_string', 'string'}))
df.nlargest(2, list(set(df) - {'category_string', 'string'}))
def test_n_identical_values(self):
# GH15297
df = pd.DataFrame({'a': [1] * 5, 'b': [1, 2, 3, 4, 5]})
result = df.nlargest(3, 'a')
expected = pd.DataFrame(
{'a': [1] * 3, 'b': [1, 2, 3]}, index=[0, 1, 2]
)
tm.assert_frame_equal(result, expected)
result = df.nsmallest(3, 'a')
expected = pd.DataFrame({'a': [1] * 3, 'b': [1, 2, 3]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize('order', [
['a', 'b', 'c'],
['c', 'b', 'a'],
['a'],
['b'],
['a', 'b'],
['c', 'b']])
@pytest.mark.parametrize('n', range(1, 6))
def test_n_duplicate_index(self, df_duplicates, n, order):
# GH 13412
df = df_duplicates
result = df.nsmallest(n, order)
expected = df.sort_values(order).head(n)
tm.assert_frame_equal(result, expected)
result = df.nlargest(n, order)
expected = df.sort_values(order, ascending=False).head(n)
tm.assert_frame_equal(result, expected)
def test_duplicate_keep_all_ties(self):
# see gh-16818
df = pd.DataFrame({'a': [5, 4, 4, 2, 3, 3, 3, 3],
'b': [10, 9, 8, 7, 5, 50, 10, 20]})
result = df.nlargest(4, 'a', keep='all')
expected = pd.DataFrame({'a': {0: 5, 1: 4, 2: 4, 4: 3,
5: 3, 6: 3, 7: 3},
'b': {0: 10, 1: 9, 2: 8, 4: 5,
5: 50, 6: 10, 7: 20}})
tm.assert_frame_equal(result, expected)
result = df.nsmallest(2, 'a', keep='all')
expected = pd.DataFrame({'a': {3: 2, 4: 3, 5: 3, 6: 3, 7: 3},
'b': {3: 7, 4: 5, 5: 50, 6: 10, 7: 20}})
tm.assert_frame_equal(result, expected)
def test_series_broadcasting(self):
# smoke test for numpy warnings
# GH 16378, GH 16306
df = DataFrame([1.0, 1.0, 1.0])
df_nan = DataFrame({'A': [np.nan, 2.0, np.nan]})
s = Series([1, 1, 1])
s_nan = Series([np.nan, np.nan, 1])
with tm.assert_produces_warning(None):
df_nan.clip_lower(s, axis=0)
for op in ['lt', 'le', 'gt', 'ge', 'eq', 'ne']:
getattr(df, op)(s_nan, axis=0)
def test_series_nat_conversion(self):
# GH 18521
# Check rank does not mutate DataFrame
df = DataFrame(np.random.randn(10, 3), dtype='float64')
expected = df.copy()
df.rank()
result = df
tm.assert_frame_equal(result, expected)
| pratapvardhan/pandas | pandas/tests/frame/test_analytics.py | Python | bsd-3-clause | 94,264 |
#!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
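# Illustrative mapping (hedged example, not part of the original script): an
# input line "1.2.3.4:8333" in nodes_main.txt would come out as the entry
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 8333}
# i.e. the IPv4 address embedded in the IPv4-mapped IPv6 range (::ffff:0:0/96).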
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
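# A few hedged examples of what name_to_ipv6() returns (illustrative only):
#   name_to_ipv6('1.2.3.4')    -> pchIPv4 + bytearray([1, 2, 3, 4])
#   name_to_ipv6('::1')        -> bytearray(15 zero bytes, then 0x01)
#   name_to_ipv6('0x04030201') -> pchIPv4 + bytearray([1, 2, 3, 4])  (bytes reversed)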
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef H_CHAINPARAMSSEEDS\n')
g.write('#define H_CHAINPARAMSSEEDS\n')
g.write('// List of fixed seed nodes for the bitcoin network\n')
g.write('// AUTOGENERATED by contrib/devtools/generate-seeds.py\n\n')
g.write('// Each line contains a 16-byte IPv6 address and a port.\n')
    g.write('// IPv4 as well as onion addresses are wrapped inside an IPv6 address accordingly.\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 18154)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 25714)
g.write('#endif\n')
if __name__ == '__main__':
main()
| VsyncCrypto/Vsync | share/seeds/generate-seeds.py | Python | mit | 4,187 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def DVSNameArrayUplinkPortPolicy(vim, *args, **kwargs):
    '''The uplink port policy specifies an array of uniform names for the uplink ports
    across the hosts. The size of the array indicates the number of uplink ports
    that will be created for each host in the switch. When the names in this array
    change, the uplink ports on all the hosts are automatically renamed
    accordingly. Increasing the number of names in the array automatically creates
    additional uplink ports bearing the added name on each host. Decreasing the
    number of names automatically deletes the unused uplink ports on each host.
    Decreasing beyond the number of unused uplink ports raises a fault. This policy
    overrides the portgroup's port naming format, if both are defined and the
    uplink ports are created in an uplink portgroup.'''
obj = vim.client.factory.create('ns0:DVSNameArrayUplinkPortPolicy')
# do some validation checking...
    if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument, got: %d' % (len(args) + len(kwargs)))
required = [ 'uplinkPortName' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
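# Hedged usage sketch (illustrative; `vim` stands for an existing pyvisdk
# service connection and is not part of this generated module):
#
#     policy = DVSNameArrayUplinkPortPolicy(vim, uplinkPortName=['uplink1', 'uplink2'])
#
# An unknown keyword raises InvalidArgumentError, and calling with no
# arguments at all raises IndexError, per the validation above.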
| xuru/pyvisdk | pyvisdk/do/dvs_name_array_uplink_port_policy.py | Python | mit | 1,743 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility module that contains APIs usable in the generated code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.autograph.utils.builtins import dynamic_builtin
from tensorflow.contrib.autograph.utils.builtins import dynamic_print
from tensorflow.contrib.autograph.utils.builtins import dynamic_range
from tensorflow.contrib.autograph.utils.context_managers import control_dependency_on_returns
from tensorflow.contrib.autograph.utils.misc import alias_tensors
from tensorflow.contrib.autograph.utils.multiple_dispatch import dynamic_is
from tensorflow.contrib.autograph.utils.multiple_dispatch import dynamic_is_not
from tensorflow.contrib.autograph.utils.multiple_dispatch import run_cond
from tensorflow.contrib.autograph.utils.py_func import wrap_py_func
from tensorflow.contrib.autograph.utils.tensor_list import dynamic_list_append
from tensorflow.contrib.autograph.utils.testing import fake_tf
from tensorflow.contrib.autograph.utils.type_check import is_tensor
from tensorflow.contrib.autograph.utils.type_hints import set_element_type
| nburn42/tensorflow | tensorflow/contrib/autograph/utils/__init__.py | Python | apache-2.0 | 1,825 |
import struct
import sys
import tempfile
import unittest
from mock import Mock, patch
from nose.tools import eq_
from beehive.formatter import formatters
from beehive.formatter import pretty
# from beehive.formatter import tags
from beehive.formatter.base import StreamOpener
from beehive.model import Tag, Feature, Match, Scenario, Step
class TestGetTerminalSize(unittest.TestCase):
def setUp(self):
try:
self.ioctl_patch = patch('fcntl.ioctl')
self.ioctl = self.ioctl_patch.start()
except ImportError:
self.ioctl_patch = None
self.ioctl = None
self.zero_struct = struct.pack('HHHH', 0, 0, 0, 0)
def tearDown(self):
if self.ioctl_patch:
self.ioctl_patch.stop()
def test_windows_fallback(self):
platform = sys.platform
sys.platform = 'windows'
eq_(pretty.get_terminal_size(), (80, 24))
sys.platform = platform
def test_termios_fallback(self):
try:
import termios
assert termios
return
except ImportError:
pass
eq_(pretty.get_terminal_size(), (80, 24))
def test_exception_in_ioctl(self):
try:
import termios
except ImportError:
return
def raiser(*args, **kwargs):
raise Exception('yeehar!')
self.ioctl.side_effect = raiser
eq_(pretty.get_terminal_size(), (80, 24))
self.ioctl.assert_called_with(0, termios.TIOCGWINSZ, self.zero_struct)
def test_happy_path(self):
try:
import termios
except ImportError:
return
self.ioctl.return_value = struct.pack('HHHH', 17, 23, 5, 5)
eq_(pretty.get_terminal_size(), (23, 17))
self.ioctl.assert_called_with(0, termios.TIOCGWINSZ, self.zero_struct)
def test_zero_size_fallback(self):
try:
import termios
except ImportError:
return
self.ioctl.return_value = self.zero_struct
eq_(pretty.get_terminal_size(), (80, 24))
self.ioctl.assert_called_with(0, termios.TIOCGWINSZ, self.zero_struct)
def _tf():
'''Open a temp file that looks a bunch like stdout.
'''
if sys.version_info[0] == 3:
# in python3 it's got an encoding and accepts new-style strings
return tempfile.TemporaryFile(mode='w', encoding='UTF-8')
# pre-python3 it's not got an encoding and accepts encoded data
# (old-style strings)
return tempfile.TemporaryFile(mode='w')
class FormatterTests(unittest.TestCase):
formatter_name = "plain" # SANE DEFAULT, overwritten by concrete classes
def setUp(self):
self.config = Mock()
self.config.color = True
self.config.outputs = [StreamOpener(stream=sys.stdout)]
self.config.format = [self.formatter_name]
_line = 0
@property
def line(self):
self._line += 1
return self._line
def _formatter(self, file, config):
stream_opener = StreamOpener(stream=file)
f = formatters.get_formatter(config, [stream_opener])[0]
f.uri('<string>')
return f
def _feature(self, keyword=u'k\xe9yword', name=u'name', tags=[u'spam', u'ham'],
location=u'location', description=[u'description'], scenarios=[],
background=None):
line = self.line
tags = [Tag(tag_name, line) for tag_name in tags]
return Feature('<string>', line, keyword, name, tags=tags,
description=description, scenarios=scenarios,
background=background)
def _scenario(self, keyword=u'k\xe9yword', name=u'name', tags=[], steps=[]):
line = self.line
tags = [Tag(tag_name, line) for tag_name in tags]
return Scenario('<string>', line, keyword, name, tags=tags, steps=steps)
def _step(self, keyword=u'k\xe9yword', step_type='given', name=u'name',
text=None, table=None):
line = self.line
return Step('<string>', line, keyword, step_type, name, text=text,
table=table)
def _match(self, arguments=None):
def dummy():
pass
return Match(dummy, arguments)
def test_feature(self):
# this test does not actually check the result of the formatting; it
# just exists to make sure that formatting doesn't explode in the face of
# unicode and stuff
p = self._formatter(_tf(), self.config)
f = self._feature()
p.feature(f)
def test_scenario(self):
p = self._formatter(_tf(), self.config)
f = self._feature()
p.feature(f)
s = self._scenario()
p.scenario(s)
def test_step(self):
p = self._formatter(_tf(), self.config)
f = self._feature()
p.feature(f)
s = self._scenario()
p.scenario(s)
s = self._step()
p.step(s)
p.match(self._match([]))
s.status = u'passed'
p.result(s)
class TestPretty(FormatterTests):
formatter_name = 'pretty'
class TestPlain(FormatterTests):
formatter_name = 'plain'
class TestJson(FormatterTests):
formatter_name = 'json'
class TestTagsCount(FormatterTests):
formatter_name = 'tags'
def test_tag_counts(self):
p = self._formatter(_tf(), self.config)
s = self._scenario(tags=[u'ham', u'foo'])
f = self._feature(scenarios=[s]) # feature.tags= ham, spam
p.feature(f)
p.scenario(s)
eq_(p.tag_counts, {'ham': [f, s], 'spam': [f], 'foo': [s]})
class MultipleFormattersTests(FormatterTests):
formatters = []
def setUp(self):
self.config = Mock()
self.config.color = True
self.config.outputs = [StreamOpener(stream=sys.stdout)
for i in self.formatters]
self.config.format = self.formatters
def _formatters(self, file, config):
stream_opener = StreamOpener(stream=file)
fs = formatters.get_formatter(config, [stream_opener])
for f in fs:
f.uri('<string>')
return fs
def test_feature(self):
# this test does not actually check the result of the formatting; it
# just exists to make sure that formatting doesn't explode in the face of
# unicode and stuff
ps = self._formatters(_tf(), self.config)
f = self._feature()
for p in ps:
p.feature(f)
def test_scenario(self):
ps = self._formatters(_tf(), self.config)
f = self._feature()
for p in ps:
p.feature(f)
s = self._scenario()
p.scenario(s)
def test_step(self):
ps = self._formatters(_tf(), self.config)
f = self._feature()
for p in ps:
p.feature(f)
s = self._scenario()
p.scenario(s)
s = self._step()
p.step(s)
p.match(self._match([]))
s.status = u'passed'
p.result(s)
class TestPrettyAndPlain(MultipleFormattersTests):
formatters = ['pretty', 'plain']
class TestPrettyAndJSON(MultipleFormattersTests):
formatters = ['pretty', 'json']
class TestJSONAndPlain(MultipleFormattersTests):
formatters = ['json', 'plain']
| vrutkovs/beehive | test/test_formatter.py | Python | bsd-2-clause | 7,335 |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: tasking_ur.py
import mcl.framework
import mcl.tasking
class ur:
LP_MODULE_ID = 34825
TARGET_MODULE_ID = 34824
LP_RPC_INFO_LIST_DRIVERS = {'buildType': 'Lp','moduleId': LP_MODULE_ID,'ppcId': 1}
LP_RPC_INFO_LIST_DATASOURCES = {'buildType': 'Lp','moduleId': LP_MODULE_ID,'ppcId': 2}
LP_RPC_INFO_CONNECT = {'buildType': 'Lp','moduleId': LP_MODULE_ID,'ppcId': 3}
LP_RPC_INFO_LIST_SERVERS = {'buildType': 'Lp','moduleId': LP_MODULE_ID,'ppcId': 4}
LP_RPC_INFO_LIST_DATABASES = {'buildType': 'Lp','moduleId': LP_MODULE_ID,'ppcId': 5}
LP_RPC_INFO_LIST_TABLES = {'buildType': 'Lp','moduleId': LP_MODULE_ID,'ppcId': 6}
LP_RPC_INFO_LIST_COLUMNS = {'buildType': 'Lp','moduleId': LP_MODULE_ID,'ppcId': 7}
LP_RPC_INFO_EXEC = {'buildType': 'Lp','moduleId': LP_MODULE_ID,'ppcId': 8}
LP_RPC_INFO_LIST_HANDLES = {'buildType': 'Lp','moduleId': LP_MODULE_ID,'ppcId': 9}
LP_RPC_INFO_DISCONNECT = {'buildType': 'Lp','moduleId': LP_MODULE_ID,'ppcId': 10}
TARGET_RPC_INFO_LIST_DRIVERS = {'buildType': 'Target','moduleId': TARGET_MODULE_ID,'ppcId': 1}
TARGET_RPC_INFO_LIST_DATASOURCES = {'buildType': 'Target','moduleId': TARGET_MODULE_ID,'ppcId': 2}
TARGET_RPC_INFO_CONNECT = {'buildType': 'Target','moduleId': TARGET_MODULE_ID,'ppcId': 3}
TARGET_RPC_INFO_LIST_SERVERS = {'buildType': 'Target','moduleId': TARGET_MODULE_ID,'ppcId': 4}
TARGET_RPC_INFO_LIST_DATABASES = {'buildType': 'Target','moduleId': TARGET_MODULE_ID,'ppcId': 5}
TARGET_RPC_INFO_LIST_TABLES = {'buildType': 'Target','moduleId': TARGET_MODULE_ID,'ppcId': 6}
TARGET_RPC_INFO_LIST_COLUMNS = {'buildType': 'Target','moduleId': TARGET_MODULE_ID,'ppcId': 7}
TARGET_RPC_INFO_EXEC = {'buildType': 'Target','moduleId': TARGET_MODULE_ID,'ppcId': 8}
TARGET_RPC_INFO_LIST_HANDLES = {'buildType': 'Target','moduleId': TARGET_MODULE_ID,'ppcId': 9}
TARGET_RPC_INFO_DISCONNECT = {'buildType': 'Target','moduleId': TARGET_MODULE_ID,'ppcId': 10}
RPC_INFO_DISCONNECT = mcl.tasking.RpcInfo(mcl.framework.UR, [TARGET_RPC_INFO_DISCONNECT, LP_RPC_INFO_DISCONNECT])
RPC_INFO_EXEC = mcl.tasking.RpcInfo(mcl.framework.UR, [TARGET_RPC_INFO_EXEC, LP_RPC_INFO_EXEC])
RPC_INFO_LIST_COLUMNS = mcl.tasking.RpcInfo(mcl.framework.UR, [TARGET_RPC_INFO_LIST_COLUMNS, LP_RPC_INFO_LIST_COLUMNS])
RPC_INFO_LIST_DATASOURCES = mcl.tasking.RpcInfo(mcl.framework.UR, [TARGET_RPC_INFO_LIST_DATASOURCES, LP_RPC_INFO_LIST_DATASOURCES])
RPC_INFO_LIST_SERVERS = mcl.tasking.RpcInfo(mcl.framework.UR, [TARGET_RPC_INFO_LIST_SERVERS, LP_RPC_INFO_LIST_SERVERS])
RPC_INFO_LIST_DRIVERS = mcl.tasking.RpcInfo(mcl.framework.UR, [TARGET_RPC_INFO_LIST_DRIVERS, LP_RPC_INFO_LIST_DRIVERS])
RPC_INFO_LIST_DATABASES = mcl.tasking.RpcInfo(mcl.framework.UR, [TARGET_RPC_INFO_LIST_DATABASES, LP_RPC_INFO_LIST_DATABASES])
RPC_INFO_LIST_TABLES = mcl.tasking.RpcInfo(mcl.framework.UR, [TARGET_RPC_INFO_LIST_TABLES, LP_RPC_INFO_LIST_TABLES])
RPC_INFO_LIST_HANDLES = mcl.tasking.RpcInfo(mcl.framework.UR, [TARGET_RPC_INFO_LIST_HANDLES, LP_RPC_INFO_LIST_HANDLES])
RPC_INFO_CONNECT = mcl.tasking.RpcInfo(mcl.framework.UR, [TARGET_RPC_INFO_CONNECT, LP_RPC_INFO_CONNECT])
| DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/ScRe/PyScripts/Lib/scre/mca/scre/cmd/sql/tasking_ur.py | Python | unlicense | 3,341 |
#task_H
def dijkstra(start, graph):
    # Queue-based label-correcting shortest-path search (SPFA-style rather
    # than heap-based Dijkstra, but it yields the same distance array D).
    n = len(graph)
    D = [None] * n
    D[start] = 0
    index = 0
    Q = [start]
    while index < len(Q):
        v = Q[index]
        index += 1
        for u in graph[v]:
            if D[u] is None or D[v] + min(graph[v][u]) < D[u]:
                D[u] = D[v] + min(graph[v][u])
                Q.append(u)
    return D
def reverse(graph):
    # Build the graph with every edge flipped; used to walk a shortest
    # path backwards from the target.
    n = len(graph)
    graph_reversed = {x: {} for x in range(n)}
    for i in range(n):
        for v in graph[i]:
            for w in graph[i][v]:
                add(graph_reversed, v, i, w)
    return graph_reversed
def add(graph, a, b, w):
    # Parallel edges are kept: graph[a][b] is the list of all weights a -> b.
    if b in graph[a]:
        graph[a][b].append(w)
    else:
        graph[a][b] = [w]
def min_vertex(x, D, graph):
    # Among the neighbours of x with a known distance, pick the one that
    # minimises D[u] + w(u, x), i.e. a predecessor of x on a shortest path.
    A = {u: D[u] + min(graph[x][u]) for u in graph[x] if D[u] is not None}
    L = list(A.items())
    min_i = L[0][0]
    min_v = L[0][1]
    for v in A:
        if A[v] < min_v:
            min_v = A[v]
            min_i = v
    return min_i
def path(graph, D, s, f):
    # Reconstruct one shortest path s -> f by walking predecessors backwards
    # through the reversed graph.
    graph = reverse(graph)
x = f
P = [f]
while x != s:
x = min_vertex(x, D, graph)
P.append(x)
return P[-1::-1]
n, m, s, f = tuple(map(int, input().split()))
graph = {x: {} for x in range(n)}
for i in range(m):
a, b, w = tuple(map(int, input().split()))
add(graph, a, b, w)
add(graph, b, a, w)
D = dijkstra(s, graph)
print(*path(graph, D, s, f))
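# Hedged sample run (hypothetical input; vertices are 0-indexed):
#   stdin : "3 3 0 2" followed by edges "0 1 1", "1 2 1", "0 2 5"
#   D     : [0, 1, 2]  -> printed shortest path: "0 1 2"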
| Senbjorn/mipt_lab_2016 | lab_19/task_H.py | Python | gpl-3.0 | 1,281 |
from microbit import uart
# global constants
NOTE_OFF = 0x80
NOTE_ON = 0x90
CONTROLLER_CHANGE = 0xB0
PROGRAM_CHANGE = 0xC0
class MidiOut:
def __init__(self, device=None, channel=1):
if device is None:
self.device = uart
self.device.init(baudrate=31250)
elif not hasattr(device, 'write'):
raise TypeError("device instance must have a 'write' method.")
else:
self.device = device
if channel < 1 or channel > 16:
raise ValueError('channel must be an integer between 1..16.')
self.channel = channel
def send(self, msg):
return self.device.write(bytes(msg))
def channel_message(self, command, *data, ch=None):
command = (command & 0xf0) | ((ch if ch else self.channel) - 1 & 0xf)
msg = [command] + [value & 0x7f for value in data]
self.send(msg)
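    # Worked example (illustrative): note_on(60, 100) on channel 1 sends
    # [0x90, 0x3C, 0x64] -- the status byte is the command nibble (0x90)
    # OR'ed with (channel - 1), followed by two 7-bit data bytes.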
def note_off(self, note, velocity=0, ch=None):
self.channel_message(NOTE_OFF, note, velocity, ch=ch)
def note_on(self, note, velocity=127, ch=None):
self.channel_message(NOTE_ON, note, velocity, ch=ch)
def control_change(self, control, value, lsb=False, ch=None):
self.channel_message(CONTROLLER_CHANGE, control,
value >> 7 if lsb else value, ch=ch)
if lsb and control < 20:
self.channel_message(CONTROLLER_CHANGE, control + 32, value, ch=ch)
def program_change(self, program, ch=None):
self.channel_message(PROGRAM_CHANGE, program, ch=ch)
# -----------------------------------------------------------------------------
# Main script
from microbit import button_a, display, sleep
while True:
if button_a.is_pressed():
display.set_pixel(0, 0, 0)
break
display.set_pixel(0, 0, 5)
sleep(100)
display.set_pixel(0, 0, 0)
sleep(100)
# Initialize UART for MIDI
midi = MidiOut()
while True:
# send NOTE ON for middle C (60) at velocity 100
midi.note_on(60, 100)
display.set_pixel(0, 0, 5)
sleep(500)
display.set_pixel(0, 0, 0)
# send NOTE OFF
midi.note_off(60)
sleep(500)
| SpotlightKid/microbit-worldtour-monifa | test_midiout.py | Python | mit | 2,125 |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
FILE: sample_publish_events_to_a_topic_using_sas_credential_async.py
DESCRIPTION:
These samples demonstrate sending an EventGrid Event using a shared access signature for authentication.
USAGE:
python sample_publish_events_to_a_topic_using_sas_credential_async.py
Set the environment variables with your own values before running the sample:
1) EVENTGRID_SAS - The access key of your eventgrid account.
2) EVENTGRID_TOPIC_ENDPOINT - The topic hostname. Typically it exists in the format
"https://<YOUR-TOPIC-NAME>.<REGION-NAME>.eventgrid.azure.net/api/events".
"""
import os
import asyncio
from azure.eventgrid import EventGridEvent
from azure.eventgrid.aio import EventGridPublisherClient
from azure.core.credentials import AzureSasCredential
sas = os.environ["EVENTGRID_SAS"]
endpoint = os.environ["EVENTGRID_TOPIC_ENDPOINT"]
async def publish():
credential = AzureSasCredential(sas)
client = EventGridPublisherClient(endpoint, credential)
async with client:
await client.send([
EventGridEvent(
event_type="Contoso.Items.ItemReceived",
data={
"itemSku": "Contoso Item SKU #1"
},
subject="Door1",
data_version="2.0"
)
])
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(publish())
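# Note (hedged): on Python 3.7+, `asyncio.run(publish())` is an equivalent,
# more idiomatic entry point than creating the event loop explicitly.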
| Azure/azure-sdk-for-python | sdk/eventgrid/azure-eventgrid/samples/async_samples/sample_publish_events_to_a_topic_using_sas_credential_async.py | Python | mit | 1,719 |
class UserNotFoundException(Exception):
...
| SamR1/FitTrackee | fittrackee/users/exceptions.py | Python | agpl-3.0 | 48 |
#!/usr/bin/env python
# 12.01.2007, c
import os.path as op
import shutil
from optparse import OptionParser
import sfepy
from sfepy.base.base import *
from sfepy.base.conf import ProblemConf, get_standard_keywords
from sfepy.fem import ProblemDefinition
from sfepy.fem.evaluate import assemble_by_blocks
from sfepy.homogenization.phono import transform_plot_data, plot_logs, \
plot_gaps, detect_band_gaps, compute_cat, compute_polarization_angles
from sfepy.homogenization.engine import HomogenizationEngine
from sfepy.applications import SimpleApp
from sfepy.solvers import Solver, eig
from sfepy.base.plotutils import plt
def make_save_hook( base_name, post_process_hook = None, file_per_var = None ):
def save_phono_correctors( state, problem, ir, ic ):
problem.save_state( (base_name % (ir, ic)) + '.vtk', state,
post_process_hook = post_process_hook,
file_per_var = file_per_var )
return save_phono_correctors
def try_set_defaults( obj, attr, defaults ):
try:
values = getattr( obj, attr )
set_defaults( values, defaults )
except:
values = defaults
return values
def report_iw_cat( iw_dir, christoffel ):
output( 'incident wave direction:' )
output( iw_dir )
output( 'Christoffel acoustic tensor:' )
output( christoffel )
class AcousticBandGapsApp( SimpleApp ):
def process_options( options ):
"""Application options setup. Sets default values for missing
non-compulsory options."""
get = options.get_default_attr
clear_cache = get( 'clear_cache', {} )
eigensolver = get( 'eigensolver', 'eig.sgscipy' )
eig_problem = get( 'eig_problem', 'simple' )
schur = get( 'schur', None )
elasticity_contrast = get( 'elasticity_contrast', 1.0 )
scale_epsilon = get( 'scale_epsilon', 1.0 )
incident_wave_dir = get( 'incident_wave_dir', None )
dispersion = get( 'dispersion', 'simple' )
dispersion_conf = get( 'dispersion_conf', None )
homogeneous = get( 'homogeneous', False )
save = get( 'save_eig_vectors', (0, 0) )
eig_range = get( 'eig_range', None )
freq_margins = get( 'freq_margins', (5, 5) )
# Given in per cent.
freq_margins = 0.01 * nm.array( freq_margins, dtype = nm.float64 )
fixed_eig_range = get( 'fixed_eig_range', None )
# Given in per cent.
freq_step = 0.01 * get( 'freq_step', 5 )
feps = get( 'feps', 1e-8 )
zeps = get( 'zeps', 1e-8 )
teps = get( 'teps', 1e-4 )
teps_rel = get( 'teps_rel', True )
eig_vector_transform = get( 'eig_vector_transform', None )
plot_transform = get( 'plot_transform', None )
plot_transform_wave = get( 'plot_transform_wave', None )
plot_transform_angle = get( 'plot_transform_angle', None )
plot_options = get( 'plot_options', {'show' : True,'legend' : False,} )
fig_name = get( 'fig_name', None )
fig_name_wave = get( 'fig_name_wave', None )
fig_name_angle = get( 'fig_name_angle', None )
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : 'min eig($M^*$)',
'eig_mid' : 'mid eig($M^*$)',
'eig_max' : 'max eig($M^*$)',
'y_axis' : 'eigenvalues of mass matrix $M^*$',
}
plot_labels = try_set_defaults( options, 'plot_labels', aux )
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : r'$\kappa$(min)',
'eig_mid' : r'$\kappa$(mid)',
'eig_max' : r'$\kappa$(max)',
'y_axis' : 'polarization angles',
}
plot_labels_angle = try_set_defaults( options, 'plot_labels_angle', aux )
aux = {
'resonance' : 'eigenfrequencies',
'masked' : 'masked eigenfrequencies',
'eig_min' : r'wave number (min)',
'eig_mid' : r'wave number (mid)',
'eig_max' : r'wave number (max)',
'y_axis' : 'wave numbers',
}
plot_labels_wave = try_set_defaults( options, 'plot_labels_wave', aux )
plot_rsc = {
'resonance' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : '-' },
'masked' : {'linewidth' : 0.5, 'color' : 'r', 'linestyle' : ':' },
'x_axis' : {'linewidth' : 0.5, 'color' : 'k', 'linestyle' : '--' },
'eig_min' : {'linewidth' : 0.5, 'color' : 'b', 'linestyle' : '--' },
'eig_mid' : {'linewidth' : 0.5, 'color' : 'b', 'linestyle' : '-.' },
'eig_max' : {'linewidth' : 0.5, 'color' : 'b', 'linestyle' : '-' },
'strong_gap' : {'linewidth' : 0, 'facecolor' : (1, 1, 0.5) },
'weak_gap' : {'linewidth' : 0, 'facecolor' : (1, 1, 1) },
'propagation' : {'linewidth' : 0, 'facecolor' : (0.5, 1, 0.5) },
'params' : {'axes.labelsize': 'large',
'text.fontsize': 'large',
'legend.fontsize': 'large',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large',
'text.usetex': False},
}
plot_rsc = try_set_defaults( options, 'plot_rsc', plot_rsc )
eigenmomentum = get( 'eigenmomentum', None,
'missing "eigenmomentum" in options!' )
region_to_material = get( 'region_to_material', None,
'missing "region_to_material" in options!' )
tensor_names = get( 'tensor_names', None,
'missing "tensor_names" in options!' )
volume = get( 'volume', None, 'missing "volume" in options!' )
if eig_problem == 'simple_liquid':
liquid_region = get('liquid_region', None,
'missing "liquid_region" in options!')
else:
liquid_region = None
return Struct( **locals() )
process_options = staticmethod( process_options )
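# Illustrative 'options' section consumed by process_options() above -- a
# sketch with hypothetical values, not taken from any particular input file;
# only keys that process_options() actually reads are shown:
#
#   options = {
#       'eigensolver' : 'eig.sgscipy',
#       'eig_problem' : 'simple',
#       'save_eig_vectors' : (10, 10),
#       'eig_range' : (0, 30),
#       'freq_margins' : (10, 10),   # in per cent
#       'freq_step' : 5,             # in per cent
#       'feps' : 1e-8,
#       'zeps' : 1e-8,
#       'region_to_material' : {'Y1' : 'matrix', 'Y2' : 'inclusion'},
#       'tensor_names' : ('D',),
#   }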
def process_options_pv( options ):
"""Application options setup for phase velocity computation. Sets
default values for missing non-compulsory options."""
get = options.get_default_attr
clear_cache = get( 'clear_cache', {} )
eigensolver = get( 'eigensolver', 'eig.sgscipy' )
incident_wave_dir = get( 'incident_wave_dir', None )
dispersion = get( 'dispersion', 'simple' )
dispersion_conf = get( 'dispersion_conf', None )
homogeneous = get( 'homogeneous', False )
fig_suffix = get( 'fig_suffix', '.pdf' )
region_to_material = get( 'region_to_material', None,
'missing "region_to_material" in options!' )
tensor_names = get( 'tensor_names', None,
'missing "tensor_names" in options!' )
volume = get( 'volume', None, 'missing "volume" in options!' )
return Struct( **locals() )
process_options_pv = staticmethod( process_options_pv )
def __init__( self, conf, options, output_prefix, **kwargs ):
SimpleApp.__init__( self, conf, options, output_prefix,
init_equations = False )
self.setup_options()
self.cached_coefs = None
self.cached_iw_dir = None
self.cached_christoffel = None
self.cached_evp = None
output_dir = self.problem.output_dir
shutil.copyfile( conf._filename,
op.join( output_dir, op.basename( conf._filename ) ) )
def setup_options( self ):
SimpleApp.setup_options( self )
if self.options.phase_velocity:
process_options = AcousticBandGapsApp.process_options_pv
else:
process_options = AcousticBandGapsApp.process_options
self.app_options += process_options( self.conf.options )
def call( self ):
"""In parametric runs, cached data (homogenized coefficients,
Christoffel acoustic tensor and eigenvalue problem solution) are
cleared according to the 'clear_cache' application options.
Example:
clear_cache = {'cached_christoffel' : True, 'cached_evp' : True}
"""
options = self.options
for key, val in self.app_options.clear_cache.iteritems():
if val and key.startswith('cached_'):
setattr(self, key, None)
if options.phase_velocity:
# No band gaps in this case.
return self.compute_phase_velocity()
evp = self.solve_eigen_problem()
self.fix_eig_range( evp.eigs.shape[0] )
if options.detect_band_gaps:
bg = detect_band_gaps( self.problem, evp.kind,
evp.eigs_rescaled, evp.eig_vectors,
self.app_options, self.conf.funmod )
if options.plot:
plot_range, teigs = transform_plot_data( bg.logs.eigs,
bg.opts.plot_transform,
self.conf.funmod )
plot_rsc = bg.opts.plot_rsc
plot_opts = bg.opts.plot_options
plot_labels = bg.opts.plot_labels
plt.rcParams.update( plot_rsc['params'] )
fig = plot_gaps( 1, plot_rsc, bg.gaps, bg.kinds,
bg.freq_range_margins, plot_range,
clear = True )
fig = plot_logs( 1, plot_rsc, plot_labels, bg.logs.freqs, teigs,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range, False,
show_legend = plot_opts['legend'],
new_axes = True )
fig_name = bg.opts.fig_name
if fig_name is not None:
fig.savefig( fig_name )
if plot_opts['show']:
plt.show()
elif options.analyze_dispersion:
christoffel, iw_dir = self.compute_cat(ret_iw_dir=True)
bg = detect_band_gaps( self.problem, evp.kind,
evp.eigs_rescaled, evp.eig_vectors,
self.app_options, self.conf.funmod,
christoffel = christoffel )
output( 'computing polarization angles...' )
pas = compute_polarization_angles( iw_dir, bg.logs.eig_vectors )
output( '...done' )
bg.polarization_angles = pas
output( 'computing phase velocity...' )
bg.phase_velocity = self.compute_phase_velocity()
output( '...done' )
if options.plot:
plot_rsc = bg.opts.plot_rsc
plot_opts = bg.opts.plot_options
plt.rcParams.update( plot_rsc['params'] )
aux = transform_plot_data( pas,
bg.opts.plot_transform_angle,
self.conf.funmod )
plot_range, pas = aux
plot_labels = bg.opts.plot_labels_angle
fig = plot_gaps( 1, plot_rsc, bg.gaps, bg.kinds,
bg.freq_range_margins, plot_range,
clear = True )
fig = plot_logs( 1, plot_rsc, plot_labels, bg.logs.freqs, pas,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range, False,
show_legend = plot_opts['legend'],
new_axes = True )
fig_name = bg.opts.fig_name_angle
if fig_name is not None:
fig.savefig( fig_name )
aux = transform_plot_data( bg.logs.eigs,
bg.opts.plot_transform_wave,
self.conf.funmod )
plot_range, teigs = aux
plot_labels = bg.opts.plot_labels_wave
fig = plot_gaps( 2, plot_rsc, bg.gaps, bg.kinds,
bg.freq_range_margins, plot_range,
clear = True )
fig = plot_logs( 2, plot_rsc, plot_labels, bg.logs.freqs, teigs,
bg.valid[bg.eig_range],
bg.freq_range_initial,
plot_range, False,
show_legend = plot_opts['legend'],
new_axes = True )
fig_name = bg.opts.fig_name_wave
if fig_name is not None:
fig.savefig( fig_name )
if plot_opts['show']:
plt.show()
else:
bg = None
return evp, bg
def fix_eig_range( self, n_eigs ):
eig_range = get_default( self.app_options.eig_range, (0, n_eigs) )
if eig_range[-1] < 0:
eig_range[-1] += n_eigs + 1
assert_( eig_range[0] < (eig_range[1] - 1) )
assert_( eig_range[1] <= n_eigs )
self.app_options.eig_range = eig_range
def solve_eigen_problem( self, ofn_trunk = None, post_process_hook = None ):
if self.cached_evp is not None:
return self.cached_evp
problem = self.problem
ofn_trunk = get_default( ofn_trunk, problem.ofn_trunk,
'output file name trunk missing!' )
post_process_hook = get_default( post_process_hook,
self.post_process_hook )
conf = self.conf
eig_problem = self.app_options.eig_problem
if eig_problem in ['simple', 'simple_liquid']:
problem.set_equations( conf.equations )
problem.time_update()
mtx_a = problem.evaluate(conf.equations['lhs'], mode='weak',
auto_init=True, dw_mode='matrix')
mtx_m = problem.evaluate(conf.equations['rhs'], mode='weak',
dw_mode='matrix')
elif eig_problem == 'schur':
# A = K + B^T D^{-1} B.
mtx = assemble_by_blocks( conf.equations, self.problem,
ebcs = conf.ebcs,
epbcs = conf.epbcs )
problem.set_equations( conf.equations )
problem.time_update()
ls = Solver.any_from_conf( problem.ls_conf,
presolve = True, mtx = mtx['D'] )
mtx_b, mtx_m = mtx['B'], mtx['M']
mtx_dib = nm.empty( mtx_b.shape, dtype = mtx_b.dtype )
for ic in xrange( mtx_b.shape[1] ):
mtx_dib[:,ic] = ls( mtx_b[:,ic].toarray().squeeze() )
mtx_a = mtx['K'] + mtx_b.T * mtx_dib
else:
raise NotImplementedError
## from sfepy.base.plotutils import spy, plt
## spy( mtx_b, eps = 1e-12 )
## plt.show()
## mtx_a.save( 'a.txt', format='%d %d %.12f\n' )
## mtx_b.save( 'b.txt', format='%d %d %.12f\n' )
## pause()
output( 'computing resonance frequencies...' )
tt = [0]
if isinstance( mtx_a, sc.sparse.spmatrix ):
mtx_a = mtx_a.toarray()
if isinstance( mtx_m, sc.sparse.spmatrix ):
mtx_m = mtx_m.toarray()
eigs, mtx_s_phi = eig(mtx_a, mtx_m, return_time=tt,
method=self.app_options.eigensolver)
eigs[eigs<0.0] = 0.0
output( '...done in %.2f s' % tt[0] )
output( 'original eigenfrequencies:' )
output( eigs )
opts = self.app_options
epsilon2 = opts.scale_epsilon * opts.scale_epsilon
eigs_rescaled = (opts.elasticity_contrast / epsilon2) * eigs
output( 'rescaled eigenfrequencies:' )
output( eigs_rescaled )
output( 'number of eigenfrequencies: %d' % eigs.shape[0] )
try:
assert_( nm.isfinite( eigs ).all() )
except ValueError:
debug()
# B-orthogonality check.
## print nm.dot( mtx_s_phi[:,5], nm.dot( mtx_m, mtx_s_phi[:,5] ) )
## print nm.dot( mtx_s_phi[:,5], nm.dot( mtx_m, mtx_s_phi[:,0] ) )
## debug()
n_eigs = eigs.shape[0]
variables = problem.get_variables()
mtx_phi = nm.empty( (variables.di.ptr[-1], mtx_s_phi.shape[1]),
dtype = nm.float64 )
make_full = variables.make_full_vec
if eig_problem in ['simple', 'simple_liquid']:
for ii in xrange( n_eigs ):
mtx_phi[:,ii] = make_full( mtx_s_phi[:,ii] )
eig_vectors = mtx_phi
elif eig_problem == 'schur':
# Update also eliminated variables.
schur = self.app_options.schur
primary_var = schur['primary_var']
eliminated_var = schur['eliminated_var']
mtx_s_phi_schur = - sc.dot( mtx_dib, mtx_s_phi )
aux = nm.empty( (variables.adi.ptr[-1],),
dtype = nm.float64 )
set = variables.set_state_part
for ii in xrange( n_eigs ):
set( aux, mtx_s_phi[:,ii], primary_var, stripped = True )
set( aux, mtx_s_phi_schur[:,ii], eliminated_var,
stripped = True )
mtx_phi[:,ii] = make_full( aux )
indx = variables.get_indx( primary_var )
eig_vectors = mtx_phi[indx,:]
save = self.app_options.save
out = {}
for ii in xrange( n_eigs ):
if (ii >= save[0]) and (ii < (n_eigs - save[1])): continue
aux = problem.state_to_output( mtx_phi[:,ii] )
for name, val in aux.iteritems():
out[name+'%03d' % ii] = val
if post_process_hook is not None:
out = post_process_hook( out, problem, mtx_phi )
problem.domain.mesh.write( ofn_trunk + '.vtk', io = 'auto', out = out )
fd = open( ofn_trunk + '_eigs.txt', 'w' )
eigs.tofile( fd, ' ' )
fd.close()
evp = Struct( kind = eig_problem,
eigs = eigs, eigs_rescaled = eigs_rescaled,
eig_vectors = eig_vectors )
self.cached_evp = evp
return evp
def eval_homogenized_coefs( self ):
if self.cached_coefs is not None:
return self.cached_coefs
opts = self.app_options
if opts.homogeneous:
rtm = opts.region_to_material
mat_region = rtm.keys()[0]
mat_name = rtm[mat_region]
self.problem.update_materials()
mat = self.problem.materials[mat_name]
coefs = mat.get_data( mat_region, 0, opts.tensor_names )
else:
dc = opts.dispersion_conf
dconf = ProblemConf.from_dict( dc['input'], dc['module'] )
dconf.materials = self.conf.materials
dconf.fe = self.conf.fe
dconf.regions.update( self.conf.regions )
dconf.options['output_dir'] = self.problem.output_dir
volume = opts.volume(self.problem, 'Y')
problem = ProblemDefinition.from_conf(dconf, init_equations=False)
he = HomogenizationEngine( problem, self.options, volume = volume )
coefs = he()
## print coefs
## pause()
output.prefix = self.output_prefix
self.cached_coefs = coefs
return coefs
def compute_cat( self, ret_iw_dir=False ):
"""Compute the Christoffel acoustic tensor, given the incident wave
direction."""
opts = self.app_options
iw_dir = nm.array( opts.incident_wave_dir, dtype = nm.float64 )
dim = self.problem.get_dim()
assert_( dim == iw_dir.shape[0] )
iw_dir = iw_dir / nla.norm( iw_dir )
if self.cached_christoffel is not None:
christoffel = self.cached_christoffel
else:
coefs = self.eval_homogenized_coefs()
christoffel = compute_cat( coefs, iw_dir,
self.app_options.dispersion )
report_iw_cat( iw_dir, christoffel )
self.cached_christoffel = christoffel
if ret_iw_dir:
return christoffel, iw_dir
else:
return christoffel
def compute_phase_velocity( self ):
from sfepy.homogenization.phono import compute_density_volume_info
opts = self.app_options
dim = self.problem.domain.mesh.dim
christoffel = self.compute_cat()
self.problem.update_materials()
dv_info = compute_density_volume_info( self.problem, opts.volume,
opts.region_to_material )
output( 'average density:', dv_info.average_density )
eye = nm.eye( dim, dim, dtype = nm.float64 )
mtx_mass = eye * dv_info.average_density
meigs, mvecs = eig( mtx_mass, mtx_b = christoffel,
eigenvectors = True, method = opts.eigensolver )
phase_velocity = 1.0 / nm.sqrt( meigs )
return phase_velocity
usage = """%prog [options] filename_in"""
help = {
'filename' :
'basename of output file(s) [default: <basename of input file>]',
'detect_band_gaps' :
'detect frequency band gaps',
'analyze_dispersion' :
'analyze dispersion properties (low frequency domain)',
'plot' :
'plot frequency band gaps, assumes -b',
'phase_velocity' :
'compute phase velocity (frequency-independent mass only)'
}
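# Example invocations (hypothetical input file names; the options are the
# ones registered in main() below):
#   python eigen.py -b -p band_gaps.py        # detect and plot band gaps
#   python eigen.py -d -p band_gaps.py        # analyze dispersion, with plots
#   python eigen.py --phase-velocity conf.py  # compute phase velocity only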
def main():
parser = OptionParser(usage = usage, version = "%prog " + sfepy.__version__)
parser.add_option( "-o", "", metavar = 'filename',
action = "store", dest = "output_filename_trunk",
default = None, help = help['filename'] )
parser.add_option( "-b", "--band-gaps",
action = "store_true", dest = "detect_band_gaps",
default = False, help = help['detect_band_gaps'] )
parser.add_option( "-d", "--dispersion",
action = "store_true", dest = "analyze_dispersion",
default = False, help = help['analyze_dispersion'] )
parser.add_option( "-p", "--plot",
action = "store_true", dest = "plot",
default = False, help = help['plot'] )
parser.add_option( "--phase-velocity",
action = "store_true", dest = "phase_velocity",
default = False, help = help['phase_velocity'] )
options, args = parser.parse_args()
if options.plot:
if plt is None:
output( 'matplotlib.pyplot cannot be imported, ignoring option -p!' )
options.plot = False
elif not options.analyze_dispersion:
options.detect_band_gaps = True
if (len( args ) == 1):
filename_in = args[0]
else:
parser.print_help()
return
required, other = get_standard_keywords()
required.remove( 'solver_[0-9]+|solvers' )
if options.phase_velocity:
required.remove( 'ebc_[0-9]+|ebcs' )
required.remove( 'equations' )
conf = ProblemConf.from_file( filename_in, required, other )
app = AcousticBandGapsApp( conf, options, 'eigen:' )
opts = conf.options
if hasattr( opts, 'parametric_hook' ): # Parametric study.
parametric_hook = getattr( conf, opts.parametric_hook )
app.parametrize( parametric_hook )
app()
if __name__ == '__main__':
## mtx_k = io.read_sparse_matrix_hdf5( '1todo/K.h5', output_format = 'csr' )
## print mtx_k.__repr__()
## mtx_m = io.read_sparse_matrix_hdf5( '1todo/M.h5', output_format = 'csr' )
## print mtx_m.__repr__()
## mtx_k.save( 'k.txt', format='%d %d %.12f\n' )
## mtx_m.save( 'm.txt', format='%d %d %.12f\n' )
## eigs, mtx_s_phi = eig( mtx_k.toarray(), mtx_m.toarray(),
## print_time = True )
## print eigs
## eigs, aux = eig( mtx_m.toarray(),
## print_time = True )
## print eigs
## pause()
main()
| olivierverdier/sfepy | eigen.py | Python | bsd-3-clause | 24,663 |
#!/usr/bin/python
#
# Copyright (C) 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Script for testing ganeti.hypervisor.hv_lxc"""
import unittest
from ganeti import constants
from ganeti import objects
from ganeti import hypervisor
from ganeti.hypervisor import hv_lxc
import testutils
class TestConsole(unittest.TestCase):
def test(self):
instance = objects.Instance(name="lxc.example.com",
primary_node="node199-uuid")
node = objects.Node(name="node199", uuid="node199-uuid",
ndparams={})
group = objects.NodeGroup(name="group991", ndparams={})
cons = hv_lxc.LXCHypervisor.GetInstanceConsole(instance, node, group,
{}, {})
self.assertTrue(cons.Validate())
self.assertEqual(cons.kind, constants.CONS_SSH)
self.assertEqual(cons.host, node.name)
self.assertEqual(cons.command[-1], instance.name)
if __name__ == "__main__":
testutils.GanetiTestProgram()
| badp/ganeti | test/py/ganeti.hypervisor.hv_lxc_unittest.py | Python | gpl-2.0 | 1,688 |
#
# Vagoth Cluster Management Framework
# Copyright (C) 2013 Robert Thomson
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
Actions are called by the job scheduler to perform tasks.
They contain a dictionary of arguments, but will be called
with an instance of Vagoth's Manager as the first argument.
"""
from ..exceptions import ActionException
from .. import get_manager
from .. import transaction
import logging
Manager = get_manager()
Allocator = Manager.config.make_factory("virt/allocator", context=Manager)
def log_vm_action(vm_name, action, msg=None):
"""
Call logging.info(), using transaction.get_txid() and .get_source()
along with the VM name and the message, if any.
"""
txid = transaction.get_txid()
source = transaction.get_source()
if msg:
logging.info("vagoth: txid={0} source={1} action={2} vm={3}: {4}".format(txid, source, action, vm_name, msg))
else:
logging.info("vagoth: txid={0} source={1} action={2} vm={3}".format(txid, source, action, vm_name))
def vm_define(manager, vm_name, hint=None, **kwargs):
"""
If a VM isn't allocated to a hypervisor node, it will
allocate it to a hypervisor node using the `Allocator`,
and call driver.define(hypervisor, vm)
"""
global Allocator
log_vm_action(vm_name, "define")
vm = manager.get_node(vm_name)
node = vm.parent
if node:
return
Allocator.allocate(vm, hint)
vm.refresh()
node = vm.parent
if node:
vm.state = "defined"
node.driver.define(node, vm)
def vm_provision(manager, vm_name, hint=None, **kwargs):
"""
If a VM isn't allocated to a hypervisor node, it will
allocate it to a hypervisor node using the `Allocator`,
and call driver.provision(hypervisor, vm)
"""
global Allocator
log_vm_action(vm_name, "provision")
vm = manager.get_node(vm_name)
node = vm.parent
if node:
return
Allocator.allocate(vm, hint)
vm.refresh()
node = vm.parent
if node:
vm.state = "defined"
node.driver.provision(node, vm)
def vm_start(manager, vm_name, **kwargs):
"""
If a VM isn't allocated to a hypervisor node,
it will call the vm_define action, and if that
worked, call driver.start(hypervisor, vm)
"""
log_vm_action(vm_name, "start")
vm = manager.get_node(vm_name)
node = vm.parent
if not node:
manager.action("vm_define", vm_name=vm_name, **kwargs)
vm.refresh() # pick up state change
node = vm.parent
if node:
vm.state = "starting"
node.driver.start(node, vm)
else:
raise ActionException("VM not assigned")
def vm_stop(manager, vm_name, **kwargs):
"""
call driver.stop(hypervisor, vm)
"""
log_vm_action(vm_name, "stop")
vm = manager.get_node(vm_name)
node = vm.parent
if node:
vm.state = "stopping"
node.driver.stop(node, vm)
def vm_shutdown(manager, vm_name, **kwargs):
"""
call driver.shutdown(hypervisor, vm)
"""
log_vm_action(vm_name, "shutdown")
vm = manager.get_node(vm_name)
node = vm.parent
if node:
vm.state = "shutting down"
node.driver.shutdown(node, vm)
def vm_reboot(manager, vm_name, **kwargs):
"""
call driver.reboot(hypervisor, vm)
"""
log_vm_action(vm_name, "reboot")
vm = manager.get_node(vm_name)
node = vm.parent
if node:
vm.state = "rebooting"
node.driver.reboot(node, vm)
def vm_undefine(manager, vm_name):
"""
call driver.undefine(hypervisor, vm)
"""
log_vm_action(vm_name, "undefine")
vm = manager.get_node(vm_name)
node = vm.parent
if not node:
return
node.driver.undefine(node, vm)
def vm_deprovision(manager, vm_name):
"""
call driver.deprovision(hypervisor, vm)
"""
log_vm_action(vm_name, "deprovision")
vm = manager.get_node(vm_name)
node = vm.parent
if not node:
return
node.driver.deprovision(node, vm)
def vm_poll(manager, **kwargs):
"""
instantiate the monitor and poll all nodes
"""
monitor = Manager.config.make_factory("virt/monitor", context=Manager)
monitor.poll_nodes()
| sippeproject/vagoth | vagoth/virt/actions.py | Python | lgpl-2.1 | 4,887 |
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
from trytond.pool import Pool, PoolMeta
__metaclass__ = PoolMeta
__all__ = ['Sale']
class Sale:
__name__ = 'sale.sale'
@property
def invoice_grouping_method(self):
return self.party.sale_invoice_grouping_method
@property
def _invoice_grouping_fields(self):
return ('state', 'company', 'type', 'journal', 'party',
'invoice_address', 'currency', 'account', 'payment_term')
def _get_grouped_invoice_order(self):
"Returns the order clause used to find invoice that should be grouped"
return None
def _get_grouped_invoice_domain(self, invoice):
"Returns a domain that will find invoices that should be grouped"
Invoice = Pool().get('account.invoice')
invoice_domain = [
('lines.origin', 'like', 'sale.line,%'),
]
defaults = Invoice.default_get(self._invoice_grouping_fields,
with_rec_name=False)
for field in self._invoice_grouping_fields:
invoice_domain.append(
(field, '=', getattr(invoice, field, defaults.get(field)))
)
return invoice_domain
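# Illustrative result of _get_grouped_invoice_domain() (hypothetical field
# values, assuming the default _invoice_grouping_fields above):
#   [('lines.origin', 'like', 'sale.line,%'),
#    ('state', '=', 'draft'),
#    ('company', '=', 1),
#    ('party', '=', 42),
#    ...
#    ('payment_term', '=', 1)]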
def _get_invoice_sale(self, invoice_type):
Invoice = Pool().get('account.invoice')
invoice = super(Sale, self)._get_invoice_sale(invoice_type)
if self.invoice_grouping_method:
domain = self._get_grouped_invoice_domain(invoice)
order = self._get_grouped_invoice_order()
grouped_invoices = Invoice.search(domain, order=order, limit=1)
if grouped_invoices:
invoice, = grouped_invoices
return invoice
| kret0s/gnuhealth-live | tryton/server/trytond-3.8.3/trytond/modules/sale_invoice_grouping/sale.py | Python | gpl-3.0 | 1,791 |
# coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class AudioChannelsTest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'number_of_channels': 'int',
'reject_on_error': 'bool',
'checked': 'bool'
}
attribute_map = {
'number_of_channels': 'number_of_channels',
'reject_on_error': 'reject_on_error',
'checked': 'checked'
}
def __init__(self, number_of_channels=None, reject_on_error=None, checked=None, local_vars_configuration=None): # noqa: E501
"""AudioChannelsTest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._number_of_channels = None
self._reject_on_error = None
self._checked = None
self.discriminator = None
if number_of_channels is not None:
self.number_of_channels = number_of_channels
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if checked is not None:
self.checked = checked
@property
def number_of_channels(self):
"""Gets the number_of_channels of this AudioChannelsTest. # noqa: E501
:return: The number_of_channels of this AudioChannelsTest. # noqa: E501
:rtype: int
"""
return self._number_of_channels
@number_of_channels.setter
def number_of_channels(self, number_of_channels):
"""Sets the number_of_channels of this AudioChannelsTest.
:param number_of_channels: The number_of_channels of this AudioChannelsTest. # noqa: E501
:type: int
"""
self._number_of_channels = number_of_channels
@property
def reject_on_error(self):
"""Gets the reject_on_error of this AudioChannelsTest. # noqa: E501
:return: The reject_on_error of this AudioChannelsTest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this AudioChannelsTest.
:param reject_on_error: The reject_on_error of this AudioChannelsTest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def checked(self):
"""Gets the checked of this AudioChannelsTest. # noqa: E501
:return: The checked of this AudioChannelsTest. # noqa: E501
:rtype: bool
"""
return self._checked
@checked.setter
def checked(self, checked):
"""Sets the checked of this AudioChannelsTest.
:param checked: The checked of this AudioChannelsTest. # noqa: E501
:type: bool
"""
self._checked = checked
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AudioChannelsTest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AudioChannelsTest):
return True
return self.to_dict() != other.to_dict()
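# A minimal construction sketch for this generated model (illustrative
# values only):
#   test = AudioChannelsTest(number_of_channels=2, reject_on_error=True,
#                            checked=True)
#   test.to_dict()
#   # -> {'number_of_channels': 2, 'reject_on_error': True, 'checked': True}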
| Telestream/telestream-cloud-python-sdk | telestream_cloud_qc_sdk/telestream_cloud_qc/models/audio_channels_test.py | Python | mit | 5,000 |
from __future__ import division, print_function, absolute_import
import itertools
from numpy.testing import (assert_, assert_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_equal,
assert_allclose)
from pytest import raises as assert_raises
from numpy import mgrid, pi, sin, ogrid, poly1d, linspace
import numpy as np
from scipy._lib.six import xrange
from scipy._lib._numpy_compat import _assert_warns, suppress_warnings
from scipy.interpolate import (interp1d, interp2d, lagrange, PPoly, BPoly,
splrep, splev, splantider, splint, sproot, Akima1DInterpolator,
RegularGridInterpolator, LinearNDInterpolator, NearestNDInterpolator,
RectBivariateSpline, interpn, NdPPoly, BSpline)
from scipy.special import poch, gamma
from scipy.interpolate import _ppoly
from scipy._lib._gcutils import assert_deallocated
from scipy.integrate import nquad
from scipy.special import binom
class TestInterp2D(object):
def test_interp2d(self):
y, x = mgrid[0:2:20j, 0:pi:21j]
z = sin(x+0.5*y)
I = interp2d(x, y, z)
assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
v,u = ogrid[0:2:24j, 0:pi:25j]
assert_almost_equal(I(u.ravel(), v.ravel()), sin(u+0.5*v), decimal=2)
def test_interp2d_meshgrid_input(self):
# Ticket #703
x = linspace(0, 2, 16)
y = linspace(0, pi, 21)
z = sin(x[None,:] + y[:,None]/2.)
I = interp2d(x, y, z)
assert_almost_equal(I(1.0, 2.0), sin(2.0), decimal=2)
def test_interp2d_meshgrid_input_unsorted(self):
np.random.seed(1234)
x = linspace(0, 2, 16)
y = linspace(0, pi, 21)
z = sin(x[None,:] + y[:,None]/2.)
ip1 = interp2d(x.copy(), y.copy(), z, kind='cubic')
np.random.shuffle(x)
z = sin(x[None,:] + y[:,None]/2.)
ip2 = interp2d(x.copy(), y.copy(), z, kind='cubic')
np.random.shuffle(x)
np.random.shuffle(y)
z = sin(x[None,:] + y[:,None]/2.)
ip3 = interp2d(x, y, z, kind='cubic')
x = linspace(0, 2, 31)
y = linspace(0, pi, 30)
assert_equal(ip1(x, y), ip2(x, y))
assert_equal(ip1(x, y), ip3(x, y))
def test_interp2d_eval_unsorted(self):
y, x = mgrid[0:2:20j, 0:pi:21j]
z = sin(x + 0.5*y)
func = interp2d(x, y, z)
xe = np.array([3, 4, 5])
ye = np.array([5.3, 7.1])
assert_allclose(func(xe, ye), func(xe, ye[::-1]))
assert_raises(ValueError, func, xe, ye[::-1], 0, 0, True)
def test_interp2d_linear(self):
# Ticket #898
a = np.zeros([5, 5])
a[2, 2] = 1.0
x = y = np.arange(5)
b = interp2d(x, y, a, 'linear')
assert_almost_equal(b(2.0, 1.5), np.array([0.5]), decimal=2)
assert_almost_equal(b(2.0, 2.5), np.array([0.5]), decimal=2)
def test_interp2d_bounds(self):
x = np.linspace(0, 1, 5)
y = np.linspace(0, 2, 7)
z = x[None, :]**2 + y[:, None]
ix = np.linspace(-1, 3, 31)
iy = np.linspace(-1, 3, 33)
b = interp2d(x, y, z, bounds_error=True)
assert_raises(ValueError, b, ix, iy)
b = interp2d(x, y, z, fill_value=np.nan)
iz = b(ix, iy)
mx = (ix < 0) | (ix > 1)
my = (iy < 0) | (iy > 2)
assert_(np.isnan(iz[my,:]).all())
assert_(np.isnan(iz[:,mx]).all())
assert_(np.isfinite(iz[~my,:][:,~mx]).all())
class TestInterp1D(object):
def setup_method(self):
self.x5 = np.arange(5.)
self.x10 = np.arange(10.)
self.y10 = np.arange(10.)
self.x25 = self.x10.reshape((2,5))
self.x2 = np.arange(2.)
self.y2 = np.arange(2.)
self.x1 = np.array([0.])
self.y1 = np.array([0.])
self.y210 = np.arange(20.).reshape((2, 10))
self.y102 = np.arange(20.).reshape((10, 2))
self.y225 = np.arange(20.).reshape((2, 2, 5))
self.y25 = np.arange(10.).reshape((2, 5))
self.y235 = np.arange(30.).reshape((2, 3, 5))
self.y325 = np.arange(30.).reshape((3, 2, 5))
self.fill_value = -100.0
def test_validation(self):
# Make sure that appropriate exceptions are raised when invalid values
# are given to the constructor.
# These should all work.
for kind in ('nearest', 'zero', 'linear', 'slinear', 'quadratic', 'cubic'):
interp1d(self.x10, self.y10, kind=kind)
interp1d(self.x10, self.y10, kind=kind, fill_value="extrapolate")
interp1d(self.x10, self.y10, kind='linear', fill_value=(-1, 1))
interp1d(self.x10, self.y10, kind='linear',
fill_value=np.array([-1]))
interp1d(self.x10, self.y10, kind='linear',
fill_value=(-1,))
interp1d(self.x10, self.y10, kind='linear',
fill_value=-1)
interp1d(self.x10, self.y10, kind='linear',
fill_value=(-1, -1))
interp1d(self.x10, self.y10, kind=0)
interp1d(self.x10, self.y10, kind=1)
interp1d(self.x10, self.y10, kind=2)
interp1d(self.x10, self.y10, kind=3)
interp1d(self.x10, self.y210, kind='linear', axis=-1,
fill_value=(-1, -1))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=np.ones(10))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=(np.ones(10), np.ones(10)))
interp1d(self.x2, self.y210, kind='linear', axis=0,
fill_value=(np.ones(10), -1))
# x array must be 1D.
assert_raises(ValueError, interp1d, self.x25, self.y10)
# y array cannot be a scalar.
assert_raises(ValueError, interp1d, self.x10, np.array(0))
# Check for x and y arrays having the same length.
assert_raises(ValueError, interp1d, self.x10, self.y2)
assert_raises(ValueError, interp1d, self.x2, self.y10)
assert_raises(ValueError, interp1d, self.x10, self.y102)
interp1d(self.x10, self.y210)
interp1d(self.x10, self.y102, axis=0)
# Check for x and y having at least 1 element.
assert_raises(ValueError, interp1d, self.x1, self.y10)
assert_raises(ValueError, interp1d, self.x10, self.y1)
assert_raises(ValueError, interp1d, self.x1, self.y1)
# Bad fill values
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=(-1, -1, -1)) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[-1, -1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=np.array((-1, -1, -1))) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[[-1]]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=[-1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=np.array([])) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x10, self.y10, kind='linear',
fill_value=()) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
axis=0, fill_value=[-1, -1]) # doesn't broadcast
assert_raises(ValueError, interp1d, self.x2, self.y210, kind='linear',
axis=0, fill_value=(0., [-1, -1])) # above doesn't bc
def test_init(self):
# Check that the attributes are initialized appropriately by the
# constructor.
assert_(interp1d(self.x10, self.y10).copy)
assert_(not interp1d(self.x10, self.y10, copy=False).copy)
assert_(interp1d(self.x10, self.y10).bounds_error)
assert_(not interp1d(self.x10, self.y10, bounds_error=False).bounds_error)
assert_(np.isnan(interp1d(self.x10, self.y10).fill_value))
assert_equal(interp1d(self.x10, self.y10, fill_value=3.0).fill_value,
3.0)
assert_equal(interp1d(self.x10, self.y10, fill_value=(1.0, 2.0)).fill_value,
(1.0, 2.0))
assert_equal(interp1d(self.x10, self.y10).axis, 0)
assert_equal(interp1d(self.x10, self.y210).axis, 1)
assert_equal(interp1d(self.x10, self.y102, axis=0).axis, 0)
assert_array_equal(interp1d(self.x10, self.y10).x, self.x10)
assert_array_equal(interp1d(self.x10, self.y10).y, self.y10)
assert_array_equal(interp1d(self.x10, self.y210).y, self.y210)
def test_assume_sorted(self):
# Check for unsorted arrays
interp10 = interp1d(self.x10, self.y10)
interp10_unsorted = interp1d(self.x10[::-1], self.y10[::-1])
assert_array_almost_equal(interp10_unsorted(self.x10), self.y10)
assert_array_almost_equal(interp10_unsorted(1.2), np.array([1.2]))
assert_array_almost_equal(interp10_unsorted([2.4, 5.6, 6.0]),
interp10([2.4, 5.6, 6.0]))
# Check assume_sorted keyword (defaults to False)
interp10_assume_kw = interp1d(self.x10[::-1], self.y10[::-1],
assume_sorted=False)
assert_array_almost_equal(interp10_assume_kw(self.x10), self.y10)
interp10_assume_kw2 = interp1d(self.x10[::-1], self.y10[::-1],
assume_sorted=True)
# Should raise an error for unsorted input if assume_sorted=True
assert_raises(ValueError, interp10_assume_kw2, self.x10)
# Check that if y is a 2-D array, things are still consistent
interp10_y_2d = interp1d(self.x10, self.y210)
interp10_y_2d_unsorted = interp1d(self.x10[::-1], self.y210[:, ::-1])
assert_array_almost_equal(interp10_y_2d(self.x10),
interp10_y_2d_unsorted(self.x10))
def test_linear(self):
for kind in ['linear', 'slinear']:
self._check_linear(kind)
def _check_linear(self, kind):
# Check the actual implementation of linear interpolation.
interp10 = interp1d(self.x10, self.y10, kind=kind)
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array([1.2]))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2.4, 5.6, 6.0]))
# test fill_value="extrapolate"
extrapolator = interp1d(self.x10, self.y10, kind=kind,
fill_value='extrapolate')
assert_allclose(extrapolator([-1., 0, 9, 11]),
[-1, 0, 9, 11], rtol=1e-14)
opts = dict(kind=kind,
fill_value='extrapolate',
bounds_error=True)
assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
def test_linear_dtypes(self):
# regression test for gh-5898, where 1D linear interpolation has been
# delegated to numpy.interp for all float dtypes, and the latter was
# not handling e.g. np.float128.
for dtyp in np.sctypes["float"]:
x = np.arange(8, dtype=dtyp)
y = x
yp = interp1d(x, y, kind='linear')(x)
assert_equal(yp.dtype, dtyp)
assert_allclose(yp, y, atol=1e-15)
def test_slinear_dtypes(self):
# regression test for gh-7273: 1D slinear interpolation fails with
# float32 inputs
dt_r = [np.float16, np.float32, np.float64]
dt_rc = dt_r + [np.complex64, np.complex128]
spline_kinds = ['slinear', 'zero', 'quadratic', 'cubic']
for dtx in dt_r:
x = np.arange(0, 10, dtype=dtx)
for dty in dt_rc:
y = np.exp(-x/3.0).astype(dty)
for dtn in dt_r:
xnew = x.astype(dtn)
for kind in spline_kinds:
f = interp1d(x, y, kind=kind, bounds_error=False)
assert_allclose(f(xnew), y, atol=1e-7,
err_msg="%s, %s %s" % (dtx, dty, dtn))
def test_cubic(self):
# Check the actual implementation of spline interpolation.
interp10 = interp1d(self.x10, self.y10, kind='cubic')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array([1.2]))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2.4, 5.6, 6.0]),)
def test_nearest(self):
# Check the actual implementation of nearest-neighbour interpolation.
interp10 = interp1d(self.x10, self.y10, kind='nearest')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array(1.))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2., 6., 6.]),)
# test fill_value="extrapolate"
extrapolator = interp1d(self.x10, self.y10, kind='nearest',
fill_value='extrapolate')
assert_allclose(extrapolator([-1., 0, 9, 11]),
[0, 0, 9, 9], rtol=1e-14)
opts = dict(kind='nearest',
fill_value='extrapolate',
bounds_error=True)
assert_raises(ValueError, interp1d, self.x10, self.y10, **opts)
def test_zero(self):
# Check the actual implementation of zero-order spline interpolation.
interp10 = interp1d(self.x10, self.y10, kind='zero')
assert_array_almost_equal(interp10(self.x10), self.y10)
assert_array_almost_equal(interp10(1.2), np.array(1.))
assert_array_almost_equal(interp10([2.4, 5.6, 6.0]),
np.array([2., 5., 6.]))
def _bounds_check(self, kind='linear'):
# Test that our handling of out-of-bounds input is correct.
extrap10 = interp1d(self.x10, self.y10, fill_value=self.fill_value,
bounds_error=False, kind=kind)
assert_array_equal(extrap10(11.2), np.array(self.fill_value))
assert_array_equal(extrap10(-3.4), np.array(self.fill_value))
assert_array_equal(extrap10([[[11.2], [-3.4], [12.6], [19.3]]]),
np.array(self.fill_value),)
assert_array_equal(extrap10._check_bounds(
np.array([-1.0, 0.0, 5.0, 9.0, 11.0])),
np.array([[True, False, False, False, False],
[False, False, False, False, True]]))
raises_bounds_error = interp1d(self.x10, self.y10, bounds_error=True,
kind=kind)
assert_raises(ValueError, raises_bounds_error, -1.0)
assert_raises(ValueError, raises_bounds_error, 11.0)
raises_bounds_error([0.0, 5.0, 9.0])
def _bounds_check_int_nan_fill(self, kind='linear'):
x = np.arange(10).astype(np.int_)
y = np.arange(10).astype(np.int_)
c = interp1d(x, y, kind=kind, fill_value=np.nan, bounds_error=False)
yi = c(x - 1)
assert_(np.isnan(yi[0]))
assert_array_almost_equal(yi, np.r_[np.nan, y[:-1]])
def test_bounds(self):
for kind in ('linear', 'cubic', 'nearest',
'slinear', 'zero', 'quadratic'):
self._bounds_check(kind)
self._bounds_check_int_nan_fill(kind)
def _check_fill_value(self, kind):
interp = interp1d(self.x10, self.y10, kind=kind,
fill_value=(-100, 100), bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), -100)
assert_array_almost_equal(interp([-10, 10]), [-100, 100])
# Proper broadcasting:
# interp along axis of length 5
# other dim=(2, 3), (3, 2), (2, 2), or (2,)
# one singleton fill_value (works for all)
for y in (self.y235, self.y325, self.y225, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=100, bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), 100)
assert_array_almost_equal(interp([-10, 10]), 100)
# singleton lower, singleton upper
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=(-100, 100), bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), -100)
if y.ndim == 3:
result = [[[-100, 100]] * y.shape[1]] * y.shape[0]
else:
result = [[-100, 100]] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# one broadcastable (3,) fill_value
fill_value = [100, 200, 300]
for y in (self.y325, self.y225):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
assert_array_almost_equal(interp(-10), [[100, 200, 300]] * 2)
assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
[200, 200],
[300, 300]]] * 2)
# one broadcastable (2,) fill_value
fill_value = [100, 200]
assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for y in (self.y225, self.y325, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
result = [100, 200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(10), result)
assert_array_almost_equal(interp(-10), result)
result = [[100, 100], [200, 200]]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# broadcastable (3,) lower, singleton upper
fill_value = (np.array([-100, -200, -300]), 100)
for y in (self.y325, self.y225):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), 100)
assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
[-200, 100],
[-300, 100]]] * 2)
# broadcastable (2,) lower, singleton upper
fill_value = (np.array([-100, -200]), 100)
assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for y in (self.y225, self.y325, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), 100)
result = [-100, -200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(-10), result)
result = [[-100, 100], [-200, 100]]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# broadcastable (3,) lower, broadcastable (3,) upper
fill_value = ([-100, -200, -300], [100, 200, 300])
for y in (self.y325, self.y225):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for ii in range(2): # check ndarray as well as list here
if ii == 1:
fill_value = tuple(np.array(f) for f in fill_value)
interp = interp1d(self.x5, self.y235, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200, 300]] * 2)
assert_array_almost_equal(interp(-10), [[-100, -200, -300]] * 2)
assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
[-200, 200],
[-300, 300]]] * 2)
# broadcastable (2,) lower, broadcastable (2,) upper
fill_value = ([-100, -200], [100, 200])
assert_raises(ValueError, interp1d, self.x5, self.y235, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for y in (self.y325, self.y225, self.y25):
interp = interp1d(self.x5, y, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
result = [100, 200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(10), result)
result = [-100, -200]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp(-10), result)
result = [[-100, 100], [-200, 200]]
if y.ndim == 3:
result = [result] * y.shape[0]
assert_array_almost_equal(interp([-10, 10]), result)
# one broadcastable (2, 2) array-like
fill_value = [[100, 200], [1000, 2000]]
for y in (self.y235, self.y325, self.y25):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for ii in range(2):
if ii == 1:
fill_value = np.array(fill_value)
interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
assert_array_almost_equal(interp(-10), [[100, 200], [1000, 2000]])
assert_array_almost_equal(interp([-10, 10]), [[[100, 100],
[200, 200]],
[[1000, 1000],
[2000, 2000]]])
# broadcastable (2, 2) lower, broadcastable (2, 2) upper
fill_value = ([[-100, -200], [-1000, -2000]],
[[100, 200], [1000, 2000]])
for y in (self.y235, self.y325, self.y25):
assert_raises(ValueError, interp1d, self.x5, y, kind=kind,
axis=-1, fill_value=fill_value, bounds_error=False)
for ii in range(2):
if ii == 1:
fill_value = (np.array(fill_value[0]), np.array(fill_value[1]))
interp = interp1d(self.x5, self.y225, kind=kind, axis=-1,
fill_value=fill_value, bounds_error=False)
assert_array_almost_equal(interp(10), [[100, 200], [1000, 2000]])
assert_array_almost_equal(interp(-10), [[-100, -200],
[-1000, -2000]])
assert_array_almost_equal(interp([-10, 10]), [[[-100, 100],
[-200, 200]],
[[-1000, 1000],
[-2000, 2000]]])
def test_fill_value(self):
# test that two-element fill value works
for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
'zero'):
self._check_fill_value(kind)
def test_fill_value_writeable(self):
# backwards compat: fill_value is a public writeable attribute
interp = interp1d(self.x10, self.y10, fill_value=123.0)
assert_equal(interp.fill_value, 123.0)
interp.fill_value = 321.0
assert_equal(interp.fill_value, 321.0)
def _nd_check_interp(self, kind='linear'):
# Check the behavior when the inputs and outputs are multidimensional.
# Multidimensional input.
interp10 = interp1d(self.x10, self.y10, kind=kind)
assert_array_almost_equal(interp10(np.array([[3., 5.], [2., 7.]])),
np.array([[3., 5.], [2., 7.]]))
# Scalar input -> 0-dim scalar array output
assert_(isinstance(interp10(1.2), np.ndarray))
assert_equal(interp10(1.2).shape, ())
# Multidimensional outputs.
interp210 = interp1d(self.x10, self.y210, kind=kind)
assert_array_almost_equal(interp210(1.), np.array([1., 11.]))
assert_array_almost_equal(interp210(np.array([1., 2.])),
np.array([[1., 2.], [11., 12.]]))
interp102 = interp1d(self.x10, self.y102, axis=0, kind=kind)
assert_array_almost_equal(interp102(1.), np.array([2.0, 3.0]))
assert_array_almost_equal(interp102(np.array([1., 3.])),
np.array([[2., 3.], [6., 7.]]))
# Both at the same time!
x_new = np.array([[3., 5.], [2., 7.]])
assert_array_almost_equal(interp210(x_new),
np.array([[[3., 5.], [2., 7.]],
[[13., 15.], [12., 17.]]]))
assert_array_almost_equal(interp102(x_new),
np.array([[[6., 7.], [10., 11.]],
[[4., 5.], [14., 15.]]]))
def _nd_check_shape(self, kind='linear'):
# Check large ndim output shape
a = [4, 5, 6, 7]
y = np.arange(np.prod(a)).reshape(*a)
for n, s in enumerate(a):
x = np.arange(s)
z = interp1d(x, y, axis=n, kind=kind)
assert_array_almost_equal(z(x), y, err_msg=kind)
x2 = np.arange(2*3*1).reshape((2,3,1)) / 12.
b = list(a)
b[n:n+1] = [2,3,1]
assert_array_almost_equal(z(x2).shape, b, err_msg=kind)
def test_nd(self):
for kind in ('linear', 'cubic', 'slinear', 'quadratic', 'nearest',
'zero'):
self._nd_check_interp(kind)
self._nd_check_shape(kind)
def _check_complex(self, dtype=np.complex_, kind='linear'):
x = np.array([1, 2.5, 3, 3.1, 4, 6.4, 7.9, 8.0, 9.5, 10])
y = x * x ** (1 + 2j)
y = y.astype(dtype)
# simple test
c = interp1d(x, y, kind=kind)
assert_array_almost_equal(y[:-1], c(x)[:-1])
# check against interpolating real+imag separately
xi = np.linspace(1, 10, 31)
cr = interp1d(x, y.real, kind=kind)
ci = interp1d(x, y.imag, kind=kind)
assert_array_almost_equal(c(xi).real, cr(xi))
assert_array_almost_equal(c(xi).imag, ci(xi))
def test_complex(self):
for kind in ('linear', 'nearest', 'cubic', 'slinear', 'quadratic',
'zero'):
self._check_complex(np.complex64, kind)
self._check_complex(np.complex128, kind)
def test_circular_refs(self):
# Test interp1d can be automatically garbage collected
x = np.linspace(0, 1)
y = np.linspace(0, 1)
# Confirm interp can be released from memory after use
with assert_deallocated(interp1d, x, y) as interp:
new_y = interp([0.1, 0.2])
del interp
def test_overflow_nearest(self):
# Test that the x range doesn't overflow when given integers as input
x = np.array([0, 50, 127], dtype=np.int8)
ii = interp1d(x, x, kind='nearest')
assert_array_almost_equal(ii(x), x)
def test_local_nans(self):
# check that for local interpolation kinds (slinear, zero) a single nan
# only affects its local neighborhood
x = np.arange(10).astype(float)
y = x.copy()
y[6] = np.nan
for kind in ('zero', 'slinear'):
ir = interp1d(x, y, kind=kind)
vals = ir([4.9, 7.0])
assert_(np.isfinite(vals).all())
def test_spline_nans(self):
# Backwards compat: a single nan makes the whole spline interpolation
# return nans in an array of the correct shape. It doesn't raise; it
# just returns quiet nans, for backwards compatibility.
x = np.arange(8).astype(float)
y = x.copy()
yn = y.copy()
yn[3] = np.nan
for kind in ['quadratic', 'cubic']:
ir = interp1d(x, y, kind=kind)
irn = interp1d(x, yn, kind=kind)
for xnew in (6, [1, 6], [[1, 6], [3, 5]]):
xnew = np.asarray(xnew)
out, outn = ir(x), irn(x)
assert_(np.isnan(outn).all())
assert_equal(out.shape, outn.shape)
class TestLagrange(object):
def test_lagrange(self):
p = poly1d([5,2,1,4,3])
xs = np.arange(len(p.coeffs))
ys = p(xs)
pl = lagrange(xs,ys)
assert_array_almost_equal(p.coeffs,pl.coeffs)
class TestAkima1DInterpolator(object):
def test_eval(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344, 5.9803623910336236590978842,
5.5067291516462386624652936, 5.2031367459745245795943447,
4.1796554159017080820603951, 3.4110386597938129327189927,
3.])
assert_allclose(ak(xi), yi)
def test_eval_2d(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
y = np.column_stack((y, 2. * y))
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344,
5.9803623910336236590978842,
5.5067291516462386624652936,
5.2031367459745245795943447,
4.1796554159017080820603951,
3.4110386597938129327189927, 3.])
yi = np.column_stack((yi, 2. * yi))
assert_allclose(ak(xi), yi)
def test_eval_3d(self):
x = np.arange(0., 11.)
y_ = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
y = np.empty((11, 2, 2))
y[:, 0, 0] = y_
y[:, 1, 0] = 2. * y_
y[:, 0, 1] = 3. * y_
y[:, 1, 1] = 4. * y_
ak = Akima1DInterpolator(x, y)
xi = np.array([0., 0.5, 1., 1.5, 2.5, 3.5, 4.5, 5.1, 6.5, 7.2,
8.6, 9.9, 10.])
yi = np.empty((13, 2, 2))
yi_ = np.array([0., 1.375, 2., 1.5, 1.953125, 2.484375,
4.1363636363636366866103344,
5.9803623910336236590978842,
5.5067291516462386624652936,
5.2031367459745245795943447,
4.1796554159017080820603951,
3.4110386597938129327189927, 3.])
yi[:, 0, 0] = yi_
yi[:, 1, 0] = 2. * yi_
yi[:, 0, 1] = 3. * yi_
yi[:, 1, 1] = 4. * yi_
assert_allclose(ak(xi), yi)
def test_degenerate_case_multidimensional(self):
# This test is for issue #5683.
x = np.array([0, 1, 2])
y = np.vstack((x, x**2)).T
ak = Akima1DInterpolator(x, y)
x_eval = np.array([0.5, 1.5])
y_eval = ak(x_eval)
assert_allclose(y_eval, np.vstack((x_eval, x_eval**2)).T)
def test_extend(self):
x = np.arange(0., 11.)
y = np.array([0., 2., 1., 3., 2., 6., 5.5, 5.5, 2.7, 5.1, 3.])
ak = Akima1DInterpolator(x, y)
try:
ak.extend(None, None)
except NotImplementedError as e:
if str(e) != ("Extending a 1D Akima interpolator is not "
"yet implemented"):
raise
except:
raise
class TestPPolyCommon(object):
# test basic functionality for PPoly and BPoly
def test_sort_check(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 1, 0.5])
assert_raises(ValueError, PPoly, c, x)
assert_raises(ValueError, BPoly, c, x)
def test_ctor_c(self):
# wrong shape: `c` must be at least 2-dimensional
with assert_raises(ValueError):
PPoly([1, 2], [0, 1])
def test_extend(self):
# Test adding new points to the piecewise polynomial
np.random.seed(1234)
order = 3
x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
for cls in (PPoly, BPoly):
pp = cls(c[:,:9], x[:10])
pp.extend(c[:,9:], x[10:])
pp2 = cls(c[:, 10:], x[10:])
pp2.extend(c[:, :10], x[:10])
pp3 = cls(c, x)
assert_array_equal(pp.c, pp3.c)
assert_array_equal(pp.x, pp3.x)
assert_array_equal(pp2.c, pp3.c)
assert_array_equal(pp2.x, pp3.x)
def test_extend_diff_orders(self):
# Test extending a polynomial with one of a different order
np.random.seed(1234)
x = np.linspace(0, 1, 6)
c = np.random.rand(2, 5)
x2 = np.linspace(1, 2, 6)
c2 = np.random.rand(4, 5)
for cls in (PPoly, BPoly):
pp1 = cls(c, x)
pp2 = cls(c2, x2)
pp_comb = cls(c, x)
pp_comb.extend(c2, x2[1:])
# NB. does not match pp1 at the endpoint, because pp1 is not
# continuous with pp2 since we took random coefficients.
xi1 = np.linspace(0, 1, 300, endpoint=False)
xi2 = np.linspace(1, 2, 300)
assert_allclose(pp1(xi1), pp_comb(xi1))
assert_allclose(pp2(xi2), pp_comb(xi2))
def test_extend_descending(self):
np.random.seed(0)
order = 3
x = np.sort(np.random.uniform(0, 10, 20))
c = np.random.rand(order + 1, x.shape[0] - 1, 2, 3)
for cls in (PPoly, BPoly):
p = cls(c, x)
p1 = cls(c[:, :9], x[:10])
p1.extend(c[:, 9:], x[10:])
p2 = cls(c[:, 10:], x[10:])
p2.extend(c[:, :10], x[:10])
assert_array_equal(p1.c, p.c)
assert_array_equal(p1.x, p.x)
assert_array_equal(p2.c, p.c)
assert_array_equal(p2.x, p.x)
def test_shape(self):
np.random.seed(1234)
c = np.random.rand(8, 12, 5, 6, 7)
x = np.sort(np.random.rand(13))
xp = np.random.rand(3, 4)
for cls in (PPoly, BPoly):
p = cls(c, x)
assert_equal(p(xp).shape, (3, 4, 5, 6, 7))
# 'scalars'
for cls in (PPoly, BPoly):
p = cls(c[..., 0, 0, 0], x)
assert_equal(np.shape(p(0.5)), ())
assert_equal(np.shape(p(np.array(0.5))), ())
# can't use dtype=object (with any numpy; what fails is
# constructing the object array here for old numpy)
assert_raises(ValueError, p, np.array([[0.1, 0.2], [0.4]]))
def test_complex_coef(self):
np.random.seed(12345)
x = np.sort(np.random.random(13))
c = np.random.random((8, 12)) * (1. + 0.3j)
c_re, c_im = c.real, c.imag
xp = np.random.random(5)
for cls in (PPoly, BPoly):
p, p_re, p_im = cls(c, x), cls(c_re, x), cls(c_im, x)
for nu in [0, 1, 2]:
assert_allclose(p(xp, nu).real, p_re(xp, nu))
assert_allclose(p(xp, nu).imag, p_im(xp, nu))
def test_axis(self):
np.random.seed(12345)
c = np.random.rand(3, 4, 5, 6, 7, 8)
c_s = c.shape
xp = np.random.random((1, 2))
for axis in (0, 1, 2, 3):
k, m = c.shape[axis], c.shape[axis+1]
x = np.sort(np.random.rand(m+1))
for cls in (PPoly, BPoly):
p = cls(c, x, axis=axis)
assert_equal(p.c.shape,
c_s[axis:axis+2] + c_s[:axis] + c_s[axis+2:])
res = p(xp)
targ_shape = c_s[:axis] + xp.shape + c_s[2+axis:]
assert_equal(res.shape, targ_shape)
# deriv/antideriv does not drop the axis
for p1 in [cls(c, x, axis=axis).derivative(),
cls(c, x, axis=axis).derivative(2),
cls(c, x, axis=axis).antiderivative(),
cls(c, x, axis=axis).antiderivative(2)]:
assert_equal(p1.axis, p.axis)
# c array needs two axes for the coefficients and intervals, so
# 0 <= axis < c.ndim-1; raise otherwise
for axis in (-1, 4, 5, 6):
for cls in (BPoly, PPoly):
assert_raises(ValueError, cls, **dict(c=c, x=x, axis=axis))
class TestPolySubclassing(object):
class P(PPoly):
pass
class B(BPoly):
pass
def _make_polynomials(self):
np.random.seed(1234)
x = np.sort(np.random.random(3))
c = np.random.random((4, 2))
return self.P(c, x), self.B(c, x)
def test_derivative(self):
pp, bp = self._make_polynomials()
for p in (pp, bp):
pd = p.derivative()
assert_equal(p.__class__, pd.__class__)
ppa = pp.antiderivative()
assert_equal(pp.__class__, ppa.__class__)
def test_from_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = self.P.from_spline(spl)
assert_equal(pp.__class__, self.P)
def test_conversions(self):
pp, bp = self._make_polynomials()
pp1 = self.P.from_bernstein_basis(bp)
assert_equal(pp1.__class__, self.P)
bp1 = self.B.from_power_basis(pp)
assert_equal(bp1.__class__, self.B)
def test_from_derivatives(self):
x = [0, 1, 2]
y = [[1], [2], [3]]
bp = self.B.from_derivatives(x, y)
assert_equal(bp.__class__, self.B)
class TestPPoly(object):
def test_simple(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
def test_periodic(self):
c = np.array([[1, 4], [2, 5], [3, 6]])
x = np.array([0, 0.5, 1])
p = PPoly(c, x, extrapolate='periodic')
assert_allclose(p(1.3), 1 * 0.3 ** 2 + 2 * 0.3 + 3)
assert_allclose(p(-0.3), 4 * (0.7 - 0.5) ** 2 + 5 * (0.7 - 0.5) + 6)
assert_allclose(p(1.3, 1), 2 * 0.3 + 2)
assert_allclose(p(-0.3, 1), 8 * (0.7 - 0.5) + 5)
def test_descending(self):
def binom_matrix(power):
n = np.arange(power + 1).reshape(-1, 1)
k = np.arange(power + 1)
B = binom(n, k)
return B[::-1, ::-1]
np.random.seed(0)
power = 3
for m in [10, 20, 30]:
x = np.sort(np.random.uniform(0, 10, m + 1))
ca = np.random.uniform(-2, 2, size=(power + 1, m))
h = np.diff(x)
h_powers = h[None, :] ** np.arange(power + 1)[::-1, None]
B = binom_matrix(power)
cap = ca * h_powers
cdp = np.dot(B.T, cap)
cd = cdp / h_powers
pa = PPoly(ca, x, extrapolate=True)
pd = PPoly(cd[:, ::-1], x[::-1], extrapolate=True)
x_test = np.random.uniform(-10, 20, 100)
assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
pa_d = pa.derivative()
pd_d = pd.derivative()
assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)
            # Antiderivatives won't be equal because fixing continuity is
            # done in the reverse order, but the differences over any
            # interval should agree.
pa_i = pa.antiderivative()
pd_i = pd.antiderivative()
for a, b in np.random.uniform(-10, 20, (5, 2)):
int_a = pa.integrate(a, b)
int_d = pd.integrate(a, b)
assert_allclose(int_a, int_d, rtol=1e-13)
assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
rtol=1e-13)
roots_d = pd.roots()
roots_a = pa.roots()
assert_allclose(roots_a, np.sort(roots_d), rtol=1e-12)
def test_multi_shape(self):
c = np.random.rand(6, 2, 1, 2, 3)
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
assert_equal(p.x.shape, x.shape)
assert_equal(p.c.shape, c.shape)
assert_equal(p(0.3).shape, c.shape[2:])
assert_equal(p(np.random.rand(5, 6)).shape, (5, 6) + c.shape[2:])
dp = p.derivative()
assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
ip = p.antiderivative()
assert_equal(ip.c.shape, (7, 2, 1, 2, 3))
def test_construct_fast(self):
np.random.seed(1234)
c = np.array([[1, 4], [2, 5], [3, 6]], dtype=float)
x = np.array([0, 0.5, 1])
p = PPoly.construct_fast(c, x)
assert_allclose(p(0.3), 1*0.3**2 + 2*0.3 + 3)
assert_allclose(p(0.7), 4*(0.7-0.5)**2 + 5*(0.7-0.5) + 6)
def test_vs_alternative_implementations(self):
np.random.seed(1234)
c = np.random.rand(3, 12, 22)
x = np.sort(np.r_[0, np.random.rand(11), 1])
p = PPoly(c, x)
xp = np.r_[0.3, 0.5, 0.33, 0.6]
expected = _ppoly_eval_1(c, x, xp)
assert_allclose(p(xp), expected)
expected = _ppoly_eval_2(c[:,:,0], x, xp)
assert_allclose(p(xp)[:,0], expected)
def test_from_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
assert_allclose(pp(xi), splev(xi, spl))
# make sure .from_spline accepts BSpline objects
b = BSpline(*spl)
ppp = PPoly.from_spline(b)
assert_allclose(ppp(xi), b(xi))
# BSpline's extrapolate attribute propagates unless overridden
t, c, k = spl
for extrap in (None, True, False):
b = BSpline(t, c, k, extrapolate=extrap)
p = PPoly.from_spline(b)
assert_equal(p.extrapolate, b.extrapolate)
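        # A compact usage sketch, not part of the original suite: from_spline
        # turns a FITPACK tck tuple into an equivalent piecewise power-basis
        # polynomial.
        def _example_from_spline():
            xs = np.linspace(0, 1, 12)
            ys = np.sin(2 * np.pi * xs)
            tck = splrep(xs, ys, s=0)       # cubic B-spline interpolant
            pp = PPoly.from_spline(tck)
            xi = np.linspace(0, 1, 50)
            assert np.allclose(pp(xi), splev(xi, tck))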
def test_derivative_simple(self):
np.random.seed(1234)
c = np.array([[4, 3, 2, 1]]).T
dc = np.array([[3*4, 2*3, 2]]).T
ddc = np.array([[2*3*4, 1*2*3]]).T
x = np.array([0, 1])
pp = PPoly(c, x)
dpp = PPoly(dc, x)
ddpp = PPoly(ddc, x)
assert_allclose(pp.derivative().c, dpp.c)
assert_allclose(pp.derivative(2).c, ddpp.c)
def test_derivative_eval(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
for dx in range(0, 3):
assert_allclose(pp(xi, dx), splev(xi, spl, dx))
def test_derivative(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
xi = np.linspace(0, 1, 200)
for dx in range(0, 10):
assert_allclose(pp(xi, dx), pp.derivative(dx)(xi),
err_msg="dx=%d" % (dx,))
def test_antiderivative_of_constant(self):
# https://github.com/scipy/scipy/issues/4216
p = PPoly([[1.]], [0, 1])
assert_equal(p.antiderivative().c, PPoly([[1], [0]], [0, 1]).c)
assert_equal(p.antiderivative().x, PPoly([[1], [0]], [0, 1]).x)
def test_antiderivative_regression_4355(self):
# https://github.com/scipy/scipy/issues/4355
p = PPoly([[1., 0.5]], [0, 1, 2])
q = p.antiderivative()
assert_equal(q.c, [[1, 0.5], [0, 1]])
assert_equal(q.x, [0, 1, 2])
assert_allclose(p.integrate(0, 2), 1.5)
assert_allclose(q(2) - q(0), 1.5)
def test_antiderivative_simple(self):
np.random.seed(1234)
# [ p1(x) = 3*x**2 + 2*x + 1,
# p2(x) = 1.6875]
c = np.array([[3, 2, 1], [0, 0, 1.6875]]).T
# [ pp1(x) = x**3 + x**2 + x,
# pp2(x) = 1.6875*(x - 0.25) + pp1(0.25)]
ic = np.array([[1, 1, 1, 0], [0, 0, 1.6875, 0.328125]]).T
# [ ppp1(x) = (1/4)*x**4 + (1/3)*x**3 + (1/2)*x**2,
# ppp2(x) = (1.6875/2)*(x - 0.25)**2 + pp1(0.25)*x + ppp1(0.25)]
iic = np.array([[1/4, 1/3, 1/2, 0, 0],
[0, 0, 1.6875/2, 0.328125, 0.037434895833333336]]).T
x = np.array([0, 0.25, 1])
pp = PPoly(c, x)
ipp = pp.antiderivative()
iipp = pp.antiderivative(2)
iipp2 = ipp.antiderivative()
assert_allclose(ipp.x, x)
assert_allclose(ipp.c.T, ic.T)
assert_allclose(iipp.c.T, iic.T)
assert_allclose(iipp2.c.T, iic.T)
def test_antiderivative_vs_derivative(self):
np.random.seed(1234)
x = np.linspace(0, 1, 30)**2
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
for dx in range(0, 10):
ipp = pp.antiderivative(dx)
# check that derivative is inverse op
pp2 = ipp.derivative(dx)
assert_allclose(pp.c, pp2.c)
# check continuity
for k in range(dx):
pp2 = ipp.derivative(k)
r = 1e-13
endpoint = r*pp2.x[:-1] + (1 - r)*pp2.x[1:]
assert_allclose(pp2(pp2.x[1:]), pp2(endpoint),
rtol=1e-7, err_msg="dx=%d k=%d" % (dx, k))
def test_antiderivative_vs_spline(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
for dx in range(0, 10):
pp2 = pp.antiderivative(dx)
spl2 = splantider(spl, dx)
xi = np.linspace(0, 1, 200)
assert_allclose(pp2(xi), splev(xi, spl2),
rtol=1e-7)
def test_antiderivative_continuity(self):
c = np.array([[2, 1, 2, 2], [2, 1, 3, 3]]).T
x = np.array([0, 0.5, 1])
p = PPoly(c, x)
ip = p.antiderivative()
# check continuity
assert_allclose(ip(0.5 - 1e-9), ip(0.5 + 1e-9), rtol=1e-8)
# check that only lowest order coefficients were changed
p2 = ip.derivative()
assert_allclose(p2.c, p.c)
def test_integrate(self):
np.random.seed(1234)
x = np.sort(np.r_[0, np.random.rand(11), 1])
y = np.random.rand(len(x))
spl = splrep(x, y, s=0, k=5)
pp = PPoly.from_spline(spl)
a, b = 0.3, 0.9
ig = pp.integrate(a, b)
ipp = pp.antiderivative()
assert_allclose(ig, ipp(b) - ipp(a))
assert_allclose(ig, splint(a, b, spl))
a, b = -0.3, 0.9
ig = pp.integrate(a, b, extrapolate=True)
assert_allclose(ig, ipp(b) - ipp(a))
assert_(np.isnan(pp.integrate(a, b, extrapolate=False)).all())
def test_integrate_periodic(self):
x = np.array([1, 2, 4])
c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
P = PPoly(c, x, extrapolate='periodic')
I = P.antiderivative()
period_int = I(4) - I(1)
assert_allclose(P.integrate(1, 4), period_int)
assert_allclose(P.integrate(-10, -7), period_int)
assert_allclose(P.integrate(-10, -4), 2 * period_int)
assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5 + 12, 5 + 12),
I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5, 5 + 12),
I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
assert_allclose(P.integrate(0, -1), I(2) - I(3))
assert_allclose(P.integrate(-9, -10), I(2) - I(3))
assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
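        # A short sketch, not from the original suite, of the decomposition the
        # assertions above rely on: a periodic integral splits into whole
        # periods plus a remainder inside the base interval.
        def _example_periodic_integral():
            x = np.array([1, 2, 4])
            c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
            p = PPoly(c, x, extrapolate='periodic')
            anti = p.antiderivative()
            period = anti(4) - anti(1)
            # 2.5 periods starting at x = 1: two full periods plus [1, 2.5]
            assert np.allclose(p.integrate(1, 8.5),
                               2 * period + (anti(2.5) - anti(1)))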
def test_roots(self):
x = np.linspace(0, 1, 31)**2
y = np.sin(30*x)
spl = splrep(x, y, s=0, k=3)
pp = PPoly.from_spline(spl)
r = pp.roots()
r = r[(r >= 0 - 1e-15) & (r <= 1 + 1e-15)]
assert_allclose(r, sproot(spl), atol=1e-15)
def test_roots_idzero(self):
# Roots for piecewise polynomials with identically zero
# sections.
c = np.array([[-1, 0.25], [0, 0], [-1, 0.25]]).T
x = np.array([0, 0.4, 0.6, 1.0])
pp = PPoly(c, x)
assert_array_equal(pp.roots(),
[0.25, 0.4, np.nan, 0.6 + 0.25])
# ditto for p.solve(const) with sections identically equal const
const = 2.
c1 = c.copy()
c1[1, :] += const
pp1 = PPoly(c1, x)
assert_array_equal(pp1.solve(const),
[0.25, 0.4, np.nan, 0.6 + 0.25])
def test_roots_all_zero(self):
# test the code path for the polynomial being identically zero everywhere
c = [[0], [0]]
x = [0, 1]
p = PPoly(c, x)
assert_array_equal(p.roots(), [0, np.nan])
assert_array_equal(p.solve(0), [0, np.nan])
assert_array_equal(p.solve(1), [])
c = [[0, 0], [0, 0]]
x = [0, 1, 2]
p = PPoly(c, x)
assert_array_equal(p.roots(), [0, np.nan, 1, np.nan])
assert_array_equal(p.solve(0), [0, np.nan, 1, np.nan])
assert_array_equal(p.solve(1), [])
def test_roots_repeated(self):
# Check roots repeated in multiple sections are reported only
# once.
# [(x + 1)**2 - 1, -x**2] ; x == 0 is a repeated root
c = np.array([[1, 0, -1], [-1, 0, 0]]).T
x = np.array([-1, 0, 1])
pp = PPoly(c, x)
assert_array_equal(pp.roots(), [-2, 0])
assert_array_equal(pp.roots(extrapolate=False), [0])
def test_roots_discont(self):
# Check that a discontinuity across zero is reported as root
c = np.array([[1], [-1]]).T
x = np.array([0, 0.5, 1])
pp = PPoly(c, x)
assert_array_equal(pp.roots(), [0.5])
assert_array_equal(pp.roots(discontinuity=False), [])
# ditto for a discontinuity across y:
assert_array_equal(pp.solve(0.5), [0.5])
assert_array_equal(pp.solve(0.5, discontinuity=False), [])
assert_array_equal(pp.solve(1.5), [])
assert_array_equal(pp.solve(1.5, discontinuity=False), [])
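        # A minimal sketch, not part of the original suite: solve() generalizes
        # roots() to arbitrary levels p(x) == y.
        def _example_solve_level():
            p = PPoly(np.array([[1.], [0.], [-1.]]), np.array([0., 2.]))  # x**2 - 1 on [0, 2]
            assert np.allclose(p.roots(extrapolate=False), [1.0])
            assert np.allclose(p.solve(1.25, extrapolate=False), [1.5])   # where x**2 - 1 == 1.25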
def test_roots_random(self):
# Check high-order polynomials with random coefficients
np.random.seed(1234)
num = 0
for extrapolate in (True, False):
for order in range(0, 20):
x = np.unique(np.r_[0, 10 * np.random.rand(30), 10])
c = 2*np.random.rand(order+1, len(x)-1, 2, 3) - 1
pp = PPoly(c, x)
for y in [0, np.random.random()]:
r = pp.solve(y, discontinuity=False, extrapolate=extrapolate)
for i in range(2):
for j in range(3):
rr = r[i,j]
if rr.size > 0:
# Check that the reported roots indeed are roots
num += rr.size
val = pp(rr, extrapolate=extrapolate)[:,i,j]
cmpval = pp(rr, nu=1,
extrapolate=extrapolate)[:,i,j]
msg = "(%r) r = %s" % (extrapolate, repr(rr),)
assert_allclose((val-y) / cmpval, 0, atol=1e-7,
err_msg=msg)
        # Check that a meaningful number of roots was actually tested
assert_(num > 100, repr(num))
def test_roots_croots(self):
# Test the complex root finding algorithm
np.random.seed(1234)
for k in range(1, 15):
c = np.random.rand(k, 1, 130)
if k == 3:
# add a case with zero discriminant
c[:,0,0] = 1, 2, 1
for y in [0, np.random.random()]:
w = np.empty(c.shape, dtype=complex)
_ppoly._croots_poly1(c, w)
if k == 1:
assert_(np.isnan(w).all())
continue
res = 0
cres = 0
for i in range(k):
res += c[i,None] * w**(k-1-i)
cres += abs(c[i,None] * w**(k-1-i))
with np.errstate(invalid='ignore'):
res /= cres
res = res.ravel()
res = res[~np.isnan(res)]
assert_allclose(res, 0, atol=1e-10)
def test_extrapolate_attr(self):
# [ 1 - x**2 ]
c = np.array([[-1, 0, 1]]).T
x = np.array([0, 1])
for extrapolate in [True, False, None]:
pp = PPoly(c, x, extrapolate=extrapolate)
pp_d = pp.derivative()
pp_i = pp.antiderivative()
if extrapolate is False:
assert_(np.isnan(pp([-0.1, 1.1])).all())
assert_(np.isnan(pp_i([-0.1, 1.1])).all())
assert_(np.isnan(pp_d([-0.1, 1.1])).all())
assert_equal(pp.roots(), [1])
else:
assert_allclose(pp([-0.1, 1.1]), [1-0.1**2, 1-1.1**2])
assert_(not np.isnan(pp_i([-0.1, 1.1])).any())
assert_(not np.isnan(pp_d([-0.1, 1.1])).any())
assert_allclose(pp.roots(), [1, -1])
class TestBPoly(object):
def test_simple(self):
x = [0, 1]
c = [[3]]
bp = BPoly(c, x)
assert_allclose(bp(0.1), 3.)
def test_simple2(self):
x = [0, 1]
c = [[3], [1]]
bp = BPoly(c, x) # 3*(1-x) + 1*x
assert_allclose(bp(0.1), 3*0.9 + 1.*0.1)
def test_simple3(self):
x = [0, 1]
c = [[3], [1], [4]]
bp = BPoly(c, x) # 3 * (1-x)**2 + 2 * x (1-x) + 4 * x**2
assert_allclose(bp(0.2),
3 * 0.8*0.8 + 1 * 2*0.2*0.8 + 4 * 0.2*0.2)
def test_simple4(self):
x = [0, 1]
c = [[1], [1], [1], [2]]
bp = BPoly(c, x)
assert_allclose(bp(0.3), 0.7**3 +
3 * 0.7**2 * 0.3 +
3 * 0.7 * 0.3**2 +
2 * 0.3**3)
def test_simple5(self):
x = [0, 1]
c = [[1], [1], [8], [2], [1]]
bp = BPoly(c, x)
assert_allclose(bp(0.3), 0.7**4 +
4 * 0.7**3 * 0.3 +
8 * 6 * 0.7**2 * 0.3**2 +
2 * 4 * 0.7 * 0.3**3 +
0.3**4)
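    # The hand-expanded checks above are instances of the Bernstein evaluation
    # rule; a generic restatement (not part of the original suite), using the
    # binom already imported for test_descending:
    def _example_bernstein_rule():
        c = np.array([[1.], [1.], [8.], [2.], [1.]])   # degree n = 4
        bp = BPoly(c, [0, 1])
        t, n = 0.3, 4
        manual = sum(c[a, 0] * binom(n, a) * t**a * (1 - t)**(n - a)
                     for a in range(n + 1))
        assert np.allclose(bp(t), manual)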
def test_periodic(self):
x = [0, 1, 3]
c = [[3, 0], [0, 0], [0, 2]]
# [3*(1-x)**2, 2*((x-1)/2)**2]
bp = BPoly(c, x, extrapolate='periodic')
assert_allclose(bp(3.4), 3 * 0.6**2)
assert_allclose(bp(-1.3), 2 * (0.7/2)**2)
assert_allclose(bp(3.4, 1), -6 * 0.6)
assert_allclose(bp(-1.3, 1), 2 * (0.7/2))
def test_descending(self):
np.random.seed(0)
power = 3
for m in [10, 20, 30]:
x = np.sort(np.random.uniform(0, 10, m + 1))
ca = np.random.uniform(-0.1, 0.1, size=(power + 1, m))
            # For the Bernstein basis, flipping the coefficient order is all
            # that is needed here.
cd = ca[::-1].copy()
pa = BPoly(ca, x, extrapolate=True)
pd = BPoly(cd[:, ::-1], x[::-1], extrapolate=True)
x_test = np.random.uniform(-10, 20, 100)
assert_allclose(pa(x_test), pd(x_test), rtol=1e-13)
assert_allclose(pa(x_test, 1), pd(x_test, 1), rtol=1e-13)
pa_d = pa.derivative()
pd_d = pd.derivative()
assert_allclose(pa_d(x_test), pd_d(x_test), rtol=1e-13)
            # Antiderivatives won't be equal because fixing continuity is
            # done in the reverse order, but the differences over any
            # interval should agree.
pa_i = pa.antiderivative()
pd_i = pd.antiderivative()
for a, b in np.random.uniform(-10, 20, (5, 2)):
int_a = pa.integrate(a, b)
int_d = pd.integrate(a, b)
assert_allclose(int_a, int_d, rtol=1e-12)
assert_allclose(pa_i(b) - pa_i(a), pd_i(b) - pd_i(a),
rtol=1e-12)
def test_multi_shape(self):
c = np.random.rand(6, 2, 1, 2, 3)
x = np.array([0, 0.5, 1])
p = BPoly(c, x)
assert_equal(p.x.shape, x.shape)
assert_equal(p.c.shape, c.shape)
assert_equal(p(0.3).shape, c.shape[2:])
assert_equal(p(np.random.rand(5,6)).shape,
(5,6)+c.shape[2:])
dp = p.derivative()
assert_equal(dp.c.shape, (5, 2, 1, 2, 3))
def test_interval_length(self):
x = [0, 2]
c = [[3], [1], [4]]
bp = BPoly(c, x)
xval = 0.1
s = xval / 2 # s = (x - xa) / (xb - xa)
assert_allclose(bp(xval), 3 * (1-s)*(1-s) + 1 * 2*s*(1-s) + 4 * s*s)
def test_two_intervals(self):
x = [0, 1, 3]
c = [[3, 0], [0, 0], [0, 2]]
bp = BPoly(c, x) # [3*(1-x)**2, 2*((x-1)/2)**2]
assert_allclose(bp(0.4), 3 * 0.6*0.6)
assert_allclose(bp(1.7), 2 * (0.7/2)**2)
def test_extrapolate_attr(self):
x = [0, 2]
c = [[3], [1], [4]]
bp = BPoly(c, x)
for extrapolate in (True, False, None):
bp = BPoly(c, x, extrapolate=extrapolate)
bp_d = bp.derivative()
if extrapolate is False:
assert_(np.isnan(bp([-0.1, 2.1])).all())
assert_(np.isnan(bp_d([-0.1, 2.1])).all())
else:
assert_(not np.isnan(bp([-0.1, 2.1])).any())
assert_(not np.isnan(bp_d([-0.1, 2.1])).any())
class TestBPolyCalculus(object):
def test_derivative(self):
x = [0, 1, 3]
c = [[3, 0], [0, 0], [0, 2]]
bp = BPoly(c, x) # [3*(1-x)**2, 2*((x-1)/2)**2]
bp_der = bp.derivative()
assert_allclose(bp_der(0.4), -6*(0.6))
assert_allclose(bp_der(1.7), 0.7)
        # derivatives evaluated directly via the nu argument, without
        # constructing new polynomial objects
assert_allclose([bp(0.4, nu=1), bp(0.4, nu=2), bp(0.4, nu=3)],
[-6*(1-0.4), 6., 0.])
assert_allclose([bp(1.7, nu=1), bp(1.7, nu=2), bp(1.7, nu=3)],
[0.7, 1., 0])
def test_derivative_ppoly(self):
# make sure it's consistent w/ power basis
np.random.seed(1234)
m, k = 5, 8 # number of intervals, order
x = np.sort(np.random.random(m))
c = np.random.random((k, m-1))
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
for d in range(k):
bp = bp.derivative()
pp = pp.derivative()
xp = np.linspace(x[0], x[-1], 21)
assert_allclose(bp(xp), pp(xp))
def test_deriv_inplace(self):
np.random.seed(1234)
m, k = 5, 8 # number of intervals, order
x = np.sort(np.random.random(m))
c = np.random.random((k, m-1))
# test both real and complex coefficients
for cc in [c.copy(), c*(1. + 2.j)]:
bp = BPoly(cc, x)
xp = np.linspace(x[0], x[-1], 21)
for i in range(k):
assert_allclose(bp(xp, i), bp.derivative(i)(xp))
def test_antiderivative_simple(self):
# f(x) = x for x \in [0, 1),
# (x-1)/2 for x \in [1, 3]
#
# antiderivative is then
# F(x) = x**2 / 2 for x \in [0, 1),
# 0.5*x*(x/2 - 1) + A for x \in [1, 3]
# where A = 3/4 for continuity at x = 1.
x = [0, 1, 3]
c = [[0, 0], [1, 1]]
bp = BPoly(c, x)
bi = bp.antiderivative()
xx = np.linspace(0, 3, 11)
assert_allclose(bi(xx),
np.where(xx < 1, xx**2 / 2.,
0.5 * xx * (xx/2. - 1) + 3./4),
atol=1e-12, rtol=1e-12)
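        # A small sketch, not from the original suite, isolating the
        # integration constant mentioned above: it is exactly what makes the
        # antiderivative continuous at the interior breakpoint x = 1.
        def _example_antider_constant():
            bp = BPoly([[0, 0], [1, 1]], [0, 1, 3])
            bi = bp.antiderivative()
            assert np.allclose(bi(1 - 1e-12), bi(1 + 1e-12))
            assert np.allclose(bi(1.0), 0.5)   # == 1**2 / 2, so A = 3/4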
def test_der_antider(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10, 2, 3))
bp = BPoly(c, x)
xx = np.linspace(x[0], x[-1], 100)
assert_allclose(bp.antiderivative().derivative()(xx),
bp(xx), atol=1e-12, rtol=1e-12)
def test_antider_ppoly(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10, 2, 3))
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
xx = np.linspace(x[0], x[-1], 10)
assert_allclose(bp.antiderivative(2)(xx),
pp.antiderivative(2)(xx), atol=1e-12, rtol=1e-12)
def test_antider_continuous(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10))
bp = BPoly(c, x).antiderivative()
xx = bp.x[1:-1]
assert_allclose(bp(xx - 1e-14),
bp(xx + 1e-14), atol=1e-12, rtol=1e-12)
def test_integrate(self):
np.random.seed(1234)
x = np.sort(np.random.random(11))
c = np.random.random((4, 10))
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
assert_allclose(bp.integrate(0, 1),
pp.integrate(0, 1), atol=1e-12, rtol=1e-12)
def test_integrate_extrap(self):
c = [[1]]
x = [0, 1]
b = BPoly(c, x)
# default is extrapolate=True
assert_allclose(b.integrate(0, 2), 2., atol=1e-14)
# .integrate argument overrides self.extrapolate
b1 = BPoly(c, x, extrapolate=False)
assert_(np.isnan(b1.integrate(0, 2)))
assert_allclose(b1.integrate(0, 2, extrapolate=True), 2., atol=1e-14)
def test_integrate_periodic(self):
x = np.array([1, 2, 4])
c = np.array([[0., 0.], [-1., -1.], [2., -0.], [1., 2.]])
P = BPoly.from_power_basis(PPoly(c, x), extrapolate='periodic')
I = P.antiderivative()
period_int = I(4) - I(1)
assert_allclose(P.integrate(1, 4), period_int)
assert_allclose(P.integrate(-10, -7), period_int)
assert_allclose(P.integrate(-10, -4), 2 * period_int)
assert_allclose(P.integrate(1.5, 2.5), I(2.5) - I(1.5))
assert_allclose(P.integrate(3.5, 5), I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5 + 12, 5 + 12),
I(2) - I(1) + I(4) - I(3.5))
assert_allclose(P.integrate(3.5, 5 + 12),
I(2) - I(1) + I(4) - I(3.5) + 4 * period_int)
assert_allclose(P.integrate(0, -1), I(2) - I(3))
assert_allclose(P.integrate(-9, -10), I(2) - I(3))
assert_allclose(P.integrate(0, -10), I(2) - I(3) - 3 * period_int)
def test_antider_neg(self):
        # .derivative(-nu) ==> .antiderivative(nu) and vice versa
c = [[1]]
x = [0, 1]
b = BPoly(c, x)
xx = np.linspace(0, 1, 21)
assert_allclose(b.derivative(-1)(xx), b.antiderivative()(xx),
atol=1e-12, rtol=1e-12)
assert_allclose(b.derivative(1)(xx), b.antiderivative(-1)(xx),
atol=1e-12, rtol=1e-12)
class TestPolyConversions(object):
def test_bp_from_pp(self):
x = [0, 1, 3]
c = [[3, 2], [1, 8], [4, 3]]
pp = PPoly(c, x)
bp = BPoly.from_power_basis(pp)
pp1 = PPoly.from_bernstein_basis(bp)
xp = [0.1, 1.4]
assert_allclose(pp(xp), bp(xp))
assert_allclose(pp(xp), pp1(xp))
def test_bp_from_pp_random(self):
np.random.seed(1234)
m, k = 5, 8 # number of intervals, order
x = np.sort(np.random.random(m))
c = np.random.random((k, m-1))
pp = PPoly(c, x)
bp = BPoly.from_power_basis(pp)
pp1 = PPoly.from_bernstein_basis(bp)
xp = np.linspace(x[0], x[-1], 21)
assert_allclose(pp(xp), bp(xp))
assert_allclose(pp(xp), pp1(xp))
def test_pp_from_bp(self):
x = [0, 1, 3]
c = [[3, 3], [1, 1], [4, 2]]
bp = BPoly(c, x)
pp = PPoly.from_bernstein_basis(bp)
bp1 = BPoly.from_power_basis(pp)
xp = [0.1, 1.4]
assert_allclose(bp(xp), pp(xp))
assert_allclose(bp(xp), bp1(xp))
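    # A round-trip sketch, not part of the original suite: converting to the
    # Bernstein basis and back reproduces the same piecewise polynomial.
    def _example_basis_roundtrip():
        pp = PPoly([[3, 2], [1, 8], [4, 3]], [0, 1, 3])
        pp2 = PPoly.from_bernstein_basis(BPoly.from_power_basis(pp))
        xp = np.linspace(0, 3, 7)
        assert np.allclose(pp(xp), pp2(xp))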
class TestBPolyFromDerivatives(object):
def test_make_poly_1(self):
c1 = BPoly._construct_from_derivatives(0, 1, [2], [3])
assert_allclose(c1, [2., 3.])
def test_make_poly_2(self):
c1 = BPoly._construct_from_derivatives(0, 1, [1, 0], [1])
assert_allclose(c1, [1., 1., 1.])
# f'(0) = 3
c2 = BPoly._construct_from_derivatives(0, 1, [2, 3], [1])
assert_allclose(c2, [2., 7./2, 1.])
# f'(1) = 3
c3 = BPoly._construct_from_derivatives(0, 1, [2], [1, 3])
assert_allclose(c3, [2., -0.5, 1.])
def test_make_poly_3(self):
# f'(0)=2, f''(0)=3
c1 = BPoly._construct_from_derivatives(0, 1, [1, 2, 3], [4])
assert_allclose(c1, [1., 5./3, 17./6, 4.])
# f'(1)=2, f''(1)=3
c2 = BPoly._construct_from_derivatives(0, 1, [1], [4, 2, 3])
assert_allclose(c2, [1., 19./6, 10./3, 4.])
# f'(0)=2, f'(1)=3
c3 = BPoly._construct_from_derivatives(0, 1, [1, 2], [4, 3])
assert_allclose(c3, [1., 5./3, 3., 4.])
def test_make_poly_12(self):
np.random.seed(12345)
ya = np.r_[0, np.random.random(5)]
yb = np.r_[0, np.random.random(5)]
c = BPoly._construct_from_derivatives(0, 1, ya, yb)
pp = BPoly(c[:, None], [0, 1])
for j in range(6):
assert_allclose([pp(0.), pp(1.)], [ya[j], yb[j]])
pp = pp.derivative()
def test_raise_degree(self):
np.random.seed(12345)
x = [0, 1]
k, d = 8, 5
c = np.random.random((k, 1, 2, 3, 4))
bp = BPoly(c, x)
c1 = BPoly._raise_degree(c, d)
bp1 = BPoly(c1, x)
xp = np.linspace(0, 1, 11)
assert_allclose(bp(xp), bp1(xp))
def test_xi_yi(self):
assert_raises(ValueError, BPoly.from_derivatives, [0, 1], [0])
def test_coords_order(self):
xi = [0, 0, 1]
yi = [[0], [0], [0]]
assert_raises(ValueError, BPoly.from_derivatives, xi, yi)
def test_zeros(self):
xi = [0, 1, 2, 3]
yi = [[0, 0], [0], [0, 0], [0, 0]] # NB: will have to raise the degree
pp = BPoly.from_derivatives(xi, yi)
assert_(pp.c.shape == (4, 3))
ppd = pp.derivative()
for xp in [0., 0.1, 1., 1.1, 1.9, 2., 2.5]:
assert_allclose([pp(xp), ppd(xp)], [0., 0.])
def _make_random_mk(self, m, k):
# k derivatives at each breakpoint
np.random.seed(1234)
xi = np.asarray([1. * j**2 for j in range(m+1)])
yi = [np.random.random(k) for j in range(m+1)]
return xi, yi
def test_random_12(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
pp = BPoly.from_derivatives(xi, yi)
for order in range(k//2):
assert_allclose(pp(xi), [yy[order] for yy in yi])
pp = pp.derivative()
def test_order_zero(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
assert_raises(ValueError, BPoly.from_derivatives,
**dict(xi=xi, yi=yi, orders=0))
def test_orders_too_high(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
pp = BPoly.from_derivatives(xi, yi, orders=2*k-1) # this is still ok
assert_raises(ValueError, BPoly.from_derivatives, # but this is not
**dict(xi=xi, yi=yi, orders=2*k))
def test_orders_global(self):
m, k = 5, 12
xi, yi = self._make_random_mk(m, k)
        # with a global order of 5 the local polynomials are quintic, which
        # means that derivatives up to the 2nd will be used at each point
order = 5
pp = BPoly.from_derivatives(xi, yi, orders=order)
for j in range(order//2+1):
assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
pp = pp.derivative()
assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
        # now repeat with `order` being even: on each interval, it uses
        # order//2 derivatives at the right-hand endpoint and
        # order//2 + 1 derivatives at the left-hand endpoint
order = 6
pp = BPoly.from_derivatives(xi, yi, orders=order)
for j in range(order//2):
assert_allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12))
pp = pp.derivative()
assert_(not np.allclose(pp(xi[1:-1] - 1e-12), pp(xi[1:-1] + 1e-12)))
def test_orders_local(self):
m, k = 7, 12
xi, yi = self._make_random_mk(m, k)
orders = [o + 1 for o in range(m)]
for i, x in enumerate(xi[1:-1]):
pp = BPoly.from_derivatives(xi, yi, orders=orders)
for j in range(orders[i] // 2 + 1):
assert_allclose(pp(x - 1e-12), pp(x + 1e-12))
pp = pp.derivative()
assert_(not np.allclose(pp(x - 1e-12), pp(x + 1e-12)))
def test_yi_trailing_dims(self):
m, k = 7, 5
xi = np.sort(np.random.random(m+1))
yi = np.random.random((m+1, k, 6, 7, 8))
pp = BPoly.from_derivatives(xi, yi)
assert_equal(pp.c.shape, (2*k, m, 6, 7, 8))
def test_gh_5430(self):
        # At least one of these raises an error unless gh-5430 is
        # fixed. In py2k an int is implemented using a C long, so
        # which one fails depends on your system. In py3k there is only
        # one arbitrary-precision integer type, so both would fail
        # without the fix.
orders = np.int32(1)
p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
assert_almost_equal(p(0), 0)
orders = np.int64(1)
p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
assert_almost_equal(p(0), 0)
orders = 1
# This worked before; make sure it still works
p = BPoly.from_derivatives([0, 1], [[0], [0]], orders=orders)
assert_almost_equal(p(0), 0)
class TestNdPPoly(object):
def test_simple_1d(self):
np.random.seed(1234)
c = np.random.rand(4, 5)
x = np.linspace(0, 1, 5+1)
xi = np.random.rand(200)
p = NdPPoly(c, (x,))
v1 = p((xi,))
v2 = _ppoly_eval_1(c[:,:,None], x, xi).ravel()
assert_allclose(v1, v2)
def test_simple_2d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7)
x = np.linspace(0, 1, 6+1)
y = np.linspace(0, 1, 7+1)**2
xi = np.random.rand(200)
yi = np.random.rand(200)
v1 = np.empty([len(xi), 1], dtype=c.dtype)
v1.fill(np.nan)
_ppoly.evaluate_nd(c.reshape(4*5, 6*7, 1),
(x, y),
np.array([4, 5], dtype=np.intc),
np.c_[xi, yi],
np.array([0, 0], dtype=np.intc),
1,
v1)
v1 = v1.ravel()
v2 = _ppoly2d_eval(c, (x, y), xi, yi)
assert_allclose(v1, v2)
p = NdPPoly(c, (x, y))
for nu in (None, (0, 0), (0, 1), (1, 0), (2, 3), (9, 2)):
v1 = p(np.c_[xi, yi], nu=nu)
v2 = _ppoly2d_eval(c, (x, y), xi, yi, nu=nu)
assert_allclose(v1, v2, err_msg=repr(nu))
def test_simple_3d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7, 8, 9)
x = np.linspace(0, 1, 7+1)
y = np.linspace(0, 1, 8+1)**2
z = np.linspace(0, 1, 9+1)**3
xi = np.random.rand(40)
yi = np.random.rand(40)
zi = np.random.rand(40)
p = NdPPoly(c, (x, y, z))
for nu in (None, (0, 0, 0), (0, 1, 0), (1, 0, 0), (2, 3, 0),
(6, 0, 2)):
v1 = p((xi, yi, zi), nu=nu)
v2 = _ppoly3d_eval(c, (x, y, z), xi, yi, zi, nu=nu)
assert_allclose(v1, v2, err_msg=repr(nu))
def test_simple_4d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7, 8, 9, 10, 11)
x = np.linspace(0, 1, 8+1)
y = np.linspace(0, 1, 9+1)**2
z = np.linspace(0, 1, 10+1)**3
u = np.linspace(0, 1, 11+1)**4
xi = np.random.rand(20)
yi = np.random.rand(20)
zi = np.random.rand(20)
ui = np.random.rand(20)
p = NdPPoly(c, (x, y, z, u))
v1 = p((xi, yi, zi, ui))
v2 = _ppoly4d_eval(c, (x, y, z, u), xi, yi, zi, ui)
assert_allclose(v1, v2)
def test_deriv_1d(self):
np.random.seed(1234)
c = np.random.rand(4, 5)
x = np.linspace(0, 1, 5+1)
p = NdPPoly(c, (x,))
# derivative
dp = p.derivative(nu=[1])
p1 = PPoly(c, x)
dp1 = p1.derivative()
assert_allclose(dp.c, dp1.c)
# antiderivative
dp = p.antiderivative(nu=[2])
p1 = PPoly(c, x)
dp1 = p1.antiderivative(2)
assert_allclose(dp.c, dp1.c)
def test_deriv_3d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 7, 8, 9)
x = np.linspace(0, 1, 7+1)
y = np.linspace(0, 1, 8+1)**2
z = np.linspace(0, 1, 9+1)**3
p = NdPPoly(c, (x, y, z))
# differentiate vs x
p1 = PPoly(c.transpose(0, 3, 1, 2, 4, 5), x)
dp = p.derivative(nu=[2])
dp1 = p1.derivative(2)
assert_allclose(dp.c,
dp1.c.transpose(0, 2, 3, 1, 4, 5))
# antidifferentiate vs y
p1 = PPoly(c.transpose(1, 4, 0, 2, 3, 5), y)
dp = p.antiderivative(nu=[0, 1, 0])
dp1 = p1.antiderivative(1)
assert_allclose(dp.c,
dp1.c.transpose(2, 0, 3, 4, 1, 5))
# differentiate vs z
p1 = PPoly(c.transpose(2, 5, 0, 1, 3, 4), z)
dp = p.derivative(nu=[0, 0, 3])
dp1 = p1.derivative(3)
assert_allclose(dp.c,
dp1.c.transpose(2, 3, 0, 4, 5, 1))
def test_deriv_3d_simple(self):
# Integrate to obtain function x y**2 z**4 / (2! 4!)
c = np.ones((1, 1, 1, 3, 4, 5))
x = np.linspace(0, 1, 3+1)**1
y = np.linspace(0, 1, 4+1)**2
z = np.linspace(0, 1, 5+1)**3
p = NdPPoly(c, (x, y, z))
ip = p.antiderivative((1, 0, 4))
ip = ip.antiderivative((0, 2, 0))
xi = np.random.rand(20)
yi = np.random.rand(20)
zi = np.random.rand(20)
assert_allclose(ip((xi, yi, zi)),
xi * yi**2 * zi**4 / (gamma(3)*gamma(5)))
def test_integrate_2d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 16, 17)
x = np.linspace(0, 1, 16+1)**1
y = np.linspace(0, 1, 17+1)**2
# make continuously differentiable so that nquad() has an
# easier time
c = c.transpose(0, 2, 1, 3)
cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
_ppoly.fix_continuity(cx, x, 2)
c = cx.reshape(c.shape)
c = c.transpose(0, 2, 1, 3)
c = c.transpose(1, 3, 0, 2)
cx = c.reshape(c.shape[0], c.shape[1], -1).copy()
_ppoly.fix_continuity(cx, y, 2)
c = cx.reshape(c.shape)
c = c.transpose(2, 0, 3, 1).copy()
# Check integration
p = NdPPoly(c, (x, y))
for ranges in [[(0, 1), (0, 1)],
[(0, 0.5), (0, 1)],
[(0, 1), (0, 0.5)],
[(0.3, 0.7), (0.6, 0.2)]]:
ig = p.integrate(ranges)
ig2, err2 = nquad(lambda x, y: p((x, y)), ranges,
opts=[dict(epsrel=1e-5, epsabs=1e-5)]*2)
assert_allclose(ig, ig2, rtol=1e-5, atol=1e-5,
err_msg=repr(ranges))
def test_integrate_1d(self):
np.random.seed(1234)
c = np.random.rand(4, 5, 6, 16, 17, 18)
x = np.linspace(0, 1, 16+1)**1
y = np.linspace(0, 1, 17+1)**2
z = np.linspace(0, 1, 18+1)**3
# Check 1D integration
p = NdPPoly(c, (x, y, z))
u = np.random.rand(200)
v = np.random.rand(200)
a, b = 0.2, 0.7
px = p.integrate_1d(a, b, axis=0)
pax = p.antiderivative((1, 0, 0))
assert_allclose(px((u, v)), pax((b, u, v)) - pax((a, u, v)))
py = p.integrate_1d(a, b, axis=1)
pay = p.antiderivative((0, 1, 0))
assert_allclose(py((u, v)), pay((u, b, v)) - pay((u, a, v)))
pz = p.integrate_1d(a, b, axis=2)
paz = p.antiderivative((0, 0, 1))
assert_allclose(pz((u, v)), paz((u, v, b)) - paz((u, v, a)))
def _ppoly_eval_1(c, x, xps):
"""Evaluate piecewise polynomial manually"""
out = np.zeros((len(xps), c.shape[2]))
for i, xp in enumerate(xps):
if xp < 0 or xp > 1:
out[i,:] = np.nan
continue
j = np.searchsorted(x, xp) - 1
d = xp - x[j]
assert_(x[j] <= xp < x[j+1])
r = sum(c[k,j] * d**(c.shape[0]-k-1)
for k in range(c.shape[0]))
out[i,:] = r
return out
def _ppoly_eval_2(coeffs, breaks, xnew, fill=np.nan):
"""Evaluate piecewise polynomial manually (another way)"""
a = breaks[0]
b = breaks[-1]
K = coeffs.shape[0]
saveshape = np.shape(xnew)
xnew = np.ravel(xnew)
res = np.empty_like(xnew)
mask = (xnew >= a) & (xnew <= b)
res[~mask] = fill
xx = xnew.compress(mask)
indxs = np.searchsorted(breaks, xx)-1
indxs = indxs.clip(0, len(breaks))
pp = coeffs
diff = xx - breaks.take(indxs)
V = np.vander(diff, N=K)
    values = np.array([np.dot(V[k, :], pp[:, indxs[k]]) for k in range(len(xx))])
res[mask] = values
res.shape = saveshape
return res
def _dpow(x, y, n):
"""
d^n (x**y) / dx^n
"""
if n < 0:
raise ValueError("invalid derivative order")
elif n > y:
return 0
else:
return poch(y - n + 1, n) * x**(y - n)
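# Two hand-checked values, not part of the original suite, pinning down the
# convention above: d^2(x**3)/dx^2 == 6*x, and differentiating past the
# degree gives 0.
def _example_dpow():
    assert _dpow(2.0, 3, 2) == 6 * 2.0    # poch(2, 2) * 2.0**1 == 3*2*2
    assert _dpow(2.0, 3, 4) == 0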
def _ppoly2d_eval(c, xs, xnew, ynew, nu=None):
"""
Straightforward evaluation of 2D piecewise polynomial
"""
if nu is None:
nu = (0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
nx, ny = c.shape[:2]
for jout, (x, y) in enumerate(zip(xnew, ynew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
val += (c[nx-k1-1,ny-k2-1,j1,j2]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1]))
out[jout] = val
return out
def _ppoly3d_eval(c, xs, xnew, ynew, znew, nu=None):
"""
Straightforward evaluation of 3D piecewise polynomial
"""
if nu is None:
nu = (0, 0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
nx, ny, nz = c.shape[:3]
for jout, (x, y, z) in enumerate(zip(xnew, ynew, znew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1]) and
(xs[2][0] <= z <= xs[2][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
j3 = np.searchsorted(xs[2], z) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
s3 = z - xs[2][j3]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
for k3 in range(c.shape[2]):
val += (c[nx-k1-1,ny-k2-1,nz-k3-1,j1,j2,j3]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1])
* _dpow(s3, k3, nu[2]))
out[jout] = val
return out
def _ppoly4d_eval(c, xs, xnew, ynew, znew, unew, nu=None):
"""
Straightforward evaluation of 4D piecewise polynomial
"""
if nu is None:
nu = (0, 0, 0, 0)
out = np.empty((len(xnew),), dtype=c.dtype)
mx, my, mz, mu = c.shape[:4]
for jout, (x, y, z, u) in enumerate(zip(xnew, ynew, znew, unew)):
if not ((xs[0][0] <= x <= xs[0][-1]) and
(xs[1][0] <= y <= xs[1][-1]) and
(xs[2][0] <= z <= xs[2][-1]) and
(xs[3][0] <= u <= xs[3][-1])):
out[jout] = np.nan
continue
j1 = np.searchsorted(xs[0], x) - 1
j2 = np.searchsorted(xs[1], y) - 1
j3 = np.searchsorted(xs[2], z) - 1
j4 = np.searchsorted(xs[3], u) - 1
s1 = x - xs[0][j1]
s2 = y - xs[1][j2]
s3 = z - xs[2][j3]
s4 = u - xs[3][j4]
val = 0
for k1 in range(c.shape[0]):
for k2 in range(c.shape[1]):
for k3 in range(c.shape[2]):
for k4 in range(c.shape[3]):
val += (c[mx-k1-1,my-k2-1,mz-k3-1,mu-k4-1,j1,j2,j3,j4]
* _dpow(s1, k1, nu[0])
* _dpow(s2, k2, nu[1])
* _dpow(s3, k3, nu[2])
* _dpow(s4, k4, nu[3]))
out[jout] = val
return out
class TestRegularGridInterpolator(object):
def _get_sample_4d(self):
# create a 4d grid of 3 points in each dimension
points = [(0., .5, 1.)] * 4
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
return points, values
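    # The construction above packs each grid coordinate into its own decimal
    # digit, so every exact node value can be read off directly; a quick
    # check (not part of the original suite):
    def _example_digit_encoding():
        v = np.asarray([0., .5, 1.])
        total = (v[:, None, None, None] + v[None, :, None, None] * 10 +
                 v[None, None, :, None] * 100 + v[None, None, None, :] * 1000)
        assert total[2, 2, 2, 2] == 1111.   # 1 + 10 + 100 + 1000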
def _get_sample_4d_2(self):
# create another 4d grid of 3 points in each dimension
points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
return points, values
def test_list_input(self):
points, values = self._get_sample_4d()
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
for method in ['linear', 'nearest']:
interp = RegularGridInterpolator(points,
values.tolist(),
method=method)
v1 = interp(sample.tolist())
interp = RegularGridInterpolator(points,
values,
method=method)
v2 = interp(sample)
assert_allclose(v1, v2)
def test_complex(self):
points, values = self._get_sample_4d()
values = values - 2j*values
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
for method in ['linear', 'nearest']:
interp = RegularGridInterpolator(points, values,
method=method)
rinterp = RegularGridInterpolator(points, values.real,
method=method)
iinterp = RegularGridInterpolator(points, values.imag,
method=method)
v1 = interp(sample)
v2 = rinterp(sample) + 1j*iinterp(sample)
assert_allclose(v1, v2)
def test_linear_xi1d(self):
points, values = self._get_sample_4d_2()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([0.1, 0.1, 10., 9.])
wanted = 1001.1
assert_array_almost_equal(interp(sample), wanted)
def test_linear_xi3d(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
wanted = np.asarray([1001.1, 846.2, 555.5])
assert_array_almost_equal(interp(sample), wanted)
def test_nearest(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, method="nearest")
sample = np.asarray([0.1, 0.1, .9, .9])
wanted = 1100.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([0.1, 0.1, 0.1, 0.1])
wanted = 0.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([0., 0., 0., 0.])
wanted = 0.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([1., 1., 1., 1.])
wanted = 1111.
assert_array_almost_equal(interp(sample), wanted)
sample = np.asarray([0.1, 0.4, 0.6, 0.9])
wanted = 1055.
assert_array_almost_equal(interp(sample), wanted)
def test_linear_edges(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
wanted = np.asarray([0., 1111.])
assert_array_almost_equal(interp(sample), wanted)
def test_valid_create(self):
# create a 2d grid of 3 points in each dimension
points = [(0., .5, 1.), (0., 1., .5)]
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis]
values1 = values[np.newaxis, :]
values = (values0 + values1 * 10)
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [((0., .5, 1.), ), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [(0., .5, .75, 1.), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [(0., .5, 1.), (0., .5, 1.), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values)
points = [(0., .5, 1.), (0., .5, 1.)]
assert_raises(ValueError, RegularGridInterpolator, points, values,
method="undefmethod")
def test_valid_call(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.]])
assert_raises(ValueError, interp, sample, "undefmethod")
sample = np.asarray([[0., 0., 0.], [1., 1., 1.]])
assert_raises(ValueError, interp, sample)
sample = np.asarray([[0., 0., 0., 0.], [1., 1., 1., 1.1]])
assert_raises(ValueError, interp, sample)
def test_out_of_bounds_extrap(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, bounds_error=False,
fill_value=None)
sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
[21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
wanted = np.asarray([0., 1111., 11., 11.])
assert_array_almost_equal(interp(sample, method="nearest"), wanted)
wanted = np.asarray([-111.1, 1222.1, -11068., -1186.9])
assert_array_almost_equal(interp(sample, method="linear"), wanted)
def test_out_of_bounds_extrap2(self):
points, values = self._get_sample_4d_2()
interp = RegularGridInterpolator(points, values, bounds_error=False,
fill_value=None)
sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
[21, 2.1, -1.1, -11], [2.1, 2.1, -1.1, -1.1]])
wanted = np.asarray([0., 11., 11., 11.])
assert_array_almost_equal(interp(sample, method="nearest"), wanted)
wanted = np.asarray([-12.1, 133.1, -1069., -97.9])
assert_array_almost_equal(interp(sample, method="linear"), wanted)
def test_out_of_bounds_fill(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, bounds_error=False,
fill_value=np.nan)
sample = np.asarray([[-.1, -.1, -.1, -.1], [1.1, 1.1, 1.1, 1.1],
[2.1, 2.1, -1.1, -1.1]])
wanted = np.asarray([np.nan, np.nan, np.nan])
assert_array_almost_equal(interp(sample, method="nearest"), wanted)
assert_array_almost_equal(interp(sample, method="linear"), wanted)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
wanted = np.asarray([1001.1, 846.2, 555.5])
assert_array_almost_equal(interp(sample), wanted)
def test_nearest_compare_qhull(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values, method="nearest")
points_qhull = itertools.product(*points)
points_qhull = [p for p in points_qhull]
points_qhull = np.asarray(points_qhull)
values_qhull = values.reshape(-1)
interp_qhull = NearestNDInterpolator(points_qhull, values_qhull)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
assert_array_almost_equal(interp(sample), interp_qhull(sample))
def test_linear_compare_qhull(self):
points, values = self._get_sample_4d()
interp = RegularGridInterpolator(points, values)
points_qhull = itertools.product(*points)
points_qhull = [p for p in points_qhull]
points_qhull = np.asarray(points_qhull)
values_qhull = values.reshape(-1)
interp_qhull = LinearNDInterpolator(points_qhull, values_qhull)
sample = np.asarray([[0.1, 0.1, 1., .9], [0.2, 0.1, .45, .8],
[0.5, 0.5, .5, .5]])
assert_array_almost_equal(interp(sample), interp_qhull(sample))
def test_duck_typed_values(self):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = MyValue((5, 7))
for method in ('nearest', 'linear'):
interp = RegularGridInterpolator((x, y), values,
method=method)
v1 = interp([0.4, 0.7])
interp = RegularGridInterpolator((x, y), values._v,
method=method)
v2 = interp([0.4, 0.7])
assert_allclose(v1, v2)
def test_invalid_fill_value(self):
np.random.seed(1234)
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = np.random.rand(5, 7)
# integers can be cast to floats
RegularGridInterpolator((x, y), values, fill_value=1)
# complex values cannot
assert_raises(ValueError, RegularGridInterpolator,
(x, y), values, fill_value=1+2j)
def test_fillvalue_type(self):
# from #3703; test that interpolator object construction succeeds
values = np.ones((10, 20, 30), dtype='>f4')
points = [np.arange(n) for n in values.shape]
xi = [(1, 1, 1)]
interpolator = RegularGridInterpolator(points, values)
interpolator = RegularGridInterpolator(points, values, fill_value=0.)
class MyValue(object):
"""
Minimal indexable object
"""
def __init__(self, shape):
self.ndim = 2
self.shape = shape
self._v = np.arange(np.prod(shape)).reshape(shape)
def __getitem__(self, idx):
return self._v[idx]
def __array_interface__(self):
return None
def __array__(self):
raise RuntimeError("No array representation")
class TestInterpN(object):
def _sample_2d_data(self):
        x = np.array([.5, 2., 3., 4., 5.5])
        y = np.array([.5, 2., 3., 4., 5.5])
z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
return x, y, z
def test_spline_2d(self):
x, y, z = self._sample_2d_data()
lut = RectBivariateSpline(x, y, z)
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
assert_array_almost_equal(interpn((x, y), z, xi, method="splinef2d"),
lut.ev(xi[:, 0], xi[:, 1]))
def test_list_input(self):
x, y, z = self._sample_2d_data()
xi = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
for method in ['nearest', 'linear', 'splinef2d']:
v1 = interpn((x, y), z, xi, method=method)
v2 = interpn((x.tolist(), y.tolist()), z.tolist(),
xi.tolist(), method=method)
assert_allclose(v1, v2, err_msg=method)
def test_spline_2d_outofbounds(self):
x = np.array([.5, 2., 3., 4., 5.5])
y = np.array([.5, 2., 3., 4., 5.5])
z = np.array([[1, 2, 1, 2, 1], [1, 2, 1, 2, 1], [1, 2, 3, 2, 1],
[1, 2, 2, 2, 1], [1, 2, 1, 2, 1]])
lut = RectBivariateSpline(x, y, z)
xi = np.array([[1, 2.3, 6.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, -4.0, 5.0, 1.0, 3]]).T
actual = interpn((x, y), z, xi, method="splinef2d",
bounds_error=False, fill_value=999.99)
expected = lut.ev(xi[:, 0], xi[:, 1])
expected[2:4] = 999.99
assert_array_almost_equal(actual, expected)
# no extrapolation for splinef2d
assert_raises(ValueError, interpn, (x, y), z, xi, method="splinef2d",
bounds_error=False, fill_value=None)
def _sample_4d_data(self):
points = [(0., .5, 1.)] * 2 + [(0., 5., 10.)] * 2
values = np.asarray([0., .5, 1.])
values0 = values[:, np.newaxis, np.newaxis, np.newaxis]
values1 = values[np.newaxis, :, np.newaxis, np.newaxis]
values2 = values[np.newaxis, np.newaxis, :, np.newaxis]
values3 = values[np.newaxis, np.newaxis, np.newaxis, :]
values = (values0 + values1 * 10 + values2 * 100 + values3 * 1000)
return points, values
def test_linear_4d(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
interp_rg = RegularGridInterpolator(points, values)
sample = np.asarray([[0.1, 0.1, 10., 9.]])
wanted = interpn(points, values, sample, method="linear")
assert_array_almost_equal(interp_rg(sample), wanted)
def test_4d_linear_outofbounds(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
wanted = 999.99
actual = interpn(points, values, sample, method="linear",
bounds_error=False, fill_value=999.99)
assert_array_almost_equal(actual, wanted)
def test_nearest_4d(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
interp_rg = RegularGridInterpolator(points, values, method="nearest")
sample = np.asarray([[0.1, 0.1, 10., 9.]])
wanted = interpn(points, values, sample, method="nearest")
assert_array_almost_equal(interp_rg(sample), wanted)
def test_4d_nearest_outofbounds(self):
# create a 4d grid of 3 points in each dimension
points, values = self._sample_4d_data()
sample = np.asarray([[0.1, -0.1, 10.1, 9.]])
wanted = 999.99
actual = interpn(points, values, sample, method="nearest",
bounds_error=False, fill_value=999.99)
assert_array_almost_equal(actual, wanted)
def test_xi_1d(self):
# verify that 1D xi works as expected
points, values = self._sample_4d_data()
sample = np.asarray([0.1, 0.1, 10., 9.])
v1 = interpn(points, values, sample, bounds_error=False)
v2 = interpn(points, values, sample[None,:], bounds_error=False)
assert_allclose(v1, v2)
def test_xi_nd(self):
# verify that higher-d xi works as expected
points, values = self._sample_4d_data()
np.random.seed(1234)
sample = np.random.rand(2, 3, 4)
v1 = interpn(points, values, sample, method='nearest',
bounds_error=False)
assert_equal(v1.shape, (2, 3))
v2 = interpn(points, values, sample.reshape(-1, 4),
method='nearest', bounds_error=False)
assert_allclose(v1, v2.reshape(v1.shape))
def test_xi_broadcast(self):
# verify that the interpolators broadcast xi
x, y, values = self._sample_2d_data()
points = (x, y)
xi = np.linspace(0, 1, 2)
yi = np.linspace(0, 3, 3)
for method in ['nearest', 'linear', 'splinef2d']:
sample = (xi[:,None], yi[None,:])
v1 = interpn(points, values, sample, method=method,
bounds_error=False)
assert_equal(v1.shape, (2, 3))
xx, yy = np.meshgrid(xi, yi)
sample = np.c_[xx.T.ravel(), yy.T.ravel()]
v2 = interpn(points, values, sample,
method=method, bounds_error=False)
assert_allclose(v1, v2.reshape(v1.shape))
def test_nonscalar_values(self):
# Verify that non-scalar valued values also works
points, values = self._sample_4d_data()
np.random.seed(1234)
values = np.random.rand(3, 3, 3, 3, 6)
sample = np.random.rand(7, 11, 4)
for method in ['nearest', 'linear']:
v = interpn(points, values, sample, method=method,
bounds_error=False)
assert_equal(v.shape, (7, 11, 6), err_msg=method)
vs = [interpn(points, values[...,j], sample, method=method,
bounds_error=False)
for j in range(6)]
v2 = np.array(vs).transpose(1, 2, 0)
assert_allclose(v, v2, err_msg=method)
        # Vector-valued splines are not supported with fitpack
assert_raises(ValueError, interpn, points, values, sample,
method='splinef2d')
def test_complex(self):
x, y, values = self._sample_2d_data()
points = (x, y)
values = values - 2j*values
sample = np.array([[1, 2.3, 5.3, 0.5, 3.3, 1.2, 3],
[1, 3.3, 1.2, 4.0, 5.0, 1.0, 3]]).T
for method in ['linear', 'nearest']:
v1 = interpn(points, values, sample, method=method)
v2r = interpn(points, values.real, sample, method=method)
v2i = interpn(points, values.imag, sample, method=method)
v2 = v2r + 1j*v2i
assert_allclose(v1, v2)
        # Complex-valued data not supported by splinef2d
_assert_warns(np.ComplexWarning, interpn, points, values,
sample, method='splinef2d')
def test_duck_typed_values(self):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = MyValue((5, 7))
for method in ('nearest', 'linear'):
v1 = interpn((x, y), values, [0.4, 0.7], method=method)
v2 = interpn((x, y), values._v, [0.4, 0.7], method=method)
assert_allclose(v1, v2)
def test_matrix_input(self):
x = np.linspace(0, 2, 5)
y = np.linspace(0, 1, 7)
values = np.matrix(np.random.rand(5, 7))
sample = np.random.rand(3, 7, 2)
for method in ('nearest', 'linear', 'splinef2d'):
v1 = interpn((x, y), values, sample, method=method)
v2 = interpn((x, y), np.asarray(values), sample, method=method)
assert_allclose(v1, np.asmatrix(v2))
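# A closing usage sketch, not part of the original suite: interpn is the
# functional counterpart of RegularGridInterpolator, and the two agree on
# in-bounds points with the default linear method.
def _example_interpn_vs_rgi():
    x = np.linspace(0, 2, 5)
    y = np.linspace(0, 1, 7)
    values = np.random.rand(5, 7)
    pts = np.array([[0.4, 0.7], [1.1, 0.2]])
    assert np.allclose(interpn((x, y), values, pts),
                       RegularGridInterpolator((x, y), values)(pts))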
|
mbayon/TFG-MachineLearning
|
venv/lib/python3.6/site-packages/scipy/interpolate/tests/test_interpolate.py
|
Python
|
mit
| 102,313
|
# -*- coding: utf-8 -*-
#
# Rule
#
# Blueprint for rule administration.
#
# Created by dp on 2014-12-25.
# ================================================================================ #
from flask.blueprints import Blueprint
from flask.globals import g
from wtforms.fields.core import SelectField
from wtforms.fields.simple import TextField
from wtforms.validators import DataRequired
from core.navigation.menu import menubar, contextmenu
from core.security.role import Role
from core.security.rule import Rule
from core.rendering import DefaultForm, render, create_form, mismatch, delete_form, \
update_form
from core.utility.localization import localize
blueprint = Blueprint("rule-controller", __name__)
# Forms
# -------------------------------------------------------------------------------- #
class FormRule(DefaultForm):
route = TextField(localize("core", "rules.field_route"),
validators = [DataRequired()])
role_id = SelectField(localize("core", "rules.field_role"),
coerce = int)
insert = SelectField(localize("core", "rules.field_insert"),
choices = [(item, item) for item in Rule.permissions])
remove = SelectField(localize("core", "rules.field_remove"),
choices = [(item, item) for item in Rule.permissions])
change = SelectField(localize("core", "rules.field_change"),
choices = [(item, item) for item in Rule.permissions])
view = SelectField(localize("core", "rules.field_view"),
choices = [(item, item) for item in Rule.permissions])
# Default route: View a list of all rules
# -------------------------------------------------------------------------------- #
@blueprint.route("/rules", methods = ["GET"])
def entries():
navigation = menubar("administration", g.role.id)
items = Rule.all()
actions = menubar("rule", g.role.id)
for item in items: item.actions = contextmenu("rule", g.role.id)
return render("core/administration/rule-list.html", navigation = navigation,
items = items, actions = actions)
# Create Rule
# -------------------------------------------------------------------------------- #
@blueprint.route("/rules/create", methods = ["GET", "POST"])
def create():
navigation = menubar("administration", g.role.id)
item = Rule()
form = FormRule()
form.role_id.choices = [(role.id, role.name) for role in Role.all()]
headline = localize("core", "rules.create_headline")
message = localize("core", "rules.create_success")
return create_form(item, form, headline, message, "/rules",
template = "core/administration/rule-form.html",
navigation = navigation)
# Delete Rule
# -------------------------------------------------------------------------------- #
@blueprint.route("/rules/<identifier>/delete", methods = ["GET", "POST"])
def delete(identifier):
navigation = menubar("administration", g.role.id)
item = Rule.get(int(identifier))
if not item: return mismatch()
headline = localize("core", "rules.delete_headline")
text = localize("core", "rules.delete_description") % (item.route)
message = localize("core", "rules.delete_success")
return delete_form(item, headline, text, message, "/rules",
template = "core/administration/confirm.html",
navigation = navigation)
# Edit Rule
# -------------------------------------------------------------------------------- #
@blueprint.route("/rules/<identifier>/update", methods = ["GET", "POST"])
def update(identifier):
navigation = menubar("administration", g.role.id)
item = Rule.get(int(identifier))
if not item: return mismatch()
form = FormRule(obj = item)
form.role_id.choices = [(role.id, role.name) for role in Role.all()]
headline = localize("core", "rules.update_headline")
message = localize("core", "rules.update_success")
return update_form(item, form, headline, message, "/rules",
template = "core/administration/rule-form.html",
navigation = navigation)
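# Register blueprint
# -------------------------------------------------------------------------------- #
def register(app):
    # A minimal sketch, not part of the original module: the routes above only
    # become reachable once an application mounts this blueprint. `app` is
    # assumed to be a flask.Flask instance.
    app.register_blueprint(blueprint)
    return app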
|
dpetter/Eowyne
|
src/core/administration/rules.py
|
Python
|
gpl-2.0
| 4,258
|
"""Functions to plot M/EEG data e.g. topographies
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
import math
import copy
from functools import partial
import numpy as np
from scipy import linalg
from ..baseline import rescale
from ..io.constants import FIFF
from ..io.pick import pick_types
from ..utils import _clean_names, _time_mask, verbose, logger
from .utils import (tight_layout, _setup_vmin_vmax, _prepare_trellis,
_check_delayed_ssp, _draw_proj_checkbox, figure_nobar,
plt_show)
from ..time_frequency import compute_epochs_psd
from ..defaults import _handle_default
from ..channels.layout import _find_topomap_coords
from ..fixes import _get_argrelmax
from ..externals.six import string_types
def _prepare_topo_plot(inst, ch_type, layout):
""""Aux Function"""
info = copy.deepcopy(inst.info)
    if layout is None and ch_type != 'eeg':
from ..channels import find_layout
layout = find_layout(info)
elif layout == 'auto':
layout = None
info['ch_names'] = _clean_names(info['ch_names'])
for ii, this_ch in enumerate(info['chs']):
this_ch['ch_name'] = info['ch_names'][ii]
# special case for merging grad channels
if (ch_type == 'grad' and FIFF.FIFFV_COIL_VV_PLANAR_T1 in
np.unique([ch['coil_type'] for ch in info['chs']])):
from ..channels.layout import _pair_grad_sensors
picks, pos = _pair_grad_sensors(info, layout)
merge_grads = True
else:
merge_grads = False
if ch_type == 'eeg':
picks = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
else:
picks = pick_types(info, meg=ch_type, ref_meg=False,
exclude='bads')
if len(picks) == 0:
raise ValueError("No channels of type %r" % ch_type)
if layout is None:
pos = _find_topomap_coords(info, picks)
else:
names = [n.upper() for n in layout.names]
pos = list()
for pick in picks:
this_name = info['ch_names'][pick].upper()
if this_name in names:
pos.append(layout.pos[names.index(this_name)])
else:
logger.warning('Failed to locate %s channel positions from'
' layout. Inferring channel positions from '
'data.' % ch_type)
pos = _find_topomap_coords(info, picks)
break
ch_names = [info['ch_names'][k] for k in picks]
if merge_grads:
# change names so that vectorview combined grads appear as MEG014x
        # instead of MEG0142 or MEG0143, which are the two planar grads.
ch_names = [ch_names[k][:-1] + 'x' for k in range(0, len(ch_names), 2)]
pos = np.array(pos)[:, :2] # 2D plot, otherwise interpolation bugs
return picks, pos, merge_grads, ch_names, ch_type
def _plot_update_evoked_topomap(params, bools):
""" Helper to update topomaps """
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
new_evoked = params['evoked'].copy()
new_evoked.info['projs'] = []
new_evoked.add_proj(projs)
new_evoked.apply_proj()
data = new_evoked.data[np.ix_(params['picks'],
params['time_idx'])] * params['scale']
if params['merge_grads']:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
image_mask = params['image_mask']
pos_x, pos_y = np.asarray(params['pos'])[:, :2].T
xi = np.linspace(pos_x.min(), pos_x.max(), params['res'])
yi = np.linspace(pos_y.min(), pos_y.max(), params['res'])
Xi, Yi = np.meshgrid(xi, yi)
for ii, im in enumerate(params['images']):
Zi = _griddata(pos_x, pos_y, data[:, ii], Xi, Yi)
Zi[~image_mask] = np.nan
im.set_data(Zi)
for cont in params['contours']:
cont.set_array(np.c_[Xi, Yi, Zi])
params['fig'].canvas.draw()
def plot_projs_topomap(projs, layout=None, cmap='RdBu_r', sensors=True,
colorbar=False, res=64, size=1, show=True,
outlines='head', contours=6, image_interp='bilinear',
axes=None):
"""Plot topographic maps of SSP projections
Parameters
----------
projs : list of Projection
The projections
layout : None | Layout | list of Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). Or a list of Layout if projections
are from different sensor types.
cmap : matplotlib colormap
Colormap.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
res : int
The resolution of the topomap image (n pixels along each side).
size : scalar
Side length of the topomaps in inches (only applies when plotting
multiple topomaps at a time).
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of projectors. If instance of Axes,
there must be only one projector. Defaults to None.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
Notes
-----
.. versionadded:: 0.9.0
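Examples
--------
A minimal, hedged sketch; assumes a projection file is available (the
file name here is illustrative only):
>>> import mne  # doctest: +SKIP
>>> projs = mne.read_proj('sample_audvis_ecg-proj.fif')  # doctest: +SKIP
>>> mne.viz.plot_projs_topomap(projs)  # doctest: +SKIP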
"""
import matplotlib.pyplot as plt
if layout is None:
from ..channels import read_layout
layout = read_layout('Vectorview-all')
if not isinstance(layout, list):
layout = [layout]
n_projs = len(projs)
nrows = math.floor(math.sqrt(n_projs))
ncols = math.ceil(n_projs / nrows)
if axes is None:
plt.figure()
axes = list()
for idx in range(len(projs)):
ax = plt.subplot(nrows, ncols, idx + 1)
axes.append(ax)
elif isinstance(axes, plt.Axes):
axes = [axes]
if len(axes) != len(projs):
raise RuntimeError('There must be an axes for each picked projector.')
for proj_idx, proj in enumerate(projs):
axes[proj_idx].set_title(proj['desc'][:10] + '...')
ch_names = _clean_names(proj['data']['col_names'])
data = proj['data']['data'].ravel()
idx = []
for l in layout:
is_vv = l.kind.startswith('Vectorview')
if is_vv:
from ..channels.layout import _pair_grad_sensors_from_ch_names
grad_pairs = _pair_grad_sensors_from_ch_names(ch_names)
if grad_pairs:
ch_names = [ch_names[i] for i in grad_pairs]
idx = [l.names.index(c) for c in ch_names if c in l.names]
if len(idx) == 0:
continue
pos = l.pos[idx]
if is_vv and grad_pairs:
from ..channels.layout import _merge_grad_data
shape = (len(idx) // 2, 2, -1)
pos = pos.reshape(shape).mean(axis=1)
data = _merge_grad_data(data[grad_pairs]).ravel()
break
if len(idx):
plot_topomap(data, pos[:, :2], vmax=None, cmap=cmap,
sensors=sensors, res=res, axis=axes[proj_idx],
outlines=outlines, contours=contours,
image_interp=image_interp, show=False)
if colorbar:
plt.colorbar()
else:
raise RuntimeError('Cannot find a proper layout for projection %s'
% proj['desc'])
tight_layout(fig=axes[0].get_figure())
plt_show(show)
return axes[0].get_figure()
def _check_outlines(pos, outlines, head_pos=None):
"""Check or create outlines for topoplot
"""
pos = np.array(pos, float)[:, :2] # ensure we have a copy
head_pos = dict() if head_pos is None else head_pos
if not isinstance(head_pos, dict):
raise TypeError('head_pos must be dict or None')
head_pos = copy.deepcopy(head_pos)
for key in head_pos.keys():
if key not in ('center', 'scale'):
raise KeyError('head_pos must only contain "center" and '
'"scale"')
head_pos[key] = np.array(head_pos[key], float)
if head_pos[key].shape != (2,):
raise ValueError('head_pos["%s"] must have shape (2,), not '
'%s' % (key, head_pos[key].shape))
if outlines in ('head', 'skirt', None):
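# The head is drawn as a circle of unit diameter centered at the origin;
# the nose and ear coordinates below are empirical fractions of that radius.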
radius = 0.5
l = np.linspace(0, 2 * np.pi, 101)
head_x = np.cos(l) * radius
head_y = np.sin(l) * radius
nose_x = np.array([0.18, 0, -0.18]) * radius
nose_y = np.array([radius - .004, radius * 1.15, radius - .004])
ear_x = np.array([.497, .510, .518, .5299, .5419, .54, .547,
.532, .510, .489])
ear_y = np.array([.0555, .0775, .0783, .0746, .0555, -.0055, -.0932,
-.1313, -.1384, -.1199])
# shift and scale the electrode positions
if 'center' not in head_pos:
head_pos['center'] = 0.5 * (pos.max(axis=0) + pos.min(axis=0))
pos -= head_pos['center']
if outlines is not None:
# Define the outline of the head, ears and nose
outlines_dict = dict(head=(head_x, head_y), nose=(nose_x, nose_y),
ear_left=(ear_x, ear_y),
ear_right=(-ear_x, ear_y))
else:
outlines_dict = dict()
if outlines == 'skirt':
if 'scale' not in head_pos:
# By default, fit electrodes inside the head circle
head_pos['scale'] = 1.0 / (pos.max(axis=0) - pos.min(axis=0))
pos *= head_pos['scale']
# Make the figure encompass slightly more than all points
mask_scale = 1.25 * (pos.max(axis=0) - pos.min(axis=0))
outlines_dict['autoshrink'] = False
outlines_dict['mask_pos'] = (mask_scale[0] * head_x,
mask_scale[1] * head_y)
outlines_dict['clip_radius'] = (mask_scale / 2.)
else:
if 'scale' not in head_pos:
# The default is to make the points occupy a slightly smaller
# proportion (0.85) of the total width and height
# this number was empirically determined (seems to work well)
head_pos['scale'] = 0.85 / (pos.max(axis=0) - pos.min(axis=0))
pos *= head_pos['scale']
outlines_dict['autoshrink'] = True
outlines_dict['mask_pos'] = head_x, head_y
outlines_dict['clip_radius'] = (0.5, 0.5)
outlines = outlines_dict
elif isinstance(outlines, dict):
if 'mask_pos' not in outlines:
raise ValueError('You must specify the coordinates of the image '
'mask')
else:
raise ValueError('Invalid value for `outlines`')
return pos, outlines
def _griddata(x, y, v, xi, yi):
"""Interpolate scattered 2D data onto a grid (biharmonic spline).
Akin to the 'v4' method of MATLAB's griddata: 2D points are encoded as
complex numbers and the Green's function g(d) = d**2 * (log(d) - 1)
serves as the radial basis.
"""
# pairwise distances between the sample points (complex encoding of 2D)
xy = x.ravel() + y.ravel() * -1j
d = xy[None, :] * np.ones((len(xy), 1))
d = np.abs(d - d.T)
n = d.shape[0]
d.flat[::n + 1] = 1.  # avoid log(0) on the diagonal
g = (d * d) * (np.log(d) - 1.)
g.flat[::n + 1] = 0.  # the Green's function vanishes at zero distance
# solve for the weights that reproduce v exactly at the sample points
weights = linalg.solve(g, v.ravel())
m, n = xi.shape
zi = np.zeros_like(xi)
xy = xy.T
g = np.empty(xy.shape)
# evaluate the spline at every grid node
for i in range(m):
for j in range(n):
d = np.abs(xi[i, j] + -1j * yi[i, j] - xy)
mask = np.where(d == 0)[0]
if len(mask):
d[mask] = 1.
np.log(d, out=g)
g -= 1.
g *= d * d
if len(mask):
g[mask] = 0.
zi[i, j] = g.dot(weights)
return zi
def _plot_sensors(pos_x, pos_y, sensors, ax):
"""Aux function"""
from matplotlib.patches import Circle
if sensors is True:
for x, y in zip(pos_x, pos_y):
ax.add_artist(Circle(xy=(x, y), radius=0.003, color='k'))
else:
ax.plot(pos_x, pos_y, sensors)
def plot_topomap(data, pos, vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
res=64, axis=None, names=None, show_names=False, mask=None,
mask_params=None, outlines='head', image_mask=None,
contours=6, image_interp='bilinear', show=True,
head_pos=None, onselect=None):
"""Plot a topographic map as image
Parameters
----------
data : array, length = n_points
The data values to plot.
pos : array, shape = (n_points, 2)
For each data point, the x and y coordinates.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap
Colormap.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
res : int
The resolution of the topomap image (n pixels along each side).
axis : instance of Axis | None
The axis to plot to. If None, the current axis will be used.
names : list | None
List of channel names. If None, channel names are not plotted.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g., to
delete the prefix 'MEG ' from all channel names, pass the function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
image_mask : ndarray of bool, shape (res, res) | None
The image mask to cover the interpolated surface. If None, it will be
computed from the outline.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
show : bool
Show figure if True.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
onselect : callable | None
Handle for a function that is called when the user selects a set of
channels by rectangle selection (matplotlib ``RectangleSelector``). If
None interactive selection is disabled. Defaults to None.
Returns
-------
im : matplotlib.image.AxesImage
The interpolated data.
cn : matplotlib.contour.ContourSet
The fieldlines.
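Examples
--------
A minimal sketch with synthetic positions and values (all numbers here
are made up for illustration):
>>> import numpy as np  # doctest: +SKIP
>>> rng = np.random.RandomState(42)  # doctest: +SKIP
>>> pos = rng.uniform(-0.4, 0.4, size=(32, 2))  # doctest: +SKIP
>>> data = rng.randn(32)  # doctest: +SKIP
>>> im, cn = plot_topomap(data, pos, show=False)  # doctest: +SKIP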
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import RectangleSelector
data = np.asarray(data)
if data.ndim > 1:
raise ValueError("Data needs to be array of shape (n_sensors,); got "
"shape %s." % str(data.shape))
# Give a helpful error message for common mistakes regarding the position
# matrix.
pos_help = ("Electrode positions should be specified as a 2D array with "
"shape (n_channels, 2). Each row in this matrix contains the "
"(x, y) position of an electrode.")
if pos.ndim != 2:
error = ("{ndim}D array supplied as electrode positions, where a 2D "
"array was expected").format(ndim=pos.ndim)
raise ValueError(error + " " + pos_help)
elif pos.shape[1] == 3:
error = ("The supplied electrode positions matrix contains 3 columns. "
"Are you trying to specify XYZ coordinates? Perhaps the "
"mne.channels.create_eeg_layout function is useful for you.")
raise ValueError(error + " " + pos_help)
# No error is raised in case of pos.shape[1] == 4. In this case, it is
# assumed the position matrix contains both (x, y) and (width, height)
# values, such as Layout.pos.
elif pos.shape[1] == 1 or pos.shape[1] > 4:
raise ValueError(pos_help)
if len(data) != len(pos):
raise ValueError("Data and pos need to be of same length. Got data of "
"length %s, pos of length %s" % (len(data), len(pos)))
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
pos, outlines = _check_outlines(pos, outlines, head_pos)
pos_x = pos[:, 0]
pos_y = pos[:, 1]
ax = axis if axis else plt.gca()
ax.set_xticks([])
ax.set_yticks([])
ax.set_frame_on(False)
if any([not pos_y.any(), not pos_x.any()]):
raise RuntimeError('No position information found, cannot compute '
'geometries for topomap.')
if outlines is None:
xmin, xmax = pos_x.min(), pos_x.max()
ymin, ymax = pos_y.min(), pos_y.max()
else:
xlim = np.inf, -np.inf,
ylim = np.inf, -np.inf,
mask_ = np.c_[outlines['mask_pos']]
xmin, xmax = (np.min(np.r_[xlim[0], mask_[:, 0]]),
np.max(np.r_[xlim[1], mask_[:, 0]]))
ymin, ymax = (np.min(np.r_[ylim[0], mask_[:, 1]]),
np.max(np.r_[ylim[1], mask_[:, 1]]))
# interpolate data
xi = np.linspace(xmin, xmax, res)
yi = np.linspace(ymin, ymax, res)
Xi, Yi = np.meshgrid(xi, yi)
Zi = _griddata(pos_x, pos_y, data, Xi, Yi)
if outlines is None:
_is_default_outlines = False
elif isinstance(outlines, dict):
_is_default_outlines = any(k.startswith('head') for k in outlines)
if _is_default_outlines and image_mask is None:
# prepare masking
image_mask, pos = _make_image_mask(outlines, pos, res)
mask_params = _handle_default('mask_params', mask_params)
# plot outline
linewidth = mask_params['markeredgewidth']
patch = None
if 'patch' in outlines:
patch = outlines['patch']
patch_ = patch() if callable(patch) else patch
patch_.set_clip_on(False)
ax.add_patch(patch_)
ax.set_transform(ax.transAxes)
ax.set_clip_path(patch_)
# plot map and contour
im = ax.imshow(Zi, cmap=cmap, vmin=vmin, vmax=vmax, origin='lower',
aspect='equal', extent=(xmin, xmax, ymin, ymax),
interpolation=image_interp)
# This tackles an incomprehensible matplotlib bug if no contours are
# drawn. To avoid rescalings, we will always draw contours.
# But if no contours are desired we only draw one and make it invisible.
no_contours = False
if contours in (False, None):
contours, no_contours = 1, True
cont = ax.contour(Xi, Yi, Zi, contours, colors='k',
linewidths=linewidth)
if no_contours is True:
for col in cont.collections:
col.set_visible(False)
if _is_default_outlines:
from matplotlib import patches
patch_ = patches.Ellipse((0, 0),
2 * outlines['clip_radius'][0],
2 * outlines['clip_radius'][1],
clip_on=True,
transform=ax.transData)
if _is_default_outlines or patch is not None:
im.set_clip_path(patch_)
# ax.set_clip_path(patch_)
if cont is not None:
for col in cont.collections:
col.set_clip_path(patch_)
if sensors is not False and mask is None:
_plot_sensors(pos_x, pos_y, sensors=sensors, ax=ax)
elif sensors and mask is not None:
idx = np.where(mask)[0]
ax.plot(pos_x[idx], pos_y[idx], **mask_params)
idx = np.where(~mask)[0]
_plot_sensors(pos_x[idx], pos_y[idx], sensors=sensors, ax=ax)
elif not sensors and mask is not None:
idx = np.where(mask)[0]
ax.plot(pos_x[idx], pos_y[idx], **mask_params)
if isinstance(outlines, dict):
outlines_ = dict([(k, v) for k, v in outlines.items() if k not in
['patch', 'autoshrink']])
for k, (x, y) in outlines_.items():
if 'mask' in k:
continue
ax.plot(x, y, color='k', linewidth=linewidth, clip_on=False)
if show_names:
if show_names is True:
def _show_names(x):
return x
else:
_show_names = show_names
show_idx = np.arange(len(names)) if mask is None else np.where(mask)[0]
for ii, (p, ch_id) in enumerate(zip(pos, names)):
if ii not in show_idx:
continue
ch_id = _show_names(ch_id)
ax.text(p[0], p[1], ch_id, horizontalalignment='center',
verticalalignment='center', size='x-small')
plt.subplots_adjust(top=.95)
if onselect is not None:
ax.RS = RectangleSelector(ax, onselect=onselect)
plt_show(show)
return im, cont
def _make_image_mask(outlines, pos, res):
"""Aux function
"""
mask_ = np.c_[outlines['mask_pos']]
xmin, xmax = (np.min(np.r_[np.inf, mask_[:, 0]]),
np.max(np.r_[-np.inf, mask_[:, 0]]))
ymin, ymax = (np.min(np.r_[np.inf, mask_[:, 1]]),
np.max(np.r_[-np.inf, mask_[:, 1]]))
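# If requested, iteratively shrink the sensor positions toward the origin
# until every point falls inside the mask outline.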
if outlines.get('autoshrink', False) is not False:
inside = _inside_contour(pos, mask_)
outside = np.invert(inside)
outlier_points = pos[outside]
while np.any(outlier_points): # auto shrink
pos *= 0.99
inside = _inside_contour(pos, mask_)
outside = np.invert(inside)
outlier_points = pos[outside]
image_mask = np.zeros((res, res), dtype=bool)
xi_mask = np.linspace(xmin, xmax, res)
yi_mask = np.linspace(ymin, ymax, res)
Xi_mask, Yi_mask = np.meshgrid(xi_mask, yi_mask)
pos_ = np.c_[Xi_mask.flatten(), Yi_mask.flatten()]
inds = _inside_contour(pos_, mask_)
image_mask[inds.reshape(image_mask.shape)] = True
return image_mask, pos
def _inside_contour(pos, contour):
"""Aux function"""
npos = len(pos)
x, y = pos[:, :2].T
check_mask = np.ones((npos), dtype=bool)
# points outside the contour's bounding box cannot be inside it
check_mask[((x < np.min(contour[:, 0])) | (y < np.min(contour[:, 1])) |
(x > np.max(contour[:, 0])) | (y > np.max(contour[:, 1])))] = False
critval = 0.1
sel = np.where(check_mask)[0]
for this_sel in sel:
# winding test: the total angle swept by the contour as seen from the
# point is ~2*pi for inside points and ~0 for outside points
contourx = contour[:, 0] - pos[this_sel, 0]
contoury = contour[:, 1] - pos[this_sel, 1]
angle = np.arctan2(contoury, contourx)
angle = np.unwrap(angle)
total = np.sum(np.diff(angle))
check_mask[this_sel] = np.abs(total) > critval
return check_mask
def plot_ica_components(ica, picks=None, ch_type=None, res=64,
layout=None, vmin=None, vmax=None, cmap='RdBu_r',
sensors=True, colorbar=False, title=None,
show=True, outlines='head', contours=6,
image_interp='bilinear', head_pos=None):
"""Project unmixing matrix on interpolated sensor topogrpahy.
Parameters
----------
ica : instance of mne.preprocessing.ICA
The ICA solution.
picks : int | array-like | None
The indices of the sources to be plotted.
If None all are plotted in batches of 20.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
res : int
The resolution of the topomap image (n pixels along each side).
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap
Colormap.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
title : str | None
Title to use.
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.pyplot.Figure or list
The figure object(s).
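Examples
--------
A minimal sketch, assuming ``ica`` is an already fitted
mne.preprocessing.ICA instance:
>>> fig = plot_ica_components(ica, picks=[0, 1, 2, 3])  # doctest: +SKIP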
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid import make_axes_locatable
from ..channels import _get_ch_type
if picks is None: # plot components by sets of 20
ch_type = _get_ch_type(ica, ch_type)
n_components = ica.mixing_matrix_.shape[1]
p = 20
figs = []
for k in range(0, n_components, p):
picks = range(k, min(k + p, n_components))
fig = plot_ica_components(ica, picks=picks,
ch_type=ch_type, res=res, layout=layout,
vmax=vmax, cmap=cmap, sensors=sensors,
colorbar=colorbar, title=title,
show=show, outlines=outlines,
contours=contours,
image_interp=image_interp)
figs.append(fig)
return figs
elif np.isscalar(picks):
picks = [picks]
ch_type = _get_ch_type(ica, ch_type)
data = np.dot(ica.mixing_matrix_[:, picks].T,
ica.pca_components_[:ica.n_components_])
if ica.info is None:
raise RuntimeError('The ICA\'s measurement info is missing. Please '
'fit the ICA or add the corresponding info object.')
data_picks, pos, merge_grads, names, _ = _prepare_topo_plot(ica, ch_type,
layout)
pos, outlines = _check_outlines(pos, outlines, head_pos)
if outlines not in (None, 'head'):
image_mask, pos = _make_image_mask(outlines, pos, res)
else:
image_mask = None
data = np.atleast_2d(data)
data = data[:, data_picks]
# prepare data for iteration
fig, axes = _prepare_trellis(len(data), max_col=5)
if title is None:
title = 'ICA components'
fig.suptitle(title)
if merge_grads:
from ..channels.layout import _merge_grad_data
for ii, data_, ax in zip(picks, data, axes):
ax.set_title('IC #%03d' % ii, fontsize=12)
data_ = _merge_grad_data(data_) if merge_grads else data_
vmin_, vmax_ = _setup_vmin_vmax(data_, vmin, vmax)
im = plot_topomap(data_.flatten(), pos, vmin=vmin_, vmax=vmax_,
res=res, axis=ax, cmap=cmap, outlines=outlines,
image_mask=image_mask, contours=contours,
image_interp=image_interp, show=False)[0]
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax, format='%3.2f', cmap=cmap)
cbar.ax.tick_params(labelsize=12)
cbar.set_ticks((vmin_, vmax_))
cbar.ax.set_title('AU', fontsize=10)
ax.set_yticks([])
ax.set_xticks([])
ax.set_frame_on(False)
tight_layout(fig=fig)
fig.subplots_adjust(top=0.95)
fig.canvas.draw()
plt_show(show)
return fig
def plot_tfr_topomap(tfr, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean', layout=None,
vmin=None, vmax=None, cmap=None, sensors=True,
colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of specific time-frequency intervals of TFR data
Parameters
----------
tfr : AverageTFR
The AverageTFR object.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
If None, no baseline correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file
was found, the layout is automatically generated from the sensor
locations.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data) or in case
data contains only positive values 0. If callable, the output equals
vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range. If None, the
maximum value is used. If callable, the output equals vmax(data).
Defaults to None.
cmap : matplotlib colormap | None
Colormap. If None and the plotted data is all positive, defaults to
'Reds'. If None and data contains also negative values, defaults to
'RdBu_r'. Defaults to None.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle will
be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : str | None
The unit of the channel type used for colorbar labels.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g., to
delete the prefix 'MEG ' from all channel names, pass the function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axis | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Show figure if True.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
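Examples
--------
A minimal sketch, assuming ``tfr`` is an AverageTFR instance (e.g. from
mne.time_frequency.tfr_morlet); the time and frequency bounds are
illustrative:
>>> fig = plot_tfr_topomap(tfr, tmin=0.05, tmax=0.15,
...                        fmin=8., fmax=12., ch_type='grad')  # doctest: +SKIP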
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(tfr, ch_type)
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
picks, pos, merge_grads, names, _ = _prepare_topo_plot(tfr, ch_type,
layout)
if not show_names:
names = None
data = tfr.data
if mode is not None and baseline is not None:
data = rescale(data, tfr.times, baseline, mode, copy=True)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(tfr.times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(tfr.freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
data = data[picks, ifmin:ifmax, itmin:itmax]
data = np.mean(np.mean(data, axis=2), axis=1)[:, np.newaxis]
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
norm = np.min(data) >= 0
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm)
if cmap is None:
cmap = 'Reds' if norm else 'RdBu_r'
if axes is None:
fig = plt.figure()
ax = fig.gca()
else:
fig = axes.figure
ax = axes
ax.set_yticks([])
ax.set_xticks([])
ax.set_frame_on(False)
if title is not None:
ax.set_title(title)
fig_wrapper = list()
selection_callback = partial(_onselect, tfr=tfr, pos=pos, ch_type=ch_type,
itmin=itmin, itmax=itmax, ifmin=ifmin,
ifmax=ifmax, cmap=cmap, fig=fig_wrapper,
layout=layout)
im, _ = plot_topomap(data[:, 0], pos, vmin=vmin, vmax=vmax,
axis=ax, cmap=cmap, image_interp='bilinear',
contours=False, names=names, show_names=show_names,
show=False, onselect=selection_callback)
if colorbar:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
cbar = plt.colorbar(im, cax=cax, format=cbar_fmt, cmap=cmap)
cbar.set_ticks((vmin, vmax))
cbar.ax.tick_params(labelsize=12)
cbar.ax.set_title('AU')
plt_show(show)
return fig
def plot_evoked_topomap(evoked, times="auto", ch_type=None, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scale=None, scale_time=1e3, unit=None,
res=64, size=1, cbar_fmt='%3.1f',
time_format='%01d ms', proj=False, show=True,
show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None,
axes=None):
"""Plot topographic maps of specific time points of evoked data
Parameters
----------
evoked : Evoked
The Evoked object.
times : float | array of floats | "auto" | "peaks".
The time point(s) to plot. If "auto", the number of ``axes`` determines
the amount of time point(s). If ``axes`` is also None, 10 topographies
will be shown with a regular time spacing between the first and last
time instant. If "peaks", finds time points automatically by checking
for local maxima in global field power.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found, the
layout is automatically generated from the sensor locations.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data). Defaults to None.
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib plot
format string (e.g., 'r+' for red plusses). If True, a circle will be
used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1e3 (ms).
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
time_format : str
String format for topomap values. Defaults to "%01d ms"
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g., to
delete the prefix 'MEG ' from all channel names, pass the function
lambda x: x.replace('MEG ', ''). If `mask` is not None, only
significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw. If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used. All matplotlib options are
accepted.
average : float | None
The time window around a given time to be used for averaging (seconds).
For example, 0.01 would translate into a window that starts 5 ms before
and ends 5 ms after a given time point. Defaults to None, which means
no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of the
same length as ``times`` (unless ``times`` is None). If instance of
Axes, ``times`` must be a float or a list of one float.
Defaults to None.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
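Examples
--------
A minimal sketch, assuming ``evoked`` is an mne.Evoked instance (times
are in seconds and illustrative):
>>> fig = plot_evoked_topomap(evoked, times=[0.1, 0.2],
...                           ch_type='mag')  # doctest: +SKIP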
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(evoked, ch_type)
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable # noqa
mask_params = _handle_default('mask_params', mask_params)
mask_params['markersize'] *= size / 2.
mask_params['markeredgewidth'] *= size / 2.
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(times, string_types):
if times == "peaks":
npeaks = 10 if axes is None else len(axes)
times = _find_peaks(evoked, npeaks)
elif times == "auto":
if axes is None:
times = np.linspace(evoked.times[0], evoked.times[-1], 10)
else:
times = np.linspace(evoked.times[0], evoked.times[-1],
len(axes))
elif np.isscalar(times):
times = [times]
times = np.array(times)
if times.ndim != 1:
raise ValueError('times must be 1D, got %d dimensions' % times.ndim)
if len(times) > 20:
raise RuntimeError('Too many plots requested. Please pass fewer '
'than 20 time instants.')
n_times = len(times)
nax = n_times + bool(colorbar)
width = size * nax
height = size + max(0, 0.1 * (4 - size)) + bool(title) * 0.5
if axes is None:
plt.figure(figsize=(width, height))
axes = list()
for ax_idx in range(len(times)):
if colorbar: # Make room for the colorbar
axes.append(plt.subplot(1, n_times + 1, ax_idx + 1))
else:
axes.append(plt.subplot(1, n_times, ax_idx + 1))
elif colorbar:
logger.warning('Colorbar is drawn to the rightmost column of the '
'figure.\nBe sure to provide enough space for it '
'or turn it off with colorbar=False.')
if len(axes) != n_times:
raise RuntimeError('Axes and times must be equal in size.')
tmin, tmax = evoked.times[[0, -1]]
_time_comp = _time_mask(times=times, tmin=tmin, tmax=tmax)
if not np.all(_time_comp):
raise ValueError('Times should be between {0:0.3f} and {1:0.3f}. (Got '
'{2}).'.format(tmin, tmax,
['%0.3f' % t
for t in times[~_time_comp]]))
picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
evoked, ch_type, layout)
if ch_type.startswith('planar'):
key = 'grad'
else:
key = ch_type
scale = _handle_default('scalings', scale)[key]
unit = _handle_default('units', unit)[key]
if not show_names:
names = None
w_frame = plt.rcParams['figure.subplot.wspace'] / (2 * nax)
top_frame = max((0.05 if title is None else 0.25), .2 / size)
fig = axes[0].get_figure()
fig.subplots_adjust(left=w_frame, right=1 - w_frame, bottom=0,
top=1 - top_frame)
time_idx = [np.where(evoked.times >= t)[0][0] for t in times]
if proj is True and evoked.proj is not True:
data = evoked.copy().apply_proj().data
else:
data = evoked.data
if average is None:
data = data[np.ix_(picks, time_idx)]
elif isinstance(average, float):
if not average > 0:
raise ValueError('The average parameter must be positive. You '
'passed a non-positive value')
data_ = np.zeros((len(picks), len(time_idx)))
ave_time = float(average) / 2.
iter_times = evoked.times[time_idx]
for ii, (idx, tmin_, tmax_) in enumerate(zip(time_idx,
iter_times - ave_time,
iter_times + ave_time)):
my_range = (tmin_ < evoked.times) & (evoked.times < tmax_)
data_[:, ii] = data[picks][:, my_range].mean(-1)
data = data_
else:
raise ValueError('The average parameter must be None or a float. '
'Check your input.')
data *= scale
if merge_grads:
from ..channels.layout import _merge_grad_data
data = _merge_grad_data(data)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
images, contours_ = [], []
if mask is not None:
_picks = picks[::2 if ch_type not in ['mag', 'eeg'] else 1]
mask_ = mask[np.ix_(_picks, time_idx)]
pos, outlines = _check_outlines(pos, outlines, head_pos)
if outlines is not None:
image_mask, pos = _make_image_mask(outlines, pos, res)
else:
image_mask = None
for idx, time in enumerate(times):
tp, cn = plot_topomap(data[:, idx], pos, vmin=vmin, vmax=vmax,
sensors=sensors, res=res, names=names,
show_names=show_names, cmap=cmap,
mask=mask_[:, idx] if mask is not None else None,
mask_params=mask_params, axis=axes[idx],
outlines=outlines, image_mask=image_mask,
contours=contours, image_interp=image_interp,
show=False)
images.append(tp)
if cn is not None:
contours_.append(cn)
if time_format is not None:
axes[idx].set_title(time_format % (time * scale_time))
if title is not None:
plt.suptitle(title, verticalalignment='top', size='x-large')
if colorbar:
cax = plt.subplot(1, n_times + 1, n_times + 1)
# resize the colorbar (by default the color fills the whole axes)
cpos = cax.get_position()
if size <= 1:
cpos.x0 = 1 - (.7 + .1 / size) / nax
cpos.x1 = cpos.x0 + .1 / nax
cpos.y0 = .2
cpos.y1 = .7
cax.set_position(cpos)
if unit is not None:
cax.set_title(unit)
cbar = fig.colorbar(images[-1], ax=cax, cax=cax, format=cbar_fmt)
cbar.set_ticks([vmin, 0, vmax])
if proj == 'interactive':
_check_delayed_ssp(evoked)
params = dict(evoked=evoked, fig=fig, projs=evoked.info['projs'],
picks=picks, images=images, contours=contours_,
time_idx=time_idx, scale=scale, merge_grads=merge_grads,
res=res, pos=pos, image_mask=image_mask,
plot_update_proj_callback=_plot_update_evoked_topomap)
_draw_proj_checkbox(None, params)
plt_show(show)
return fig
def _plot_topomap_multi_cbar(data, pos, ax, title=None, unit=None,
vmin=None, vmax=None, cmap='RdBu_r',
colorbar=False, cbar_fmt='%3.3f'):
"""Aux Function"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
ax.set_yticks([])
ax.set_xticks([])
ax.set_frame_on(False)
vmin = np.min(data) if vmin is None else vmin
vmax = np.max(data) if vmax is None else vmax
if title is not None:
ax.set_title(title, fontsize=10)
im, _ = plot_topomap(data, pos, vmin=vmin, vmax=vmax, axis=ax,
cmap=cmap, image_interp='bilinear', contours=False,
show=False)
if colorbar is True:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="10%", pad=0.25)
cbar = plt.colorbar(im, cax=cax, format=cbar_fmt)
cbar.set_ticks((vmin, vmax))
if unit is not None:
cbar.ax.set_title(unit, fontsize=8)
cbar.ax.tick_params(labelsize=8)
@verbose
def plot_epochs_psd_topomap(epochs, bands=None, vmin=None, vmax=None,
tmin=None, tmax=None,
proj=False, n_fft=256, ch_type=None,
n_overlap=0, layout=None,
cmap='RdBu_r', agg_fun=None, dB=False, n_jobs=1,
normalize=False, cbar_fmt='%0.3f',
outlines='head', show=True, verbose=None):
"""Plot the topomap of the power spectral density across epochs
Parameters
----------
epochs : instance of Epochs
The epochs object
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None np.min(data) is used. If callable, the output equals
vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
n_fft : int
Number of points to use in Welch FFT calculations.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are collected in
pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
n_overlap : int
The number of points of overlap between blocks.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
agg_fun : callable
The function used to aggregate over frequencies.
Defaults to np.sum if normalize is True, else np.mean.
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize is
False.
n_jobs : int
Number of jobs to run in parallel.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
show : bool
Show figure if True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
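Examples
--------
A minimal sketch, assuming ``epochs`` is an mne.Epochs instance; the
keyword values are illustrative:
>>> fig = plot_epochs_psd_topomap(epochs, ch_type='eeg',
...                               normalize=True)  # doctest: +SKIP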
"""
from ..channels import _get_ch_type
ch_type = _get_ch_type(epochs, ch_type)
picks, pos, merge_grads, names, ch_type = _prepare_topo_plot(
epochs, ch_type, layout)
psds, freqs = compute_epochs_psd(epochs, picks=picks, n_fft=n_fft,
tmin=tmin, tmax=tmax,
n_overlap=n_overlap, proj=proj,
n_jobs=n_jobs)
psds = np.mean(psds, axis=0)
if merge_grads:
from ..channels.layout import _merge_grad_data
psds = _merge_grad_data(psds)
return plot_psds_topomap(
psds=psds, freqs=freqs, pos=pos, agg_fun=agg_fun, vmin=vmin,
vmax=vmax, bands=bands, cmap=cmap, dB=dB, normalize=normalize,
cbar_fmt=cbar_fmt, outlines=outlines, show=show)
def plot_psds_topomap(
psds, freqs, pos, agg_fun=None, vmin=None, vmax=None, bands=None,
cmap='RdBu_r', dB=True, normalize=False, cbar_fmt='%0.3f',
outlines='head', show=True):
"""Plot spatial maps of PSDs
Parameters
----------
psds : np.ndarray of float, shape (n_channels, n_freqs)
Power spectral densities
freqs : np.ndarray of float, shape (n_freqs)
Frequencies used to compute psds.
pos : numpy.ndarray of float, shape (n_sensors, 2)
The positions of the sensors.
agg_fun : callable
The function used to aggregate over frequencies.
Defaults to np.sum if normalize is True, else np.mean.
vmin : float | callable | None
The value specifying the lower bound of the color range.
If None np.min(data) is used. If callable, the output equals
vmin(data).
vmax : float | callable | None
The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If callable, the output
equals vmax(data). Defaults to None.
bands : list of tuple | None
The lower and upper frequency and the name for that band. If None,
(default) expands to:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
dB : bool
If True, transform data to decibels (with ``10 * np.log10(data)``)
following the application of `agg_fun`. Only valid if normalize is
False.
normalize : bool
If True, each band will be divided by the total power. Defaults to
False.
cbar_fmt : str
The colorbar format. Defaults to '%0.3f'.
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will be
drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos' will
serve as image mask, and the 'autoshrink' (bool) field will trigger
automated shrinking of the positions due to points outside the outline.
Alternatively, a matplotlib patch object can be passed for advanced
masking options, either directly or as a function that returns patches
(required for multi-axis plots). If None, nothing will be drawn.
Defaults to 'head'.
show : bool
Show figure if True.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
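Examples
--------
A minimal sketch with synthetic spectra (shapes, frequencies and sensor
positions are made up for illustration):
>>> import numpy as np  # doctest: +SKIP
>>> rng = np.random.RandomState(0)  # doctest: +SKIP
>>> psds = np.abs(rng.randn(32, 50))  # doctest: +SKIP
>>> freqs = np.linspace(1., 50., 50)  # doctest: +SKIP
>>> pos = rng.uniform(-0.4, 0.4, size=(32, 2))  # doctest: +SKIP
>>> fig = plot_psds_topomap(psds, freqs, pos, show=False)  # doctest: +SKIP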
"""
import matplotlib.pyplot as plt
if bands is None:
bands = [(0, 4, 'Delta'), (4, 8, 'Theta'), (8, 12, 'Alpha'),
(12, 30, 'Beta'), (30, 45, 'Gamma')]
if agg_fun is None:
agg_fun = np.sum if normalize is True else np.mean
if normalize is True:
psds /= psds.sum(axis=-1)[..., None]
assert np.allclose(psds.sum(axis=-1), 1.)
n_axes = len(bands)
fig, axes = plt.subplots(1, n_axes, figsize=(2 * n_axes, 1.5))
if n_axes == 1:
axes = [axes]
for ax, (fmin, fmax, title) in zip(axes, bands):
freq_mask = (fmin < freqs) & (freqs < fmax)
if freq_mask.sum() == 0:
raise RuntimeError('No frequencies in band "%s" (%s, %s)'
% (title, fmin, fmax))
data = agg_fun(psds[:, freq_mask], axis=1)
if dB is True and normalize is False:
data = 10 * np.log10(data)
unit = 'dB'
else:
unit = 'power'
_plot_topomap_multi_cbar(data, pos, ax, title=title,
vmin=vmin, vmax=vmax, cmap=cmap,
colorbar=True, unit=unit, cbar_fmt=cbar_fmt)
tight_layout(fig=fig)
fig.canvas.draw()
plt_show(show)
return fig
def _onselect(eclick, erelease, tfr, pos, ch_type, itmin, itmax, ifmin, ifmax,
cmap, fig, layout=None):
"""Callback called from topomap for drawing average tfr over channels."""
import matplotlib.pyplot as plt
pos, _ = _check_outlines(pos, outlines='head', head_pos=None)
ax = eclick.inaxes
xmin = min(eclick.xdata, erelease.xdata)
xmax = max(eclick.xdata, erelease.xdata)
ymin = min(eclick.ydata, erelease.ydata)
ymax = max(eclick.ydata, erelease.ydata)
indices = [i for i in range(len(pos)) if pos[i][0] < xmax and
pos[i][0] > xmin and pos[i][1] < ymax and pos[i][1] > ymin]
for idx, circle in enumerate(ax.artists):
if idx in indices:
circle.set_color('r')
else:
circle.set_color('black')
plt.gcf().canvas.draw()
if not indices:
return
data = tfr.data
if ch_type == 'mag':
picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
elif ch_type == 'grad':
picks = pick_types(tfr.info, meg=ch_type, ref_meg=False)
from ..channels.layout import _pair_grad_sensors
grads = _pair_grad_sensors(tfr.info, layout=layout,
topomap_coords=False)
idxs = list()
for idx in indices:
idxs.append(grads[idx * 2])
idxs.append(grads[idx * 2 + 1]) # pair of grads
data = np.mean(data[idxs, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[x] for x in idxs]
elif ch_type == 'eeg':
picks = pick_types(tfr.info, meg=False, eeg=True, ref_meg=False)
data = np.mean(data[indices, ifmin:ifmax, itmin:itmax], axis=0)
chs = [tfr.ch_names[picks[x]] for x in indices]
logger.info('Averaging TFR over channels ' + str(chs))
if len(fig) == 0:
fig.append(figure_nobar())
if not plt.fignum_exists(fig[0].number):
fig[0] = figure_nobar()
ax = fig[0].add_subplot(111)
itmax = min(itmax, len(tfr.times) - 1)
ifmax = min(ifmax, len(tfr.freqs) - 1)
extent = (tfr.times[itmin] * 1e3, tfr.times[itmax] * 1e3, tfr.freqs[ifmin],
tfr.freqs[ifmax])
title = 'Average over %d %s channels.' % (len(chs), ch_type)
ax.set_title(title)
ax.set_xlabel('Time (ms)')
ax.set_ylabel('Frequency (Hz)')
img = ax.imshow(data, extent=extent, aspect="auto", origin="lower",
cmap=cmap)
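# Note: the colorbar call below also appends a new Axes to the figure, so
# ``get_axes()[1]`` resolves once the right-hand side has executed.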
if len(fig[0].get_axes()) < 2:
fig[0].get_axes()[1].cbar = fig[0].colorbar(mappable=img)
else:
fig[0].get_axes()[1].cbar.on_mappable_changed(mappable=img)
fig[0].canvas.draw()
plt.figure(fig[0].number)
plt_show(True)
def _find_peaks(evoked, npeaks):
"""Helper function for finding peaks from evoked data
Returns ``npeaks`` biggest peaks as a list of time points.
"""
argrelmax = _get_argrelmax()
gfp = evoked.data.std(axis=0)
order = len(evoked.times) // 30
if order < 1:
order = 1
peaks = argrelmax(gfp, order=order, axis=0)[0]
if len(peaks) > npeaks:
max_indices = np.argsort(gfp[peaks])[-npeaks:]
peaks = np.sort(peaks[max_indices])
times = evoked.times[peaks]
if len(times) == 0:
times = [evoked.times[gfp.argmax()]]
return times
|
yousrabk/mne-python
|
mne/viz/topomap.py
|
Python
|
bsd-3-clause
| 66,755
|
from floyd.client.base import FloydHttpClient
from floyd.model.version import CliVersion
from floyd.log import logger as floyd_logger
class VersionClient(FloydHttpClient):
"""
Client to get API version from the server
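A minimal usage sketch (hedged; requires a reachable Floyd API server):
>>> client = VersionClient()  # doctest: +SKIP
>>> version = client.get_cli_version()  # doctest: +SKIP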
"""
def __init__(self):
self.url = "/cli_version"
super(VersionClient, self).__init__(skip_auth=True)
def get_cli_version(self):
response = self.request("GET", self.url)
data_dict = response.json()
floyd_logger.debug("CLI Version info: %s", data_dict)
return CliVersion.from_dict(data_dict)
|
houqp/floyd-cli
|
floyd/client/version.py
|
Python
|
apache-2.0
| 580
|
import sqlalchemy as sa
from sqlalchemy_utils import table_name
from tests import TestCase
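# ``table_name`` resolves a table name from a declarative class, a mapped
# attribute, or an instance alike; illustrative call (assumes a mapped
# ``Building`` class like the one below): table_name(Building) -> 'building'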
class TestTableName(TestCase):
def create_models(self):
class Building(self.Base):
__tablename__ = 'building'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
self.Building = Building
def test_class(self):
assert table_name(self.Building) == 'building'
del self.Building.__tablename__
assert table_name(self.Building) == 'building'
def test_attribute(self):
assert table_name(self.Building.id) == 'building'
assert table_name(self.Building.name) == 'building'
def test_target(self):
assert table_name(self.Building()) == 'building'
|
tonyseek/sqlalchemy-utils
|
tests/functions/test_table_name.py
|
Python
|
bsd-3-clause
| 775
|
import logging
import six
from ray.tune.error import TuneError
from ray.tune.experiment import convert_to_experiment_list, Experiment
from ray.tune.analysis import ExperimentAnalysis
from ray.tune.suggest import BasicVariantGenerator
from ray.tune.trial import Trial
from ray.tune.trainable import Trainable
from ray.tune.ray_trial_executor import RayTrialExecutor
from ray.tune.registry import get_trainable_cls
from ray.tune.syncer import wait_for_sync
from ray.tune.trial_runner import TrialRunner
from ray.tune.progress_reporter import CLIReporter, JupyterNotebookReporter
from ray.tune.schedulers import (HyperBandScheduler, AsyncHyperBandScheduler,
FIFOScheduler, MedianStoppingRule)
from ray.tune.web_server import TuneServer
logger = logging.getLogger(__name__)
_SCHEDULERS = {
"FIFO": FIFOScheduler,
"MedianStopping": MedianStoppingRule,
"HyperBand": HyperBandScheduler,
"AsyncHyperBand": AsyncHyperBandScheduler,
}
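# Example (illustrative): ``_SCHEDULERS["FIFO"]()`` builds a FIFOScheduler;
# ``_make_scheduler`` below performs this lookup from parsed CLI args.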
try:
class_name = get_ipython().__class__.__name__
IS_NOTEBOOK = "Terminal" not in class_name
except NameError:
IS_NOTEBOOK = False
def _make_scheduler(args):
if args.scheduler in _SCHEDULERS:
return _SCHEDULERS[args.scheduler](**args.scheduler_config)
else:
raise TuneError("Unknown scheduler: {}, should be one of {}".format(
args.scheduler, _SCHEDULERS.keys()))
def _check_default_resources_override(run_identifier):
if not isinstance(run_identifier, six.string_types):
# If obscure dtype, assume it is overridden.
return True
trainable_cls = get_trainable_cls(run_identifier)
return hasattr(trainable_cls, "default_resource_request") and (
trainable_cls.default_resource_request.__code__ !=
Trainable.default_resource_request.__code__)
def _report_progress(runner, reporter, done=False):
"""Reports experiment progress.
Args:
runner (TrialRunner): Trial runner to report on.
reporter (ProgressReporter): Progress reporter.
done (bool): Whether this is the last progress report attempt.
"""
trials = runner.get_trials()
if reporter.should_report(trials, done=done):
sched_debug_str = runner.scheduler_alg.debug_string()
executor_debug_str = runner.trial_executor.debug_string()
reporter.report(trials, sched_debug_str, executor_debug_str)
def run(run_or_experiment,
name=None,
stop=None,
config=None,
resources_per_trial=None,
num_samples=1,
local_dir=None,
upload_dir=None,
trial_name_creator=None,
loggers=None,
sync_to_cloud=None,
sync_to_driver=None,
checkpoint_freq=0,
checkpoint_at_end=False,
sync_on_checkpoint=True,
keep_checkpoints_num=None,
checkpoint_score_attr=None,
global_checkpoint_period=10,
export_formats=None,
max_failures=0,
restore=None,
search_alg=None,
scheduler=None,
with_server=False,
server_port=TuneServer.DEFAULT_PORT,
verbose=2,
progress_reporter=None,
resume=False,
queue_trials=False,
reuse_actors=False,
trial_executor=None,
raise_on_failed_trial=True,
return_trials=False,
ray_auto_init=True,
sync_function=None):
"""Executes training.
Args:
run_or_experiment (function|class|str|Experiment): If
function|class|str, this is the algorithm or model to train.
This may refer to the name of a built-in algorithm
(e.g. RLLib's DQN or PPO), a user-defined trainable
function or class, or the string identifier of a
trainable function or class registered in the tune registry.
If Experiment, then Tune will execute training based on
Experiment.spec.
name (str): Name of experiment.
stop (dict|callable): The stopping criteria. If dict, the keys may be
any field in the return result of 'train()', whichever is
reached first. If function, it must take (trial_id, result) as
arguments and return a boolean (True if trial should be stopped,
False otherwise). This can also be a subclass of
``ray.tune.Stopper``, which allows users to implement
custom experiment-wide stopping (i.e., stopping an entire Tune
run based on some time constraint).
config (dict): Algorithm-specific configuration for Tune variant
generation (e.g. env, hyperparams). Defaults to empty dict.
Custom search algorithms may ignore this.
resources_per_trial (dict): Machine resources to allocate per trial,
e.g. ``{"cpu": 64, "gpu": 8}``. Note that GPUs will not be
assigned unless you specify them here. Defaults to 1 CPU and 0
GPUs in ``Trainable.default_resource_request()``.
num_samples (int): Number of times to sample from the
hyperparameter space. Defaults to 1. If `grid_search` is
provided as an argument, the grid will be repeated
`num_samples` times.
local_dir (str): Local dir to save training results to.
Defaults to ``~/ray_results``.
upload_dir (str): Optional URI to sync training results and checkpoints
to (e.g. ``s3://bucket`` or ``gs://bucket``).
trial_name_creator (func): Optional function for generating
the trial string representation.
loggers (list): List of logger creators to be used with
each Trial. If None, defaults to ray.tune.logger.DEFAULT_LOGGERS.
See `ray/tune/logger.py`.
sync_to_cloud (func|str): Function for syncing the local_dir to and
from upload_dir. If string, then it must be a string template that
includes `{source}` and `{target}` for the syncer to run. If not
provided, the sync command defaults to standard S3 or gsutil sync
commands.
sync_to_driver (func|str|bool): Function for syncing trial logdir from
remote node to local. If string, then it must be a string template
that includes `{source}` and `{target}` for the syncer to run.
If True or not provided, it defaults to using rsync. If False,
syncing to driver is disabled.
checkpoint_freq (int): How many training iterations between
checkpoints. A value of 0 (default) disables checkpointing.
checkpoint_at_end (bool): Whether to checkpoint at the end of the
experiment regardless of the checkpoint_freq. Default is False.
sync_on_checkpoint (bool): Force sync-down of trial checkpoint to
driver. If set to False, checkpoint syncing from worker to driver
is asynchronous and best-effort. This does not affect persistent
storage syncing. Defaults to True.
keep_checkpoints_num (int): Number of checkpoints to keep. A value of
`None` keeps all checkpoints. Defaults to `None`. If set, need
to provide `checkpoint_score_attr`.
checkpoint_score_attr (str): Specifies by which attribute to rank the
best checkpoint. Default is increasing order. If attribute starts
with `min-` it will rank attribute in decreasing order, i.e.
`min-validation_loss`.
global_checkpoint_period (int): Seconds between global checkpointing.
This does not affect `checkpoint_freq`, which specifies frequency
for individual trials.
export_formats (list): List of formats that are exported at the end of
the experiment. Default is None.
max_failures (int): Try to recover a trial at least this many times.
Ray will recover from the latest checkpoint if present.
Setting to -1 will lead to infinite recovery retries.
Setting to 0 will disable retries. Defaults to 3.
restore (str): Path to checkpoint. Only makes sense to set if
running 1 trial. Defaults to None.
search_alg (SearchAlgorithm): Search Algorithm. Defaults to
BasicVariantGenerator.
scheduler (TrialScheduler): Scheduler for executing
the experiment. Choose among FIFO (default), MedianStopping,
AsyncHyperBand, HyperBand and PopulationBasedTraining. Refer to
ray.tune.schedulers for more options.
with_server (bool): Starts a background Tune server. Needed for
using the Client API.
server_port (int): Port number for launching TuneServer.
verbose (int): 0, 1, or 2. Verbosity mode. 0 = silent,
1 = only status updates, 2 = status and trial results.
progress_reporter (ProgressReporter): Progress reporter for reporting
intermediate experiment progress. Defaults to CLIReporter if
running in command-line, or JupyterNotebookReporter if running in
a Jupyter notebook.
resume (str|bool): One of "LOCAL", "REMOTE", "PROMPT", or bool.
LOCAL/True restores the checkpoint from the local_checkpoint_dir.
REMOTE restores the checkpoint from remote_checkpoint_dir.
PROMPT provides CLI feedback. False forces a new
experiment. If resume is set but checkpoint does not exist,
ValueError will be thrown.
queue_trials (bool): Whether to queue trials when the cluster does
not currently have enough resources to launch one. This should
be set to True when running on an autoscaling cluster to enable
automatic scale-up.
reuse_actors (bool): Whether to reuse actors between different trials
when possible. This can drastically speed up experiments that start
and stop actors often (e.g., PBT in time-multiplexing mode). This
requires trials to have the same resource requirements.
trial_executor (TrialExecutor): Manage the execution of trials.
raise_on_failed_trial (bool): Raise TuneError if any trial ends in the
ERROR state when the experiments complete.
ray_auto_init (bool): Automatically starts a local Ray cluster
if using a RayTrialExecutor (which is the default) and
if Ray is not initialized. Defaults to True.
sync_function: Deprecated. See `sync_to_cloud` and
`sync_to_driver`.
Returns:
List of Trial objects.
Raises:
TuneError if any trials failed and `raise_on_failed_trial` is True.
Examples:
>>> tune.run(mytrainable, scheduler=PopulationBasedTraining())
>>> tune.run(mytrainable, num_samples=5, reuse_actors=True)
>>> tune.run(
>>> "PG",
>>> num_samples=5,
>>> config={
>>> "env": "CartPole-v0",
>>> "lr": tune.sample_from(lambda _: np.random.rand())
>>> }
>>> )
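>>> # A minimal sketch of dict-based stopping (illustrative only; assumes
>>> # the trainable reports a "training_iteration" field in its results):
>>> tune.run(
>>> mytrainable,
>>> stop={"training_iteration": 10},
>>> num_samples=2
>>> )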
"""
trial_executor = trial_executor or RayTrialExecutor(
queue_trials=queue_trials,
reuse_actors=reuse_actors,
ray_auto_init=ray_auto_init)
if isinstance(run_or_experiment, list):
experiments = run_or_experiment
else:
experiments = [run_or_experiment]
if len(experiments) > 1:
logger.info(
"Running multiple concurrent experiments is experimental and may "
"not work with certain features.")
for i, exp in enumerate(experiments):
if not isinstance(exp, Experiment):
run_identifier = Experiment.register_if_needed(exp)
experiments[i] = Experiment(
name=name,
run=run_identifier,
stop=stop,
config=config,
resources_per_trial=resources_per_trial,
num_samples=num_samples,
local_dir=local_dir,
upload_dir=upload_dir,
sync_to_driver=sync_to_driver,
trial_name_creator=trial_name_creator,
loggers=loggers,
checkpoint_freq=checkpoint_freq,
checkpoint_at_end=checkpoint_at_end,
sync_on_checkpoint=sync_on_checkpoint,
keep_checkpoints_num=keep_checkpoints_num,
checkpoint_score_attr=checkpoint_score_attr,
export_formats=export_formats,
max_failures=max_failures,
restore=restore,
sync_function=sync_function)
else:
logger.debug("Ignoring some parameters passed into tune.run.")
if sync_to_cloud:
for exp in experiments:
assert exp.remote_checkpoint_dir, (
"Need `upload_dir` if `sync_to_cloud` given.")
runner = TrialRunner(
search_alg=search_alg or BasicVariantGenerator(),
scheduler=scheduler or FIFOScheduler(),
local_checkpoint_dir=experiments[0].checkpoint_dir,
remote_checkpoint_dir=experiments[0].remote_checkpoint_dir,
sync_to_cloud=sync_to_cloud,
stopper=experiments[0].stopper,
checkpoint_period=global_checkpoint_period,
resume=resume,
launch_web_server=with_server,
server_port=server_port,
verbose=bool(verbose > 1),
trial_executor=trial_executor)
for exp in experiments:
runner.add_experiment(exp)
if progress_reporter is None:
if IS_NOTEBOOK:
progress_reporter = JupyterNotebookReporter(overwrite=verbose < 2)
else:
progress_reporter = CLIReporter()
# User Warning for GPUs
if trial_executor.has_gpus():
if isinstance(resources_per_trial,
dict) and "gpu" in resources_per_trial:
# "gpu" is manually set.
pass
elif _check_default_resources_override(experiments[0].run_identifier):
# "default_resources" is manually overriden.
pass
else:
logger.warning("Tune detects GPUs, but no trials are using GPUs. "
"To enable trials to use GPUs, set "
"tune.run(resources_per_trial={'gpu': 1}...) "
"which allows Tune to expose 1 GPU to each trial. "
"You can also override "
"`Trainable.default_resource_request` if using the "
"Trainable API.")
while not runner.is_finished():
runner.step()
if verbose:
_report_progress(runner, progress_reporter)
try:
runner.checkpoint(force=True)
except Exception:
logger.exception("Trial Runner checkpointing failed.")
if verbose:
_report_progress(runner, progress_reporter, done=True)
wait_for_sync()
errored_trials = []
for trial in runner.get_trials():
if trial.status != Trial.TERMINATED:
errored_trials += [trial]
if errored_trials:
if raise_on_failed_trial:
raise TuneError("Trials did not complete", errored_trials)
else:
logger.error("Trials did not complete: %s", errored_trials)
trials = runner.get_trials()
if return_trials:
return trials
logger.info("Returning an analysis object by default. You can call "
"`analysis.trials` to retrieve a list of trials. "
"This message will be removed in future versions of Tune.")
return ExperimentAnalysis(runner.checkpoint_file, trials=trials)
def run_experiments(experiments,
search_alg=None,
scheduler=None,
with_server=False,
server_port=TuneServer.DEFAULT_PORT,
verbose=2,
progress_reporter=None,
resume=False,
queue_trials=False,
reuse_actors=False,
trial_executor=None,
raise_on_failed_trial=True,
concurrent=True):
"""Runs and blocks until all trials finish.
Examples:
>>> experiment_spec = Experiment("experiment", my_func)
>>> run_experiments(experiments=experiment_spec)
>>> experiment_spec = {"experiment": {"run": my_func}}
>>> run_experiments(experiments=experiment_spec)
>>> run_experiments(
>>> experiments=experiment_spec,
>>> scheduler=MedianStoppingRule(...))
>>> run_experiments(
>>> experiments=experiment_spec,
>>> search_alg=SearchAlgorithm(),
>>> scheduler=MedianStoppingRule(...))
Returns:
List of Trial objects, holding data for each executed trial.
"""
# It is important to do this here because it schematizes
# the experiments and conducts the implicit registration.
experiments = convert_to_experiment_list(experiments)
if concurrent:
return run(
experiments,
search_alg=search_alg,
scheduler=scheduler,
with_server=with_server,
server_port=server_port,
verbose=verbose,
progress_reporter=progress_reporter,
resume=resume,
queue_trials=queue_trials,
reuse_actors=reuse_actors,
trial_executor=trial_executor,
raise_on_failed_trial=raise_on_failed_trial,
return_trials=True)
else:
trials = []
for exp in experiments:
trials += run(
exp,
search_alg=search_alg,
scheduler=scheduler,
with_server=with_server,
server_port=server_port,
verbose=verbose,
progress_reporter=progress_reporter,
resume=resume,
queue_trials=queue_trials,
reuse_actors=reuse_actors,
trial_executor=trial_executor,
raise_on_failed_trial=raise_on_failed_trial,
return_trials=True)
return trials
|
stephanie-wang/ray
|
python/ray/tune/tune.py
|
Python
|
apache-2.0
| 18,227
|
import sys, os
import subprocess
nw_exe = os.path.normpath(sys.argv[1])
nw_dll = os.path.normpath(sys.argv[2])
node_dll = os.path.normpath(sys.argv[3])
out_file = os.path.normpath(sys.argv[4])
sym_file = nw_exe + ".sym"
dll_sym_file = nw_dll + ".sym"
node_sym_file = node_dll + ".sym"
dump_exe = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dump_syms.exe')
subprocess.call([dump_exe, nw_exe], stdout=open(sym_file, 'w'))
subprocess.call([dump_exe, nw_dll], stdout=open(dll_sym_file, 'w'))
subprocess.call([dump_exe, node_dll], stdout=open(node_sym_file, 'w'))
lzma_exe = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', '..', 'third_party', 'lzma_sdk', 'Executable', '7za.exe')
subprocess.call([lzma_exe, 'a', '-t7z', out_file, sym_file, dll_sym_file, node_sym_file])
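# A minimal invocation sketch (hypothetical paths; assumes dump_syms.exe sits
# next to this script and 7za.exe is at the relative location used above):
#   python dump_win_syms.py out\nw.exe out\nw.dll out\node.dll out\nw_syms.7z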
|
nwjs/nw.js
|
tools/dump_win_syms.py
|
Python
|
mit
| 804
|
# ICE Revision: $Id$
"""Watches output and analyzes it"""
from .BasicWatcher import BasicWatcher
from .AnalyzedCommon import AnalyzedCommon
class AnalyzedWatcher(BasicWatcher,AnalyzedCommon):
def __init__(self,filename,analyzer,silent=False,tailLength=1000,sleep=0.1):
"""@param analyzer: analyzer
@param filename: name of the logfile to watch
@param silent: if True no output is sent to stdout
@param tailLength: number of bytes at the end of the fail that should be output.
Because data is output on a per-line-basis
@param sleep: interval to sleep if no line is returned"""
BasicWatcher.__init__(self,filename,silent=silent,tailLength=tailLength,sleep=sleep)
AnalyzedCommon.__init__(self,self.filename,analyzer)
# Should work with Python3 and Python2
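# A minimal usage sketch (illustrative; assumes a PyFoam analyzer instance and
# the tailing loop inherited from BasicWatcher):
#   from PyFoam.LogAnalysis.BoundingLogAnalyzer import BoundingLogAnalyzer
#   watcher = AnalyzedWatcher("run.log", BoundingLogAnalyzer())
#   watcher.start()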
|
takaakiaoki/PyFoam
|
PyFoam/Execution/AnalyzedWatcher.py
|
Python
|
gpl-2.0
| 827
|
#!/usr/bin/env python
"""
Loads a json molecule and draws atoms in Blender.
Blender scripts are weird. Either run this inside of Blender or in a shell with
blender foo.blend -P molecule_to_blender.py
The script expects an input file named "molecule.json" and should be in the
same directory as "atoms.json"
Written by Patrick Fuller, patrickfuller@gmail.com, 28 Nov 12
"""
import bpy
from math import acos
from mathutils import Vector
import json
import os
import sys
# Atomic radii from wikipedia, scaled to Blender radii (C = 0.4 units)
# http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page)
# Atomic colors from cpk
# http://jmol.sourceforge.net/jscolors/
PATH = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(PATH, 'atoms.json')) as in_file:
atom_data = json.load(in_file)
def draw_molecule(molecule, center=(0, 0, 0), show_bonds=True, join=True,
name='molecule'):
"""Draw a JSON-formatted molecule in Blender.
This method uses a couple of tricks from [1] to improve rendering speed.
In particular, it minimizes the amount of unique meshes and materials,
and doesn't draw until all objects are initialized.
[1] https://blenderartists.org/forum/showthread.php
?273149-Generating-a-large-number-of-mesh-primitives
Args:
molecule: The molecule to be drawn, as a python object following the
JSON convention set in this project.
center: (Optional, default (0, 0, 0)) Cartesian center of molecule. Use
to draw multiple molecules in different locations.
show_bonds: (Optional, default True) Draws a ball-and-stick model if
True, and a space-filling model if False.
join: (Optional, default True) Joins the molecule into a single object.
Set to False if you want to individually manipulate atoms/bonds.
name: (Optional, default "molecule") Collection name for this molecule.
Returns:
If run in a blender context, will return a visual object of the
molecule.
"""
collection = bpy.data.collections.new(name)
bpy.context.scene.collection.children.link(collection)
shapes = []
# If using space-filling model, scale up atom size and remove bonds
# Add atom primitive
bpy.ops.object.select_all(action='DESELECT')
bpy.ops.mesh.primitive_uv_sphere_add()
sphere = bpy.context.object
# Initialize bond material if it's going to be used.
if show_bonds:
bpy.data.materials.new(name='bond')
bpy.data.materials['bond'].diffuse_color = atom_data['bond']['color'] + [1]
bpy.data.materials['bond'].specular_intensity = 0.2
bpy.ops.mesh.primitive_cylinder_add()
cylinder = bpy.context.object
cylinder.active_material = bpy.data.materials['bond']
for atom in molecule['atoms']:
if atom['element'] not in atom_data:
atom['element'] = 'undefined'
if atom['element'] not in bpy.data.materials:
key = atom['element']
bpy.data.materials.new(name=key)
bpy.data.materials[key].diffuse_color = atom_data[key]['color'] + [1]
bpy.data.materials[key].specular_intensity = 0.2
atom_sphere = sphere.copy()
atom_sphere.data = sphere.data.copy()
atom_sphere.location = [l + c for l, c in
zip(atom['location'], center)]
scale = 1 if show_bonds else 2.5
atom_sphere.dimensions = [atom_data[atom['element']]['radius'] *
scale * 2] * 3
atom_sphere.active_material = bpy.data.materials[atom['element']]
collection.objects.link(atom_sphere)
shapes.append(atom_sphere)
for bond in (molecule['bonds'] if show_bonds else []):
start = molecule['atoms'][bond['atoms'][0]]['location']
end = molecule['atoms'][bond['atoms'][1]]['location']
diff = [c2 - c1 for c2, c1 in zip(start, end)]
cent = [(c2 + c1) / 2 for c2, c1 in zip(start, end)]
mag = sum([(c2 - c1) ** 2 for c1, c2 in zip(start, end)]) ** 0.5
v_axis = Vector(diff).normalized()
v_obj = Vector((0, 0, 1))
v_rot = v_obj.cross(v_axis)
# This check prevents gimbal lock (ie. weird behavior when v_axis is
# close to (0, 0, 1))
if v_rot.length > 0.01:
v_rot = v_rot.normalized()
axis_angle = [acos(v_obj.dot(v_axis))] + list(v_rot)
else:
v_rot = Vector((1, 0, 0))
axis_angle = [0] * 4
if bond['order'] not in range(1, 4):
sys.stderr.write("Improper number of bonds! Defaulting to 1.\n")
bond['order'] = 1
if bond['order'] == 1:
trans = [[0] * 3]
elif bond['order'] == 2:
trans = [[1.4 * atom_data['bond']['radius'] * x for x in v_rot],
[-1.4 * atom_data['bond']['radius'] * x for x in v_rot]]
elif bond['order'] == 3:
trans = [[0] * 3,
[2.2 * atom_data['bond']['radius'] * x for x in v_rot],
[-2.2 * atom_data['bond']['radius'] * x for x in v_rot]]
for i in range(bond['order']):
bond_cylinder = cylinder.copy()
bond_cylinder.data = cylinder.data.copy()
bond_cylinder.dimensions = [atom_data['bond']['radius'] * scale *
2] * 2 + [mag]
bond_cylinder.location = [c + scale * v for c,
v in zip(cent, trans[i])]
bond_cylinder.rotation_mode = 'AXIS_ANGLE'
bond_cylinder.rotation_axis_angle = axis_angle
collection.objects.link(bond_cylinder)
shapes.append(bond_cylinder)
# Remove primitive meshes
bpy.ops.object.select_all(action='DESELECT')
sphere.select_set(True)
if show_bonds:
cylinder.select_set(True)
# If the starting cube is there, remove it
if 'Cube' in bpy.data.objects.keys():
bpy.data.objects.get('Cube').select_set(True)
bpy.ops.object.delete()
for shape in shapes:
shape.select_set(True)
bpy.context.view_layer.objects.active = shapes[0]
bpy.ops.object.shade_smooth()
if join:
bpy.ops.object.join()
for obj in bpy.context.selected_objects:
obj.name = name
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')
if __name__ == '__main__':
"""Uses Blender's limited argv interface to pass args from main script."""
args = sys.argv[sys.argv.index('--') + 1:]
show_bonds, join = True, True
if '--space-filling' in args:
show_bonds = False
args.remove('--space-filling')
if '--no-join' in args:
join = False
args.remove('--no-join')
try:
with open(args[0]) as in_file:
molecule = json.load(in_file)
except IOError:
molecule = json.loads(args[0])
draw_molecule(molecule, show_bonds=show_bonds, join=join)
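# A minimal sketch of the JSON molecule structure draw_molecule() consumes
# (hypothetical water-like values; element keys must exist in atoms.json,
# bond "atoms" entries are indices into the atom list, "order" is 1-3):
_example_molecule = {
    "atoms": [
        {"element": "O", "location": [0.0, 0.0, 0.0]},
        {"element": "H", "location": [0.96, 0.0, 0.0]},
        {"element": "H", "location": [-0.24, 0.93, 0.0]},
    ],
    "bonds": [
        {"atoms": [0, 1], "order": 1},
        {"atoms": [0, 2], "order": 1},
    ],
}
# draw_molecule(_example_molecule)  # would draw it inside a Blender session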
|
patrickfuller/blender-chemicals
|
blender_chemicals/draw.py
|
Python
|
mit
| 7,037
|
'''Class for pickling and encrypting data'''
__title__ = 'EncryptedPickle'
__version__ = '0.1.4'
__author__ = 'Vingd, Inc.'
__author_email__ = 'developers@vingd.com'
__url__ = 'https://github.com/vingd/encrypted-pickle-python'
__copyright__ = 'Copyright 2013 Vingd, Inc.'
__license__ = 'MIT License'
|
vingd/encrypted-pickle-python
|
encryptedpickle/__init__.py
|
Python
|
mit
| 301
|
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="bgcolor", parent_name="ohlc.hoverlabel", **kwargs):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
**kwargs
)
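# A minimal usage sketch (hypothetical color value; ColorValidator exposes
# validate_coerce() for checking and coercing user-supplied colors):
#   validator = BgcolorValidator()
#   coerced = validator.validate_coerce("rgba(255, 0, 0, 0.5)")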
|
plotly/python-api
|
packages/python/plotly/plotly/validators/ohlc/hoverlabel/_bgcolor.py
|
Python
|
mit
| 500
|
#!/usr/bin/env python3
import argparse
import matplotlib.pyplot as plot
import matplotlib.ticker as ticker
import re
import scipy
import wells.publisher as publisher
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--interactive",
help="Interactive mode",
action="store_true")
parser.add_argument("-e", "--ext",
help="Output image extension",
type=str,
default="png")
parser.add_argument("-s", "--figsize",
help="Figure size",
type=str,
default=("2.8, 2.8"))
parser.add_argument("-c", "--colorbar",
help="Show colorbar",
action="store_true")
parser.add_argument("--nx", "--xn",
help="Number of x ticks",
type=int,
default=5)
parser.add_argument("--ny", "--yn",
help="Number of y ticks",
type=int,
default=6)
parser.add_argument("--nc", "--cn",
help="Number of colorbar ticks",
type=int,
default=4)
parser.add_argument("--dx",
help="Major x-axis tick step",
type=float,
default=None)
parser.add_argument("--dy",
help="Major y-axis tick step",
type=float,
default=None)
parser.add_argument("--mdx",
help="Minor x-axis tick step",
type=float,
default=None)
parser.add_argument("--mdy",
help="Minor y-axis tick step",
type=float,
default=None)
parser.add_argument("--minx", "--xmin",
help="Minimum x coordinate",
type=float)
parser.add_argument("--maxx", "--xmax",
help="Maximum x coordinate",
type=float)
parser.add_argument("--miny", "--ymin",
help="Minimum y coordinate",
type=float)
parser.add_argument("--maxy", "--ymax",
help="Maximum y coordinate",
type=float)
parser.add_argument("--dbmin", "--mindb",
help="Minimum decibels level to display",
type=float,
default=-60)
parser.add_argument("--ssx",
help="Subsample in x",
type=int)
parser.add_argument("--ssy",
help="Subsample in y",
type=int)
parser.add_argument("-p", "--physical-units",
help="Use physical units for plot labels",
action="store_true")
parser.add_argument("input",
help="Input file",
type=str,
nargs="+")
args = parser.parse_args()
workspace = scipy.load(args.input[0])
t = workspace["t"]
x = workspace["x"]
bg = workspace["background"]
delta = workspace["delta"]
pump = workspace["pump"]
loss = workspace["loss"]
if args.physical_units:
# This is very ad-hoc.
delta0 = 1E11 # Hardcoded, but what?
beta0 = 250 # ... and this too.
xu = scipy.sqrt(beta0/delta0)
x = xu * x
t = 2*scipy.pi/delta0 * t
mm = 1.0
ns = 1.0
if args.physical_units:
mm = 1E-3
ns = 1E-9
x = x/mm
t = t/ns
miny = args.miny if args.miny is not None else x.min()
maxy = args.maxy if args.maxy is not None else x.max()
minc = args.dbmin
maxc = 0
window = (x > miny) & (x < maxy)
x = x[window]
if args.ssy:
x = x[::args.ssy]
pattern = re.compile(r"mint=(.*?)_")
def mint(filename):
match = pattern.search(filename)
if match:
return float(match.group(1))
ts = []
yss = []
for n, filename in enumerate(sorted(args.input, key=mint)):
print("%d/%d: %s" % (n+1, len(args.input), filename))
workspace = scipy.load(filename)
t = workspace["t"]
ys = workspace["states"]
ys = ys[:, window]
if args.ssy:
ys = ys[:, ::args.ssy]
if args.ssx:
t = t[::args.ssx]
ys = ys[::args.ssx, :]
ts.append(t)
yss.append(ys)
if t.max() >= args.maxx:
break
t = scipy.hstack(ts)
ys = scipy.vstack(yss)
print("Resulting image shape:", ys.shape)
minx = args.minx if args.minx is not None else t.min()
maxx = args.maxx if args.maxx is not None else t.max()
ys = abs(ys)
ys = ys / ys.max()
ys = 20 * scipy.log10(ys)
if args.physical_units:
xlabel = "$t,~\mathrm{ns}$"
ylabel = "$z,~\mathrm{mm}$"
else:
xlabel = "$t$"
ylabel = "$z$"
if not args.interactive:
figsize = [float(x) for x in args.figsize.split(",")]
filename = ("delta=%.2f_"
"pump=%.2E_"
"loss=%.2E_"
"mint=%.2f_"
"maxt=%.2f_timedomain2"
% (delta, pump, loss, minx, maxx))
publisher.init({"figure.figsize": figsize})
plot.figure()
axs = plot.subplot(1, 1, 1)
plot.pcolormesh(t, x, ys.T, cmap="magma", rasterized=True)
plot.xlim(minx, maxx)
plot.ylim(miny, maxy)
plot.clim(minc, maxc)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
if args.nx is not None:
axs.xaxis.set_major_locator(
ticker.MaxNLocator(args.nx))
if args.ny is not None:
axs.yaxis.set_major_locator(
ticker.MaxNLocator(args.ny))
if args.dx is not None:
axs.xaxis.set_major_locator(
ticker.MultipleLocator(args.dx))
if args.dy is not None:
axs.yaxis.set_major_locator(
ticker.MultipleLocator(args.dy))
if args.mdx is not None:
axs.xaxis.set_minor_locator(
ticker.MultipleLocator(args.mdx))
if args.mdy is not None:
axs.yaxis.set_minor_locator(
ticker.MultipleLocator(args.mdy))
axs.tick_params(which="both", direction="out")
if args.colorbar:
cb = plot.colorbar()
cb.set_label("dB")
if args.interactive:
plot.show()
else:
publisher.publish(filename, args.ext)
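# A minimal invocation sketch (hypothetical filenames; each input name must
# contain "mint=<value>_" so the regex above can sort the files, and --maxx
# should be given because it is compared against t.max() unconditionally):
#   ./timedomain2.py --maxx 50 --dbmin -60 run_mint=0.00_.npz run_mint=25.00_.npz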
|
ioreshnikov/wells
|
timedomain2.py
|
Python
|
mit
| 5,970
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(default=b'', max_length=255)),
('rules', models.TextField()),
('notes', models.TextField(blank=True)),
],
options={
'db_table': 'groups',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='GroupUser',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('group', models.ForeignKey(to='access.Group')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
options={
'db_table': 'groups_users',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='groupuser',
unique_together=set([('group', 'user')]),
),
migrations.AddField(
model_name='group',
name='users',
field=models.ManyToManyField(related_name='groups', through='access.GroupUser', to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
]
|
ingenioustechie/zamboni
|
mkt/access/migrations/0001_initial.py
|
Python
|
bsd-3-clause
| 1,814
|
#!/usr/bin/python
import os, subprocess, datetime, time
class zxinghost:
__java_host__ = "zxingHost"
def __init__(self, loc = './', zxing_libs = ["core.jar", "javase.jar"]):
libs = [loc + l for l in zxing_libs]
libs.insert(0, loc)
cmd = ["java", "-cp", os.pathsep.join(libs), self.__java_host__]
try:
self.__zxing_process = subprocess.Popen(cmd,
shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
time.sleep(1)
except Exception as error:
print(error)
raise error
def __del__(self):
self.close()
def decodeBase64(self, base64):
if self.__zxing_process:
cmd = 'base64 {0}\n'.format(base64)
return self.__sendCommand(cmd)
def decodeFile(self, file):
if self.__zxing_process:
cmd = 'file {0}\n'.format(file)
return self.__sendCommand(cmd)
def encode(self, text, file):
if self.__zxing_process:
cmd = 'create {0} {1}\n'.format(text, file)
return self.__sendCommand(cmd)
def close(self):
if self.__zxing_process:
self.__zxing_process.communicate(input='q\n')
del(self.__zxing_process)
def __sendCommand(self, command):
self.__zxing_process.stdin.write(command)
self.__zxing_process.stdin.flush()
out = self.__zxing_process.stdout.readline()
if hasattr(out, 'strip'): out = out.strip(os.linesep)
return out
if __name__ == '__main__':
zxing_loc = "zxing2.2/"
zxing = zxinghost('./', [zxing_loc + "core.jar", zxing_loc + "javase.jar"])
for i in range(10):
print("{0} - {1}".format(datetime.datetime.now().time(), i))
out = zxing.encode('"Welcome to Python.org"', 'test.png')
print("{0} - {1}".format(datetime.datetime.now().time(), out))
out = zxing.decodeBase64(
'iVBORw0KGgoAAAANSUhEUgAAAMgAAADIAQAAAACFI5MzAAAA10lEQVR42u3XOw7DIAwGYDNxDG6ahptyDKa4fmV'\
'I2sz8kYyiiuZbLGwMIX4alJKS8g6ZJKNM+sy267QhiU47D5F+/sWRnVqf/kj4iHJURhUqjCiWbY5fLPFdoov6f/'\
'8slBijPPWdhSJRaw0228TXbCMIyRbR1760QGJtr1jIW+WDkCRe6IkhscscS2Ql65Cn3HfwavFh9bhbSQKJF6DlP'\
'FoLkMQtQHyrZ4+BET3N2GtQco4nfkMBFe15eklhLNFsW2vRSvy5HyyVOChUblEvl/yeS0l5qXwBYfaE7gyTgqsA'\
'AAAASUVORK5CYII=')
print("{0} - {1}".format(datetime.datetime.now().time(), out))
out = zxing.decodeFile('test.png')
print("{0} - {1}".format(datetime.datetime.now().time(), out))
del(zxing)
|
Jarrey/python-ipc-zxing
|
zxinghost.py
|
Python
|
apache-2.0
| 2,827
|
# -*- coding: utf-8 -*-
import os
import re
import sys
import time
import shutil
import select
import subprocess
import nixops.util
import nixops.resources
class MachineDefinition(nixops.resources.ResourceDefinition):
"""Base class for NixOps machine definitions."""
def __init__(self, xml):
nixops.resources.ResourceDefinition.__init__(self, xml)
self.encrypted_links_to = set([e.get("value") for e in xml.findall("attrs/attr[@name='encryptedLinksTo']/list/string")])
self.store_keys_on_machine = xml.find("attrs/attr[@name='storeKeysOnMachine']/bool").get("value") == "true"
self.keys = {k.get("name"): k.find("string").get("value") for k in xml.findall("attrs/attr[@name='keys']/attrs/attr")}
self.owners = [e.get("value") for e in xml.findall("attrs/attr[@name='owners']/list/string")]
class SSHMaster(object):
def __init__(self, tempdir, name, ssh_name, ssh_flags):
self._tempdir = tempdir
self._control_socket = tempdir + "/ssh-master-" + name
self._ssh_name = ssh_name
res = subprocess.call(
["ssh", "-x", "root@" + self._ssh_name, "-S", self._control_socket,
"-M", "-N", "-f", '-oNumberOfPasswordPrompts=0', '-oServerAliveInterval=60']
+ ssh_flags)
if res != 0:
raise SSHConnectionFailed("unable to start SSH master connection to ‘{0}’".format(name))
self.opts = ["-S", self._control_socket]
def __del__(self):
subprocess.call(
["ssh", "root@" + self._ssh_name,
"-S", self._control_socket, "-O", "exit"], stderr=nixops.util.devnull)
class MachineState(nixops.resources.ResourceState):
"""Base class for NixOps machine state objects."""
vm_id = nixops.util.attr_property("vmId", None)
ssh_pinged = nixops.util.attr_property("sshPinged", False, bool)
public_vpn_key = nixops.util.attr_property("publicVpnKey", None)
store_keys_on_machine = nixops.util.attr_property("storeKeysOnMachine", True, bool)
keys = nixops.util.attr_property("keys", [], 'json')
owners = nixops.util.attr_property("owners", [], 'json')
# Nix store path of the last global configuration deployed to this
# machine. Used to check whether this machine is up to date with
# respect to the global configuration.
cur_configs_path = nixops.util.attr_property("configsPath", None)
# Nix store path of the last machine configuration deployed to
# this machine.
cur_toplevel = nixops.util.attr_property("toplevel", None)
def __init__(self, depl, name, id):
nixops.resources.ResourceState.__init__(self, depl, name, id)
self._ssh_pinged_this_time = False
self.ssh_master = None
self._ssh_private_key_file = None
def get_definition_prefix(self):
return ""
@property
def started(self):
state = self.state
return state == self.STARTING or state == self.UP
def set_common_state(self, defn):
self.store_keys_on_machine = defn.store_keys_on_machine
self.keys = defn.keys
def stop(self):
"""Stop this machine, if possible."""
self.warn("don't know how to stop machine ‘{0}’".format(self.name))
def start(self):
"""Start this machine, if possible."""
pass
def get_load_avg(self):
"""Get the load averages on the machine."""
try:
res = self.run_command("cat /proc/loadavg", capture_stdout=True, timeout=15).rstrip().split(' ')
assert len(res) >= 3
return res
except SSHConnectionFailed:
return None
except SSHCommandFailed:
return None
# FIXME: Move this to ResourceState so that other kinds of
# resources can be checked.
def check(self):
"""Check machine state."""
res = CheckResult()
self._check(res)
return res
def _check(self, res):
avg = self.get_load_avg()
if avg == None:
if self.state == self.UP: self.state = self.UNREACHABLE
res.is_reachable = False
else:
self.state = self.UP
self.ssh_pinged = True
self._ssh_pinged_this_time = True
res.is_reachable = True
res.load = avg
# Get the systemd units that are in a failed state.
out = self.run_command("systemctl --all --full", capture_stdout=True).split('\n')
res.failed_units = []
for l in out:
match = re.match("^([^ ]+) .* failed .*$", l)
if match: res.failed_units.append(match.group(1))
# Currently in systemd, failed mounts enter the
# "inactive" rather than "failed" state. So check for
# that. Hack: ignore special filesystems like
# /sys/kernel/config. Systemd tries to mount these
# even when they don't exist.
match = re.match("^([^\.]+\.mount) .* inactive .*$", l)
if match and not match.group(1).startswith("sys-") and not match.group(1).startswith("dev-"):
res.failed_units.append(match.group(1))
def restore(self, defn, backup_id, devices=[]):
"""Restore persistent disks to a given backup, if possible."""
self.warn("don't know how to restore disks from backup for machine ‘{0}’".format(self.name))
def remove_backup(self, backup_id):
"""Remove a given backup of persistent disks, if possible."""
self.warn("don't know how to remove a backup for machine ‘{0}’".format(self.name))
def backup(self, defn, backup_id):
"""Make backup of persistent disks, if possible."""
self.warn("don't know how to make backup of disks for machine ‘{0}’".format(self.name))
def reboot(self):
"""Reboot this machine."""
self.log("rebooting...")
# The sleep is to prevent the reboot from causing the SSH
# session to hang.
self.run_command("(sleep 2; reboot) &")
self.state = self.STARTING
def reboot_sync(self):
"""Reboot this machine and wait until it's up again."""
self.reboot()
self.log_start("waiting for the machine to finish rebooting...")
nixops.util.wait_for_tcp_port(self.get_ssh_name(), 22, open=False, callback=lambda: self.log_continue("."))
self.log_continue("[down]")
nixops.util.wait_for_tcp_port(self.get_ssh_name(), 22, callback=lambda: self.log_continue("."))
self.log_end("[up]")
self.state = self.UP
self.ssh_pinged = True
self._ssh_pinged_this_time = True
self.send_keys()
def send_keys(self):
if self.store_keys_on_machine: return
self.run_command("mkdir -m 0700 -p /run/keys")
for k, v in self.get_keys().items():
self.log("uploading key ‘{0}’...".format(k))
tmp = self.depl.tempdir + "/key-" + self.name
f = open(tmp, "w+"); f.write(v); f.close()
self.upload_file(tmp, "/run/keys/" + k)
os.remove(tmp)
self.run_command("touch /run/keys/done")
def get_keys(self):
return self.keys
def get_ssh_name(self):
assert False
def get_ssh_flags(self):
return []
@property
def public_ipv4(self):
return None
@property
def private_ipv4(self):
return None
def address_to(self, m):
"""Return the IP address to be used to access machone "m" from this machine."""
ip = m.public_ipv4
if ip: return ip
return None
def wait_for_ssh(self, check=False):
"""Wait until the SSH port is open on this machine."""
if self.ssh_pinged and (not check or self._ssh_pinged_this_time): return
self.log_start("waiting for SSH...")
nixops.util.wait_for_tcp_port(self.get_ssh_name(), 22, callback=lambda: self.log_continue("."))
self.log_end("")
self.state = self.UP
self.ssh_pinged = True
self._ssh_pinged_this_time = True
def _open_ssh_master(self, timeout=None):
"""Start an SSH master connection to speed up subsequent SSH sessions."""
if self.ssh_master is not None: return
tries = 1 if timeout else 5
while True:
try:
self.ssh_master = SSHMaster(self.depl.tempdir, self.name, self.get_ssh_name(),
self.get_ssh_flags() + (["-o", "ConnectTimeout={0}".format(timeout)] if timeout else []))
break
except Exception:
tries = tries - 1
if tries == 0: raise
pass
def write_ssh_private_key(self, private_key):
key_file = "{0}/id_nixops-{1}".format(self.depl.tempdir, self.name)
with os.fdopen(os.open(key_file, os.O_CREAT | os.O_WRONLY, 0600), "w") as f:
f.write(private_key)
self._ssh_private_key_file = key_file
return key_file
def get_ssh_private_key_file(self):
return None
def _logged_exec(self, command, check=True, capture_stdout=False, stdin_string=None, env=None):
stdin = subprocess.PIPE if stdin_string != None else nixops.util.devnull
if capture_stdout:
process = subprocess.Popen(command, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
fds = [process.stdout, process.stderr]
log_fd = process.stderr
else:
process = subprocess.Popen(command, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=env)
fds = [process.stdout]
log_fd = process.stdout
# FIXME: this can deadlock if stdin_string doesn't fit in the
# kernel pipe buffer.
if stdin_string != None: process.stdin.write(stdin_string)
for fd in fds: nixops.util.make_non_blocking(fd)
at_new_line = True
stdout = ""
while len(fds) > 0:
# The timeout/poll is to deal with processes (like
# VBoxManage) that start children that go into the
# background but keep the parent's stdout/stderr open,
# preventing an EOF. FIXME: Would be better to catch
# SIGCHLD.
(r, w, x) = select.select(fds, [], [], 1)
if len(r) == 0 and process.poll() != None: break
if capture_stdout and process.stdout in r:
data = process.stdout.read()
if data == "":
fds.remove(process.stdout)
else:
stdout += data
if log_fd in r:
data = log_fd.read()
if data == "":
if not at_new_line: self.log_end("")
fds.remove(log_fd)
else:
start = 0
while start < len(data):
end = data.find('\n', start)
if end == -1:
self.log_start(data[start:])
at_new_line = False
else:
s = data[start:end]
if at_new_line:
self.log(s)
else:
self.log_end(s)
at_new_line = True
if end == -1: break
start = end + 1
res = process.wait()
if stdin_string != None: process.stdin.close()
if check and res != 0:
raise SSHCommandFailed("command ‘{0}’ failed on machine ‘{1}’".format(command, self.name))
return stdout if capture_stdout else res
def run_command(self, command, check=True, capture_stdout=False, stdin_string=None, timeout=None):
"""Execute a command on the machine via SSH."""
# Note that the timeout is only respected if this is the first
# call to _open_ssh_master().
self._open_ssh_master(timeout=timeout)
cmdline = (
["ssh", "-x", "root@" + self.get_ssh_name()] +
self.ssh_master.opts + self.get_ssh_flags() + [command])
return self._logged_exec(cmdline, check=check, capture_stdout=capture_stdout, stdin_string=stdin_string)
def copy_closure_to(self, path):
"""Copy a closure to this machine."""
# !!! Implement copying between cloud machines, as in the Perl
# version.
# It's usually faster to let the target machine download
# substitutes from nixos.org, so try that first.
if not self.has_really_fast_connection():
closure = subprocess.check_output(["nix-store", "-qR", path]).splitlines()
self.run_command("nix-store -j 4 -r --ignore-unknown " + ' '.join(closure), check=False)
# Any remaining paths are copied from the local machine.
env = dict(os.environ)
env['NIX_SSHOPTS'] = ' '.join(self.get_ssh_flags());
self._logged_exec(
["nix-copy-closure", "--to", "root@" + self.get_ssh_name(), path]
+ ([] if self.has_really_fast_connection() else ["--gzip"]),
env=env)
def has_really_fast_connection(self):
return False
def generate_vpn_key(self):
try:
self.run_command("test -f /root/.ssh/id_charon_vpn")
_vpn_key_exists = True
except SSHCommandFailed:
_vpn_key_exists = False
if self.public_vpn_key and _vpn_key_exists: return
(private, public) = nixops.util.create_key_pair(key_name="NixOps VPN key of {0}".format(self.name))
f = open(self.depl.tempdir + "/id_vpn-" + self.name, "w+")
f.write(private)
f.seek(0)
# FIXME: use run_command
res = subprocess.call(
["ssh", "-x", "root@" + self.get_ssh_name()]
+ self.get_ssh_flags() +
["umask 077 && mkdir -p /root/.ssh && cat > /root/.ssh/id_charon_vpn"],
stdin=f)
f.close()
if res != 0: raise Exception("unable to upload VPN key to ‘{0}’".format(self.name))
self.public_vpn_key = public
def upload_file(self, source, target, recursive=False):
self._open_ssh_master()
# FIXME: use ssh master
if recursive:
recursive_cmdline = [ '-r' ]
else:
recursive_cmdline = [ ]
cmdline = ["scp"] + self.get_ssh_flags() + recursive_cmdline + [source, "root@" + self.get_ssh_name() + ":" + target]
return self._logged_exec(cmdline)
def download_file(self, source, target, recursive=False):
self._open_ssh_master()
# FIXME: use ssh master
if recursive:
recursive_cmdline = [ '-r' ]
else:
recursive_cmdline = [ ]
cmdline = ["scp"] + self.get_ssh_flags() + recursive_cmdline + ["root@" + self.get_ssh_name() + ":" + source, target]
return self._logged_exec(cmdline)
def get_console_output(self):
return "(not available for this machine type)\n"
class SSHConnectionFailed(Exception):
pass
class SSHCommandFailed(Exception):
pass
class CheckResult(object):
def __init__(self):
# Whether the resource exists.
self.exists = None
# Whether the resource is "up". Generally only meaningful for
# machines.
self.is_up = None
# Whether the resource is reachable via SSH.
self.is_reachable = None
# Whether the disks that should be attached to a machine are
# in fact properly attached.
self.disks_ok = None
# List of systemd units that are in a failed state.
self.failed_units = None
# Load average on the machine.
self.load = None
# Error messages.
self.messages = []
# FIXME: add a check whether the active NixOS config on the
# machine is correct.
import nixops.backends.none
import nixops.backends.virtualbox
import nixops.backends.ec2
import nixops.resources.ec2_keypair
import nixops.resources.sqs_queue
import nixops.resources.s3_bucket
import nixops.resources.iam_role
def create_definition(xml):
"""Create a machine definition object from the given XML representation of the machine's attributes."""
target_env = xml.find("attrs/attr[@name='targetEnv']/string").get("value")
for i in [nixops.backends.none.NoneDefinition,
nixops.backends.virtualbox.VirtualBoxDefinition,
nixops.backends.ec2.EC2Definition]:
if target_env == i.get_type():
return i(xml)
raise nixops.deployment.UnknownBackend("unknown backend type ‘{0}’".format(target_env))
def create_state(depl, type, name, id):
"""Create a machine state object of the desired backend type."""
for i in [nixops.backends.none.NoneState,
nixops.backends.virtualbox.VirtualBoxState,
nixops.backends.ec2.EC2State,
nixops.resources.ec2_keypair.EC2KeyPairState,
nixops.resources.sqs_queue.SQSQueueState,
nixops.resources.iam_role.IAMRoleState,
nixops.resources.s3_bucket.S3BucketState]:
if type == i.get_type():
return i(depl, name, id)
raise nixops.deployment.UnknownBackend("unknown backend type ‘{0}’".format(type))
|
garbas/nixops
|
nixops/backends/__init__.py
|
Python
|
lgpl-3.0
| 17,426
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.orca.automl.model.base_keras_model import KerasBaseModel
from collections.abc import Iterable
import numpy as np
def model_creator(config):
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, LSTM, Dropout
import tensorflow as tf
inp = Input(shape=(None, config["input_dim"]))
if "lstm_1_units" in config and "lstm_2_units" in config:
lstm_units = (config["lstm_1_units"], config["lstm_2_units"])
else:
lstm_units = config.get("lstm_units", [32, 32])
if "dropout_1" in config and "dropout_2" in config:
dropout_rates = (config["dropout_1"], config["dropout_2"])
else:
dropout_rates = config.get("dropouts", 0.2)
lstm_units = [lstm_units] if not isinstance(lstm_units, Iterable) else lstm_units
for i, unit in enumerate(lstm_units):
return_sequences = True if i != len(lstm_units) - 1 else False
dropout_rate = dropout_rates[i] if isinstance(dropout_rates, Iterable) else dropout_rates
lstm_input = inp if i == 0 else dropout
lstm = LSTM(units=unit, return_sequences=return_sequences)(lstm_input)
dropout = Dropout(rate=dropout_rate)(lstm)
out = Dense(config["output_dim"])(dropout)
model = Model(inputs=inp, outputs=out)
model.compile(loss=config.get("loss", "mse"),
optimizer=getattr(tf.keras.optimizers, config.get("optim", "Adam"))
(learning_rate=config.get("lr", 0.001)),
metrics=[config.get("metric", "mse")])
return model
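# A minimal sketch of a config dict accepted by model_creator() above
# (hypothetical values; only input_dim and output_dim are strictly required
# by _check_config in VanillaLSTM below):
_example_config = {
    "input_dim": 8,
    "output_dim": 1,
    "lstm_units": [32, 32],
    "dropouts": [0.2, 0.2],
    "lr": 0.001,
}
# model = model_creator(_example_config)  # builds a two-layer LSTM regressor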
def check_iter_type(obj, type):
return isinstance(obj, type) or \
(isinstance(obj, Iterable) and all(isinstance(o, type) for o in obj))
class VanillaLSTM(KerasBaseModel):
def __init__(self, check_optional_config=False, future_seq_len=1):
super(VanillaLSTM, self).__init__(model_creator=model_creator,
check_optional_config=check_optional_config)
def _check_config(self, **config):
super()._check_config(**config)
assert isinstance(config["input_dim"], int), "'input_dim' should be int"
assert isinstance(config["output_dim"], int), "'output_dim' should be int"
lstm_name = "lstm_units"
dropout_name = "dropouts"
if lstm_name in config:
if not check_iter_type(config[lstm_name], (int, np.integer)):
raise ValueError(f"{lstm_name} should be int or an list/tuple of ints. "
f"Got {config[lstm_name]}")
if dropout_name in config:
if not check_iter_type(config[dropout_name], (float, np.float)):
raise ValueError(f"{dropout_name} should be float or a list/tuple of floats. "
f"Got {config[dropout_name]}")
if lstm_name in config and dropout_name in config:
if (isinstance(config[lstm_name], int) and isinstance(config[dropout_name], Iterable)) \
or (isinstance(config[lstm_name], Iterable) and
isinstance(config[dropout_name], Iterable) and
len(config[lstm_name]) != len(config[dropout_name])):
raise ValueError(f"{lstm_name} should have the same elements num as {dropout_name}")
def _get_required_parameters(self):
return {"input_dim",
"output_dim"
} | super()._get_required_parameters()
def _get_optional_parameters(self):
return {"lstm_units",
"dropouts",
"optim",
"lr"
} | super()._get_optional_parameters()
|
intel-analytics/BigDL
|
python/chronos/src/bigdl/chronos/model/VanillaLSTM.py
|
Python
|
apache-2.0
| 4,224
|
SUB_PACKET_SIZE = 5 # [ax, ay, az, pressure, t]
PACKET_DELIMINATOR = "<>"
SUB_PACKET_DELIMINATOR = " "
SAMPLE_MEMORY = 21
PRESSURE_NORMALIZER = 2.3 * 14.0
WINDOW_SIZE = 1000
# FIFO_SAMPLE_PERIOD = 1.0 / 100.0
SEND_POSITION_THRESHOLD = 60
ACCEL_G = 9.832
# LSB_G = 16384.0 / 2.0
LSB_G = 4096.0 / 8.0 # I think
KILL_THRESHOLD_LOW = 0.01
KILL_THRESHOLD_HIGH = 10.0
DYNAMIC_LENGTH = 1000
|
GEverding/touchVision
|
io/cleaner/constants.py
|
Python
|
mit
| 390
|
# -*- coding: utf-8 -*-
# Copyright (C) 2010-2013 Tobias Weber <tobi-weber@gmx.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from .baseServer import BaseServer
from levitas.lib import utils
log = logging.getLogger("levitas.server.eventletServer")
class EventletServer(BaseServer):
"""
Eventlet WSGI Server.
Example SETTINGS entry
======================
# Server address
httpserver_address = ("127.0.0.1", 8080)
"""
def start(self):
try:
import eventlet
from eventlet import wsgi
wsgi.server(eventlet.listen(self.server_address,
backlog=500),
self.app, max_size=8000)
except Exception as err:
log.error(str(err), exc_info=True)
finally:
log.info("HTTPD stopped")
|
tobi-weber/levitas
|
src/levitas/server/eventletServer.py
|
Python
|
apache-2.0
| 1,391
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the post-activation form of Residual Networks.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from mlperf_compliance import mlperf_log
from mlperf_compliance import resnet_log_helper
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
def batch_norm_relu(inputs, is_training, relu=True, init_zero=False,
data_format='channels_first'):
"""Performs a batch normalization followed by a ReLU.
Args:
inputs: `Tensor` of shape `[batch, channels, ...]`.
is_training: `bool` for whether the model is training.
relu: `bool` if False, omits the ReLU operation.
init_zero: `bool` if True, initializes scale parameter of batch
normalization with 0 instead of 1 (default).
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
A normalized `Tensor` with the same `data_format`.
"""
if init_zero:
gamma_initializer = tf.zeros_initializer()
else:
gamma_initializer = tf.ones_initializer()
if data_format == 'channels_first':
axis = 1
else:
axis = 3
outputs = tf.layers.batch_normalization(
inputs=inputs,
axis=axis,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
center=True,
scale=True,
training=is_training,
fused=True,
gamma_initializer=gamma_initializer)
if is_training:
resnet_log_helper.log_batch_norm(
input_tensor=inputs,
output_tensor=outputs,
momentum=BATCH_NORM_DECAY,
epsilon=BATCH_NORM_EPSILON,
center=True,
scale=True,
training=is_training)
if relu:
if is_training:
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
outputs = tf.nn.relu(outputs)
return outputs
def fixed_padding(inputs, kernel_size, data_format='channels_first'):
"""Pads the input along the spatial dimensions independently of input size.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]` or
`[batch, height, width, channels]` depending on `data_format`.
kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d`
operations. Should be a positive integer.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
A padded `Tensor` of the same `data_format` with size either intact
(if `kernel_size == 1`) or padded (if `kernel_size > 1`).
"""
pad_total = kernel_size - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
if data_format == 'channels_first':
padded_inputs = tf.pad(inputs, [[0, 0], [0, 0],
[pad_beg, pad_end], [pad_beg, pad_end]])
else:
padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg, pad_end],
[pad_beg, pad_end], [0, 0]])
return padded_inputs
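# A worked sketch of the padding arithmetic above (illustrative values):
# for kernel_size=7, pad_total = 6, pad_beg = 3, pad_end = 3, so a
# 'channels_first' input of shape [N, C, H, W] becomes [N, C, H+6, W+6]
# before the following VALID-padded, strided convolution.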
def conv2d_fixed_padding(inputs,
filters,
kernel_size,
strides,
is_training,
data_format='channels_first'):
"""Strided 2-D convolution with explicit padding.
The padding is consistent and is based only on `kernel_size`, not on the
dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone).
Args:
inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
filters: `int` number of filters in the convolution.
kernel_size: `int` size of the kernel to be used in the convolution.
strides: `int` strides of the convolution.
is_training: `bool` for whether the model is in training.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
A `Tensor` of shape `[batch, filters, height_out, width_out]`.
"""
inputs_for_logging = inputs
if strides > 1:
inputs = fixed_padding(inputs, kernel_size, data_format=data_format)
outputs = tf.layers.conv2d(
inputs=inputs,
filters=filters,
kernel_size=kernel_size,
strides=strides,
padding=('SAME' if strides == 1 else 'VALID'),
use_bias=False,
kernel_initializer=tf.variance_scaling_initializer(),
data_format=data_format)
if is_training:
resnet_log_helper.log_conv2d(
input_tensor=inputs_for_logging,
output_tensor=outputs,
stride=strides,
filters=filters,
initializer=mlperf_log.TRUNCATED_NORMAL,
use_bias=False)
return outputs
def residual_block(inputs, filters, is_training, strides,
use_projection=False, data_format='channels_first'):
"""Standard building block for residual networks with BN after convolutions.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
is_training: `bool` for whether the model is in training.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
The output `Tensor` of the block.
"""
shortcut = inputs
if use_projection:
# Projection shortcut in first layer to match filters and strides
shortcut = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=1,
strides=strides,
is_training=is_training,
data_format=data_format)
shortcut = batch_norm_relu(shortcut, is_training, relu=False,
data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=strides,
is_training=is_training,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=1,
is_training=is_training,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, relu=False, init_zero=True,
data_format=data_format)
return tf.nn.relu(inputs + shortcut)
def bottleneck_block(inputs, filters, is_training, strides,
use_projection=False, data_format='channels_first'):
"""Bottleneck block variant for residual networks with BN after convolutions.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first two convolutions. Note that
the third and final convolution will use 4 times as many filters.
is_training: `bool` for whether the model is in training.
strides: `int` block stride. If greater than 1, this block will ultimately
downsample the input.
use_projection: `bool` for whether this block should use a projection
shortcut (versus the default identity shortcut). This is usually `True`
for the first block of a block group, which may change the number of
filters and the resolution.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
The output `Tensor` of the block.
"""
if is_training:
mlperf_log.resnet_print(
key=mlperf_log.MODEL_HP_BLOCK_TYPE, value=mlperf_log.BOTTLENECK_BLOCK)
resnet_log_helper.log_begin_block(
input_tensor=inputs, block_type=mlperf_log.BOTTLENECK_BLOCK)
shortcut = inputs
if use_projection:
# Projection shortcut only in first block within a group. Bottleneck blocks
# end with 4 times the number of filters.
filters_out = 4 * filters
shortcut = conv2d_fixed_padding(
inputs=inputs,
filters=filters_out,
kernel_size=1,
strides=strides,
is_training=is_training,
data_format=data_format)
shortcut = batch_norm_relu(shortcut, is_training, relu=False,
data_format=data_format)
if is_training:
resnet_log_helper.log_projection(
input_tensor=inputs, output_tensor=shortcut)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=1,
strides=1,
is_training=is_training,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=filters,
kernel_size=3,
strides=strides,
is_training=is_training,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=4 * filters,
kernel_size=1,
strides=1,
is_training=is_training,
data_format=data_format)
inputs = batch_norm_relu(inputs, is_training, relu=False, init_zero=True,
data_format=data_format)
output = tf.nn.relu(inputs + shortcut)
if is_training:
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_SHORTCUT_ADD)
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_RELU)
resnet_log_helper.log_end_block(output_tensor=output)
return output
def block_group(inputs, filters, block_fn, blocks, strides, is_training, name,
data_format='channels_first'):
"""Creates one group of blocks for the ResNet model.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first convolution of the layer.
block_fn: `function` for the block to use within the model
blocks: `int` number of blocks contained in the layer.
strides: `int` stride to use for the first convolution of the layer. If
greater than 1, this layer will downsample the input.
is_training: `bool` for whether the model is training.
name: `str` name for the Tensor output of the block layer.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
The output `Tensor` of the block layer.
"""
# Drop batch size from shape logging.
if is_training:
mlperf_log.resnet_print(
key=mlperf_log.MODEL_HP_INITIAL_SHAPE, value=inputs.shape.as_list()[1:])
# Only the first block per block_group uses projection shortcut and strides.
inputs = block_fn(inputs, filters, is_training, strides,
use_projection=True, data_format=data_format)
for _ in range(1, blocks):
inputs = block_fn(inputs, filters, is_training, 1,
data_format=data_format)
return tf.identity(inputs, name)
def resnet_v1_generator(block_fn, layers, num_classes,
data_format='channels_first'):
"""Generator for ResNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
num_classes: `int` number of possible classes for image classification.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
"""
def model(inputs, is_training):
"""Creation of the model graph."""
inputs = conv2d_fixed_padding(
inputs=inputs,
filters=64,
kernel_size=7,
strides=2,
is_training=is_training,
data_format=data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
pooled_inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=3, strides=2, padding='SAME',
data_format=data_format)
if is_training:
resnet_log_helper.log_max_pool(input_tensor=inputs,
output_tensor=pooled_inputs)
inputs = tf.identity(pooled_inputs, 'initial_max_pool')
inputs = block_group(
inputs=inputs, filters=64, block_fn=block_fn, blocks=layers[0],
strides=1, is_training=is_training, name='block_group1',
data_format=data_format)
inputs = block_group(
inputs=inputs, filters=128, block_fn=block_fn, blocks=layers[1],
strides=2, is_training=is_training, name='block_group2',
data_format=data_format)
inputs = block_group(
inputs=inputs, filters=256, block_fn=block_fn, blocks=layers[2],
strides=2, is_training=is_training, name='block_group3',
data_format=data_format)
inputs = block_group(
inputs=inputs, filters=512, block_fn=block_fn, blocks=layers[3],
strides=2, is_training=is_training, name='block_group4',
data_format=data_format)
# The activation is 7x7 so this is a global average pool.
# TODO(huangyp): reduce_mean will be faster.
pool_size = (inputs.shape[1], inputs.shape[2])
inputs = tf.layers.average_pooling2d(
inputs=inputs, pool_size=pool_size, strides=1, padding='VALID',
data_format=data_format)
inputs = tf.identity(inputs, 'final_avg_pool')
inputs = tf.reshape(
inputs, [-1, 2048 if block_fn is bottleneck_block else 512])
if is_training:
mlperf_log.resnet_print(key=mlperf_log.MODEL_HP_DENSE, value=num_classes)
inputs = tf.layers.dense(
inputs=inputs,
units=num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=.01))
inputs = tf.identity(inputs, 'final_dense')
if is_training:
mlperf_log.resnet_print(
key=mlperf_log.MODEL_HP_FINAL_SHAPE, value=inputs.shape.as_list()[1:])
return inputs
model.default_image_size = 224
return model
def resnet_v1(resnet_depth, num_classes, data_format='channels_first'):
"""Returns the ResNet model for a given size and number of output classes."""
model_params = {
18: {'block': residual_block, 'layers': [2, 2, 2, 2]},
34: {'block': residual_block, 'layers': [3, 4, 6, 3]},
50: {'block': bottleneck_block, 'layers': [3, 4, 6, 3]},
101: {'block': bottleneck_block, 'layers': [3, 4, 23, 3]},
152: {'block': bottleneck_block, 'layers': [3, 8, 36, 3]},
200: {'block': bottleneck_block, 'layers': [3, 24, 36, 3]}
}
if resnet_depth not in model_params:
raise ValueError('Not a valid resnet_depth:', resnet_depth)
params = model_params[resnet_depth]
return resnet_v1_generator(
params['block'], params['layers'], num_classes, data_format)
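# Hedged usage sketch (not part of the original MLPerf file): how the generator
# above would typically be wired into a TF1-style graph. The placeholder shape
# and the 1000-class head below are illustrative assumptions only.
#
#   network = resnet_v1(resnet_depth=50, num_classes=1000,
#                       data_format='channels_first')
#   images = tf.placeholder(tf.float32, [None, 3, 224, 224])
#   logits = network(inputs=images, is_training=True)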
| mlperf/training_results_v0.5 | v0.5.0/google/cloud_v3.8/resnet-tpuv3-8/code/resnet/model/staging/models/rough/resnet/resnet_model.py | Python | apache-2.0 | 16,310 |
"""Basic checks for HomeKitSwitch."""
from tests.components.homekit_controller.common import (
setup_test_component)
async def test_switch_change_outlet_state(hass, utcnow):
"""Test that we can turn a HomeKit outlet on and off again."""
from homekit.model.services import OutletService
helper = await setup_test_component(hass, [OutletService()])
await hass.services.async_call('switch', 'turn_on', {
'entity_id': 'switch.testdevice',
}, blocking=True)
assert helper.characteristics[('outlet', 'on')].value == 1
await hass.services.async_call('switch', 'turn_off', {
'entity_id': 'switch.testdevice',
}, blocking=True)
assert helper.characteristics[('outlet', 'on')].value == 0
async def test_switch_read_outlet_state(hass, utcnow):
"""Test that we can read the state of a HomeKit outlet accessory."""
from homekit.model.services import OutletService
helper = await setup_test_component(hass, [OutletService()])
# Initial state is that the switch is off and the outlet isn't in use
switch_1 = await helper.poll_and_get_state()
assert switch_1.state == 'off'
assert switch_1.attributes['outlet_in_use'] is False
# Simulate that someone switched on the device in the real world not via HA
helper.characteristics[('outlet', 'on')].set_value(True)
switch_1 = await helper.poll_and_get_state()
assert switch_1.state == 'on'
assert switch_1.attributes['outlet_in_use'] is False
# Simulate that device switched off in the real world not via HA
helper.characteristics[('outlet', 'on')].set_value(False)
switch_1 = await helper.poll_and_get_state()
assert switch_1.state == 'off'
# Simulate that someone plugged something into the device
helper.characteristics[('outlet', 'outlet-in-use')].value = True
switch_1 = await helper.poll_and_get_state()
assert switch_1.state == 'off'
assert switch_1.attributes['outlet_in_use'] is True
| MartinHjelmare/home-assistant | tests/components/homekit_controller/test_switch.py | Python | apache-2.0 | 1,974 |
import random
from testcases.testcases_base import TestcasesBase
import unittest, time
class TestStoragepoolsAPI(TestcasesBase):
def setUp(self):
super().setUp()
self.freeDisks = [x['name'] for x in self.core0_client.getFreeDisks()]
if self.freeDisks == []:
self.skipTest(' [*] No free disks on node {}'.format(self.nodeid))
self.lg.info(' [*] Create storagepool (SP0) on node (N0)')
self.response, self.data = self.storagepools_api.post_storagepools(node_id=self.nodeid,
free_devices=self.freeDisks)
self.assertEqual(self.response.status_code, 201)
if self.id().split('.')[-1] in ['test009_get_storagepool_filessystem', 'test010_list_storagepool_filesystems',
'test011_post_storagepool_filesystem', 'test012_delete_storagepool_filesystem']:
self.setUp_plus_fileSystem()
elif self.id().split('.')[-1] in ['test013_get_storagepool_filessystem_snapshot',
'test014_list_storagepool_filesystems_snapshots',
'test015_post_storagepool_filesystem_snapshot',
'test016_delete_storagepool_filesystem_snapshot',
'test017_post_storagepool_filesystem_snapshot_rollback']:
self.setUp_plus_fileSystem_plus_snapShots()
def tearDown(self):
self.storagepools_api.delete_storagepools_storagepoolname(self.nodeid, self.data['name'])
super().tearDown()
def setUp_plus_fileSystem(self, **kwargs):
self.lg.info(' [*] Create filesystem (FS0) on storagepool {}'.format(self.data['name']))
self.response_filesystem, self.data_filesystem = self.storagepools_api.post_storagepools_storagepoolname_filesystems(
node_id=self.nodeid,
storagepoolname=self.data['name'], **kwargs)
self.assertEqual(self.response_filesystem.status_code, 201, " [*] Can't create filesystem on storagepool.")
def setUp_plus_fileSystem_plus_snapShots(self):
self.setUp_plus_fileSystem()
self.lg.info(' [*] Create snapshot (SS0) of filesystem {}'.format(self.data_filesystem['name']))
self.response_snapshot, self.data_snapshot = self.storagepools_api.post_filesystems_snapshots(self.nodeid,
self.data['name'],
self.data_filesystem[
'name'])
self.assertEqual(self.response_snapshot.status_code, 201, " [*] can't create new snapshot.")
def test001_get_storagepool(self):
""" GAT-045
**Test Scenario:**
#. Create storagepool (SP0) on node (N0), should succeed.
#. Get storagepool (SP0), should succeed with 200.
#. Get storagepool (SP0) using python client, should be listed
#. Get nonexisting storagepool, should fail with 404.
"""
self.lg.info(' [*] Get storagepool (SP0), should succeed with 200')
response = self.storagepools_api.get_storagepools_storagepoolname(self.nodeid, self.data['name'])
self.assertEqual(response.status_code, 200)
for key in self.data.keys():
if key == 'devices':
continue
self.assertEqual(response.json()[key], self.data[key])
self.lg.info(' [*] Get storagepool (SP0) using python client, should be listed')
storagepools = self.core0_client.client.btrfs.list()
storagepool_sp0 = [x for x in storagepools if x['label'] == 'sp_{}'.format(self.data['name'])]
self.assertNotEqual(storagepool_sp0, [])
for device in self.data['devices']:
self.assertIn(device, [x['path'][:-1] for x in storagepool_sp0[0]['devices']])
self.lg.info(' [*] Get nonexisting storagepool, should fail with 404')
response = self.storagepools_api.get_storagepools_storagepoolname(self.nodeid, 'fake_storagepool')
self.assertEqual(response.status_code, 404)
def test002_list_storagepool(self):
""" GAT-046
**Test Scenario:**
#. Create Storagepool (SP0) on node (N0).
#. list node (N0) storagepools, storagepool (SP0) should be listed.
"""
self.lg.info(' [*] list node (N0) storagepools, storagepool (SP0) should be listed')
response = self.storagepools_api.get_storagepools(self.nodeid)
self.assertEqual(response.status_code, 200)
self.assertIn(self.data['name'], [x['name'] for x in response.json()])
def test003_post_storagepool(self):
""" GAT-047
**Test Scenario:**
#. Get random node id (N0).
#. Create storagepool (SP0) on node (N0).
#. Get storagepool (SP0), should succeed with 200.
#. Get storagepool (SP1) using python client, should be listed
#. Delete Storagepool (SP0), should succeed with 204.
#. Create invalid storagepool (missing required params), should fail with 400.
"""
self.lg.info(' [*] Get Storagepool (SP1), should succeed with 200')
response = self.storagepools_api.get_storagepools_storagepoolname(self.nodeid, self.data['name'])
self.assertEqual(response.status_code, 200)
for key in self.data.keys():
if key == 'devices':
continue
self.assertEqual(response.json()[key], self.data[key])
self.lg.info(' [*] Get storagepool (SP0) using python client, should be listed')
storagepools = self.core0_client.client.btrfs.list()
storagepool_sp1 = [x for x in storagepools if x['label'] == 'sp_{}'.format(self.data['name'])]
self.assertNotEqual(storagepool_sp1, [])
for device in self.data['devices']:
self.assertIn(device, [x['path'][:-1] for x in storagepool_sp1[0]['devices']])
self.lg.info(' [*] Delete Storagepool (SP0), should succeed with 204')
response = self.storagepools_api.delete_storagepools_storagepoolname(self.nodeid, self.data['name'])
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] Create invalid storagepool, should fail with 400')
response, data = self.storagepools_api.post_storagepools(self.nodeid, free_devices=self.freeDisks,
name='', devices='')
self.assertEqual(response.status_code, 400)
def test004_delete_storagepool(self):
""" GAT-048
**Test Scenario:**
#. Create Storagepool (SP0) on node (N0).
#. Delete Storagepool (SP0), should succeed with 204.
#. list node (N0) storagepools, storagepool (SP0) should be gone.
#. Delete nonexisting storagepool, should fail with 204.
"""
self.lg.info(' [*] Delete storagepool (SP0), should succeed with 204')
response = self.storagepools_api.delete_storagepools_storagepoolname(self.nodeid, self.data['name'])
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] list node (N0) storagepools, storagepool (SP0) should be gone')
response = self.storagepools_api.get_storagepools(self.nodeid)
self.assertEqual(response.status_code, 200)
self.assertNotIn(self.data['name'], [x['name'] for x in response.json()])
self.lg.info(' [*] Delete nonexisting storagepool, should fail with 204')
response = self.storagepools_api.delete_storagepools_storagepoolname(self.nodeid, self.rand_str())
self.assertEqual(response.status_code, 204)
def test005_get_storagepool_device(self):
""" GAT-049
**Test Scenario:**
#. Create storagepool (SP0) with device (DV0) on node (N0).
#. Get device (DV0), should succeed with 200.
#. Get nonexisting device, should fail with 404.
"""
self.lg.info(' [*] Get device (DV0), should succeed with 200')
response = self.storagepools_api.get_storagepools_storagepoolname_devices(self.nodeid, self.data['name'])
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response.json(), [])
device_uuid = response.json()[0]['uuid']
response = self.storagepools_api.get_storagepools_storagepoolname_devices_deviceid(self.nodeid,
self.data['name'],
device_uuid)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()['deviceName'][:-1], self.data['devices'][0])
self.assertEqual(response.json()['uuid'], device_uuid)
self.assertEqual(response.json()['status'], 'healthy')
self.lg.info(' [*] Get nonexisting device, should fail with 404')
response = self.storagepools_api.get_storagepools_storagepoolname_devices_deviceid(self.nodeid,
self.data['name'],
self.rand_str())
self.assertEqual(response.status_code, 404)
def test006_list_storagepool_devices(self):
""" GAT-050
**Test Scenario:**
#. Create storagepool (SP0) with device (DV0) on node (N0).
#. list storagepool (SP0) devices, should succeed with 200.
"""
self.lg.info(' [*] list storagepool (SP0) devices, should succeed with 200')
response = self.storagepools_api.get_storagepools_storagepoolname_devices(self.nodeid, self.data['name'])
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response.json(), [])
self.assertEqual(len(response.json()), len(self.data['devices']))
self.assertEqual(response.json()[0]['status'], 'healthy')
def test007_post_storagepool_device(self):
""" GAT-051
**Test Scenario:**
#. Get random node id (N0).
#. Create storagepool (SP0) with device (DV0) on node (N0).
#. Create device (DV1) on storagepool (SP0), should succeed with 201.
#. list storagepool (SP0) devices, device (DV1) should be listed.
#. Create device with invalid body, should fail with 400.
"""
self.lg.info(' [*] Create device (DV1) on storagepool (SP0), should succeed with 201')
free_devices = [x['name'] for x in self.core0_client.getFreeDisks()]
if free_devices == []:
self.skipTest('no free disks on node {}'.format(self.nodeid))
device = random.choice(free_devices)
body = [device]
response = self.storagepools_api.post_storagepools_storagepoolname_devices(self.nodeid, self.data['name'],
body)
self.assertEqual(response.status_code, 201)
for _ in range(30):
free_devices = [x['name'] for x in self.core0_client.getFreeDisks()]
if device not in free_devices:
break
else:
time.sleep(1)
self.lg.info(' [*] list storagepool (SP0) devices, should succeed with 200')
response = self.storagepools_api.get_storagepools_storagepoolname_devices(self.nodeid, self.data['name'])
self.assertEqual(response.status_code, 200)
self.assertIn(device, [x['deviceName'][:-1] for x in response.json()])
# issue https://github.com/zero-os/0-orchestrator/issues/398
# self.lg.info(' [*] Create device with invalid body, should fail with 400')
# body = ""
# response = self.storagepools_api.post_storagepools_storagepoolname_devices(self.nodeid, storagepool['name'], body)
# self.assertEqual(response.status_code, 400)
def test008_delete_storagepool_device(self):
""" GAT-052
**Test Scenario:**
#. Create storagepool (SP0) with device (DV0) on node (N1), should succeed with 201.
#. Delete device (DV0), should succeed with 204.
#. list storagepool (SP0) devices, device (DV0) should be gone.
#. Delete nonexisting device, should fail with 204.
"""
self.lg.info(' [*] Create device (DV1) on storagepool (SP0), should succeed with 201')
free_devices = [x['name'] for x in self.core0_client.getFreeDisks()]
if free_devices == []:
self.skipTest('no free disks on node {}'.format(self.nodeid))
device = random.choice(free_devices)
body = [device]
response = self.storagepools_api.post_storagepools_storagepoolname_devices(self.nodeid, self.data['name'],
body)
self.assertEqual(response.status_code, 201)
for _ in range(30):
free_devices = [x['name'] for x in self.core0_client.getFreeDisks()]
if device not in free_devices:
break
else:
time.sleep(1)
self.lg.info(' [*] list storagepool (SP0) devices, device (DV1) should be listed')
response = self.storagepools_api.get_storagepools_storagepoolname_devices(self.nodeid, self.data['name'])
self.assertEqual(response.status_code, 200)
deviceuuid = [x['uuid'] for x in response.json() if x['deviceName'][:-1] == device]
self.assertNotEqual(deviceuuid, [], 'device was not added to storagepool')
self.lg.info(' [*] Delete device (DV1), should succeed with 204')
response = self.storagepools_api.delete_storagepools_storagepoolname_devices_deviceid(self.nodeid,
self.data['name'],
deviceuuid[0])
self.assertEqual(response.status_code, 204)
for _ in range(30):
free_devices = [x['name'] for x in self.core0_client.getFreeDisks()]
if device in free_devices:
break
else:
time.sleep(1)
self.lg.info(' [*] list storagepool (SP0) devices, device (DV0) should be gone')
response = self.storagepools_api.get_storagepools_storagepoolname_devices(self.nodeid, self.data['name'])
self.assertEqual(response.status_code, 200)
self.assertNotIn(device, [x['deviceName'][:-1] for x in response.json()])
self.lg.info(' [*] Delete nonexisting device, should fail with 204')
response = self.storagepools_api.delete_storagepools_storagepoolname_devices_deviceid(self.nodeid,
self.data['name'],
self.rand_str())
self.assertEqual(response.status_code, 204)
def test009_get_storagepool_filessystem(self):
""" GAT-053
**Test Scenario:**
#. Create storagepool (SP0) on node (N0), should succeed.
#. Create filesystem (FS0) on storagepool (SP0).
#. Get filesystem (FS0), should succeed with 200.
#. Get nonexisting filesystem, should fail with 404.
"""
self.lg.info(' [*] Get filesystem (FS0), should succeed with 200')
response = self.storagepools_api.get_storagepools_storagepoolname_filesystems_filesystemname(self.nodeid,
self.data['name'],
self.data_filesystem[
'name'])
self.assertEqual(response.status_code, 200)
for key in self.data_filesystem.keys():
self.assertEqual(response.json()[key], self.data_filesystem[key])
self.lg.info(' [*] Get nonexisting filesystem, should fail with 404')
response = self.storagepools_api.get_storagepools_storagepoolname_filesystems_filesystemname(self.nodeid,
self.data['name'],
self.rand_str())
self.assertEqual(response.status_code, 404)
def test010_list_storagepool_filesystems(self):
""" GAT-054
**Test Scenario:**
#. Create Storagepool (SP0) on node (N0).
#. Create filesystem (FS0) on storagepool (SP0).
#. list storagepools (SP0) filesystems, filesystem (FS0) should be listed.
"""
self.lg.info(' [*] list storagepools (SP0) filesystems, filesystem (FS0) should be listed')
response = self.storagepools_api.get_storagepools_storagepoolname_filesystems(self.nodeid, self.data['name'])
self.assertEqual(response.status_code, 200)
self.assertIn(self.data_filesystem['name'], response.json())
def test011_post_storagepool_filesystem(self):
""" GAT-055
**Test Scenario:**
#. Get random node id (N0).
#. Create storagepool (SP0) on node (N0).
#. Create filesystem (FS1) on storagepool (SP0), should succeed with 201.
#. Get filesystem (FS1), should succeed with 200.
#. Delete filesystem (FS1), should succeed with 204.
#. Create invalid filesystem (missing required params), should fail with 400.
"""
self.lg.info(' [*] Get filesystem (FS1), should succeed with 200')
response = self.storagepools_api.get_storagepools_storagepoolname_filesystems_filesystemname(self.nodeid,
self.data['name'],
self.data_filesystem[
'name'])
self.assertEqual(response.status_code, 200)
for key in self.data_filesystem.keys():
self.assertEqual(response.json()[key], self.data_filesystem[key])
self.lg.info(' [*] Delete filesystem (FS1), should succeed with 204')
response = self.storagepools_api.delete_storagepools_storagepoolname_filesystems_filesystemname(self.nodeid,
self.data[
'name'],
self.data_filesystem[
'name'])
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] Create filesystem with invalid body, should fail with 400')
response, data = self.storagepools_api.post_storagepools_storagepoolname_filesystems(node_id=self.nodeid,
storagepoolname=self.data[
'name'],
name=123456)
self.assertEqual(response.status_code, 400)
def test012_delete_storagepool_filesystem(self):
""" GAT-056
**Test Scenario:**
#. Create Storagepool (SP0) on node (N0).
#. Create filesystem (FS0) on storagepool (SP0).
#. Delete filesystem (FS0), should succeed with 204.
#. list storagepool (SP0) filesystems, filesystem (FS0) should be gone.
#. Delete nonexisting filesystems, should fail with 204.
"""
self.lg.info(' [*] Delete filesystem (FS0), should succeed with 204')
response = self.storagepools_api.delete_storagepools_storagepoolname_filesystems_filesystemname(self.nodeid,
self.data[
'name'],
self.data_filesystem[
'name'])
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] list storagepool (SP0) filesystems, filesystem (FS0) should be gone')
response = self.storagepools_api.get_storagepools_storagepoolname_filesystems(self.nodeid, self.data['name'])
self.assertEqual(response.status_code, 200)
self.assertNotIn(self.data_filesystem['name'], response.json())
self.lg.info(' [*] Delete nonexisting filesystems, should fail with 204')
response = self.storagepools_api.delete_storagepools_storagepoolname_filesystems_filesystemname(self.nodeid,
self.data[
'name'],
'fake_filesystem')
self.assertEqual(response.status_code, 204)
def test013_get_storagepool_filessystem_snapshot(self):
""" GAT-057
**Test Scenario:**
#. Create storagepool (SP0) on node (N0), should succeed.
#. Create filesystem (FS0) on storagepool (SP0).
#. Create snapshot (SS0) on filesystem (FS0).
#. Get snapshot (SS0), should succeed with 200.
#. Get nonexisting snapshot, should fail with 404.
"""
self.lg.info(' [*] Get snapshot (SS0), should succeed with 200')
response = self.storagepools_api.get_filesystem_snapshots_snapshotname(self.nodeid, self.data['name'],
self.data_filesystem['name'],
self.data_snapshot['name'])
self.assertEqual(response.status_code, 200)
for key in self.data_snapshot.keys():
self.assertEqual(response.json()[key], self.data_snapshot[key])
self.lg.info(' [*] Get nonexisting snapshot, should fail with 404')
response = self.storagepools_api.get_filesystem_snapshots_snapshotname(self.nodeid, self.data['name'],
self.data_filesystem['name'],
self.rand_str())
self.assertEqual(response.status_code, 404)
def test014_list_storagepool_filesystems_snapshots(self):
""" GAT-058
**Test Scenario:**
#. Create storagepool (SP0) on node (N0), should succeed.
#. Create filesystem (FS0) on storagepool (SP0).
#. Create snapshot (SS0) on filesystem (FS0).
#. list snapshots of filesystems (FS0), snapshot (SS0) should be listed.
"""
self.lg.info(' [*] list snapshots of filesystems (FS0), snapshot (SS0) should be listed')
response = self.storagepools_api.get_filesystem_snapshots(self.nodeid, self.data['name'],
self.data_filesystem['name'])
self.assertEqual(response.status_code, 200)
self.assertIn(self.data_snapshot['name'], response.json())
def test015_post_storagepool_filesystem_snapshot(self):
""" GAT-059
**Test Scenario:**
#. Create storagepool (SP0) on node (N0), should succeed.
#. Create filesystem (FS0) on storagepool (SP0).
#. Create snapshot (SS1) on filesystem (FS0).
#. Get snapshot (SS1), should succeed with 200.
#. Delete snapshot (SS1), should succeed with 204.
#. Create snapshot with missing required params, should fail with 400.
"""
self.lg.info(' [*] Get snapshot (SS1), should succeed with 200')
response = self.storagepools_api.get_filesystem_snapshots_snapshotname(self.nodeid, self.data['name'],
self.data_filesystem['name'],
self.data_snapshot['name'])
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()['name'], self.data_snapshot['name'])
self.lg.info(' [*] Delete snapshot (SS1), should succeed with 204')
response = self.storagepools_api.delete_filesystem_snapshots_snapshotname(self.nodeid, self.data['name'],
self.data_filesystem['name'],
self.data_snapshot['name'])
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] Create snapshot with missing required params, should fail with 400')
response, data = self.storagepools_api.post_filesystems_snapshots(self.nodeid, self.data['name'],
self.data_filesystem['name'],
name='')
self.assertEqual(response.status_code, 400)
def test016_delete_storagepool_filesystem_snapshot(self):
""" GAT-060
**Test Scenario:**
#. Get random node id (N0), should succeed.
#. Create storagepool (SP0) on node (N0), should succeed.
#. Create filesystem (FS0) on storagepool (SP0).
#. Create snapshot (SS0) on filesystem (FS0).
#. Delete snapshot (SS0), should succeed with 204.
#. list filesystem (FS0) snapshots, snapshot (SS0) should be gone.
#. Delete nonexisting snapshot, should fail with 204.
"""
self.lg.info(' [*] Delete snapshot (SS0), should succeed with 204')
response = self.storagepools_api.delete_filesystem_snapshots_snapshotname(self.nodeid, self.data['name'],
self.data_filesystem['name'],
self.data_snapshot['name'])
self.assertEqual(response.status_code, 204)
self.lg.info(' [*] list filesystem (FS0) snapshots, snapshot (SS0) should be gone')
response = self.storagepools_api.get_filesystem_snapshots(self.nodeid, self.data['name'],
self.data_filesystem['name'])
self.assertEqual(response.status_code, 200)
self.assertNotIn(self.data_snapshot['name'], response.json())
self.lg.info(' [*] Delete nonexisting snapshot, should fail with 204')
response = self.storagepools_api.delete_filesystem_snapshots_snapshotname(self.nodeid, self.data['name'],
self.data_filesystem['name'],
'fake_filesystem')
self.assertEqual(response.status_code, 204)
def test017_post_storagepool_filesystem_snapshot_rollback(self):
""" GAT-152
**Test Scenario:**
#. Get random node id (N0), should succeed.
#. Create storagepool (SP0) on node (N0), should succeed.
#. Create filesystem (FS0) on storagepool (SP0).
#. Create snapshot (SS0) on filesystem (FS0).
#. Create file test.txt on filesystem (FS0).
#. Take a new snapshot (SS1).
#. Rollback filesystem to snapshot (SS0), should succeed.
#. Check that file test.txt doesn\'t exist, should succeed.
#. Rollback filesystem to snapshot (SS1), should succeed.
#. Check file test.txt exists and its data is correct, should succeed.
"""
filesystem_path = '/mnt/storagepools/{}/filesystems/{}'.format(
self.data['name'], self.data_filesystem['name']
)
self.lg.info("Create file test.txt on filesystem (FS0)")
cmd = 'echo "test" > {}/test.txt'.format(filesystem_path)
response = self.core0_client.client.bash(cmd).get()
self.assertEqual(response.state, 'SUCCESS')
self.lg.info('Take a new snapshot (SS1)')
response, new_snapshot_data = self.storagepools_api.post_filesystems_snapshots(
nodeid=self.nodeid,
storagepoolname=self.data['name'],
filesystemname=self.data_filesystem['name']
)
self.assertEqual(response.status_code, 201)
self.lg.info("Rollback filesystem to snapshot (SS0), should succeed")
response = self.storagepools_api.post_filesystem_snapshots_snapshotname_rollback(
nodeid=self.nodeid,
storagepoolname=self.data['name'],
filesystemname=self.data_filesystem['name'],
snapshotname=self.data_snapshot['name']
)
self.assertEqual(response.status_code, 204)
time.sleep(5)
self.lg.info("Check that file test.txt doesn\'t exist, should succeed")
cmd = 'ls {} | grep test.txt'.format(filesystem_path)
response = self.core0_client.client.bash(cmd).get()
self.assertNotIn('test.txt', response.stdout)
self.lg.info("Rollback filesystem to snapshot (SS1), should succeed")
response = self.storagepools_api.post_filesystem_snapshots_snapshotname_rollback(
nodeid=self.nodeid,
storagepoolname=self.data['name'],
filesystemname=self.data_filesystem['name'],
snapshotname=new_snapshot_data['name']
)
self.assertEqual(response.status_code, 204)
time.sleep(5)
self.lg.info("Check file test.txt exists and its data is correct, should succeed")
cmd = 'ls {} | grep test.txt'.format(filesystem_path)
response = self.core0_client.client.bash(cmd).get()
self.assertEqual(response.state, 'SUCCESS')
self.assertIn('test.txt', response.stdout)
cmd = 'cat {}/test.txt'.format(filesystem_path)
response = self.core0_client.client.bash(cmd).get()
self.assertEqual(response.state, 'SUCCESS')
self.assertIn('test', response.stdout.strip())
@unittest.skip("https://github.com/zero-os/0-orchestrator/issues/1246")
def test018_remove_storagepool_last_device(self):
""" GAT-151
**Test Scenario:**
#. Get random node id (N0).
#. Create storagepool (SP0) with single device (D0).
#. Get device (D0) uuid, should succeed.
#. Delete device (D0), should fail with 400
#. Delete storagepool (SP0), should succeed.
"""
if not self.freeDisks:
self.skipTest(' [*] No free disks on node {}'.format(self.nodeid))
self.lg.info('Create storagepool (SP0) with single device (D0)')
response, data = self.storagepools_api.post_storagepools(node_id=self.nodeid,
free_devices=[self.freeDisks[0]])
self.assertEqual(response.status_code, 201)
self.lg.info('Get device (D0) uuid, should succeed')
response = self.storagepools_api.get_storagepools_storagepoolname_devices(nodeid=self.nodeid,
storagepoolname=data['name'])
self.assertEqual(response.status_code, 200)
deviceuuid = response.json()[0]['uuid']
self.lg.info('Delete device (D0), should fail with 400')
response = self.storagepools_api.delete_storagepools_storagepoolname_devices_deviceid(
nodeid=self.nodeid,
storagepoolname=data['name'],
deviceuuid=deviceuuid
)
self.assertEqual(response.status_code, 400)
self.lg.info('Delete storagepool (SP0)')
response = self.storagepools_api.delete_storagepools_storagepoolname(self.nodeid, data['name'])
self.assertEqual(response.status_code, 204)
@unittest.skip("https://github.com/zero-os/0-orchestrator/issues/1257/1258")
def test019_create_storagepool_filesystem_different_parameters(self):
""" GAT-153
**Test Scenario:**
#. Get random node id (N0), should succeed.
#. Create storagepool (SP0) on node (N0), should succeed.
#. Create filesystem (FS0) on storagepool (SP0) with specific quota.
#. Write a file on (FS0) with size above the quota limit, should fail
#. Write a file on (FS0) with size under the quota limit, should succeed
#. Create readonly filesystem (FS1) on storagepool (SP0).
#. Write a file on (FS1), should fail
"""
self.lg.info('Create filesystem (FS0) on storagepool (SP0) with specific quota')
self.setUp_plus_fileSystem(quota=10)
self.lg.info('Write a file on (FS0) with size above the quota limit, should fail')
filesystem_path = '/mnt/storagepools/{}/filesystems/{}'.format(
self.data['name'], self.data_filesystem['name'])
response = self.core0_client.client.bash('cd {}; fallocate -l 20M {}'.format(filesystem_path, self.rand_str())).get()
self.assertEqual(response.state, 'ERROR')
self.lg.info('Write a file on (FS0) with size under the quota limit, should succeed')
response = self.core0_client.client.bash('cd {}; fallocate -l 5M {}'.format(filesystem_path, self.rand_str())).get()
self.assertEqual(response.state, 'SUCCESS')
self.lg.info('Create readonly filesystem (FS1) on storagepool (SP0)')
self.setUp_plus_fileSystem(quota=5, readOnly=True)
self.lg.info('Write a file on (FS1), should fail')
filesystem_path = '/mnt/storagepools/{}/filesystems/{}'.format(
self.data['name'], self.data_filesystem['name'])
response = self.core0_client.client.bash('cd {}; fallocate -l 3M {}'.format(filesystem_path, self.rand_str())).get()
self.assertEqual(response.state, 'ERROR')
| zero-os/0-orchestrator | tests/0_orchestrator/test_suite/testcases/basic_tests/test05_storagepools_apis.py | Python | apache-2.0 | 34,991 |
#
# Plot dimuon mass distribution for SWC HEP 2015
#
from ROOT import TFile, TBrowser, TH1D
file_events = TFile("test_data/events.root")
tree = file_events.Get("events")
nEvents = tree.GetEntries()
print('Number of events = ' + str(nEvents))
# xrange(n) = 0,1,2,...,n-1
#for iEv in xrange(nEvents):
# tree.GetEntry(iEv)
# nParticles = tree.nPart
# print 'Number of particle = ' + str(nParticles)
hist_dimuon_mass_nBins = 100
hist_dimuon_mass_massMin = 0
hist_dimuon_mass_massMax = 500
h = TH1D("histo_dimuon_mass", "di-muon mass spectrum;m [GeV/c^{2}];Entries", hist_dimuon_mass_nBins, hist_dimuon_mass_massMin, hist_dimuon_mass_massMax)
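# Hedged sketch of the missing fill step (illustration only): extend the
# commented-out event loop above to fill the histogram before drawing it.
# The branch name 'dimuon_mass' is an assumption; the actual branch names in
# events.root are not shown in this script, so adapt before running.
#for iEv in xrange(nEvents):
#    tree.GetEntry(iEv)
#    h.Fill(tree.dimuon_mass)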
h.Draw()
#raw_input("Press return to exit.")
| denglert/dimuon | dimuon.py | Python | mit | 695 |
#!/usr/bin/python
from aws_client import AWSClient
import json
class BasicDiscoverer(AWSClient):
def get_instances(self, *args):
"Runs discovery method and packs result into JSON"
data = self.discovery(*args)
return json.dumps({"data": data})
def discovery(self, *args):
"Method that should be overriden inside inherited classes"
pass
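# Hedged illustration (not part of the original module): a minimal concrete
# discoverer showing how subclasses are expected to override `discovery`. The
# Zabbix macro name and the static instance id are assumptions for demo only.
class ExampleStaticDiscoverer(BasicDiscoverer):
    def discovery(self, *args):
        "Returns a fixed low-level discovery list that get_instances() wraps as JSON"
        return [{"{#INSTANCE_ID}": "i-0123456789abcdef0"}]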
| wawastein/zabbix-cloudwatch | zabbix-scripts/scripts/discovery/basic_discovery.py | Python | gpl-3.0 | 386 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module contains the class describing the coordination geometries that can exist in a given structure. These
"model" coordination geometries are described in the following articles :
- Pure Appl. Chem., Vol. 79, No. 10, pp. 1779--1799, 2007.
- Acta Cryst. A, Vol. 46, No. 1, pp. 1--11, 1990.
The module also contains descriptors of part of these geometries (plane of separation, ...) that are used in the
identification algorithms.
"""
__author__ = "David Waroquiers"
__copyright__ = "Copyright 2012, The Materials Project"
__credits__ = "Geoffroy Hautier"
__version__ = "2.0"
__maintainer__ = "David Waroquiers"
__email__ = "david.waroquiers@gmail.com"
__date__ = "Feb 20, 2016"
import numpy as np
from scipy.special import factorial
import itertools
import abc
from monty.json import MSONable, MontyDecoder
import json
import os
module_dir = os.path.dirname(os.path.abspath(__file__))
UNKNOWN_ENVIRONMENT_SYMBOL = 'UNKNOWN'
UNCLEAR_ENVIRONMENT_SYMBOL = 'UNCLEAR'
EXPLICIT_PERMUTATIONS = 'EXPLICIT_PERMUTATIONS'
SEPARATION_PLANE = 'SEPARATION_PLANE'
class AbstractChemenvAlgorithm(MSONable, metaclass=abc.ABCMeta):
"""
Class used to define a Chemenv strategy for the neighbors and coordination environment to be applied to a
StructureEnvironments object
"""
def __init__(self, algorithm_type):
self._algorithm_type = algorithm_type
@abc.abstractmethod
def as_dict(self):
"""
A JSON serializable dict representation of the algorithm
"""
pass
@property
def algorithm_type(self):
return self._algorithm_type
@abc.abstractmethod
def __str__(self):
return
class ExplicitPermutationsAlgorithm(AbstractChemenvAlgorithm):
def __init__(self, permutations):
"""
Initializes an explicit permutations algorithm with the list of permutations to be used for a given perfect coordination geometry
"""
super().__init__(
algorithm_type=EXPLICIT_PERMUTATIONS)
self._permutations = permutations
def __str__(self):
return self.algorithm_type
@property
def permutations(self):
return self._permutations
@property
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"permutations": self._permutations}
@classmethod
def from_dict(cls, dd):
return cls(dd['permutations'])
class SeparationPlane(AbstractChemenvAlgorithm):
def __init__(self, plane_points, mirror_plane=False, ordered_plane=False,
point_groups=None,
ordered_point_groups=None, # include_inverted_plane=False,
point_groups_permutations=None,
# do_inverse_pt_gp_permutations=False, plane_type='MIRROR',
explicit_permutations=None, minimum_number_of_points=None,
explicit_optimized_permutations=None,
multiplicity=None,
other_plane_points=None): # , plane_safe_permutations=False):
"""
Initializes a separation plane for a given perfect coordination geometry
:param mirror_plane: True if the separation plane is a mirror plane, in which case there is a correspondence
of the points in each point_group (can reduce the number of permutations)
:param ordered_plane : True if the order of the points in the plane can be taken into account to reduce the
number of permutations
:param plane_points: Indices of the points that are in the plane in the perfect structure (and should be
found in the defective one as well)
:param point_groups: The two groups of points separated by the plane
:param plane_type: can be "MIRROR", if the plane is a mirror plane going through the central site,
'BASAL_THROUGH_CENTER', if the plane is a basal plane (no point on the "left" side) going through the central
site, 'BASAL', if the plane is a basal plane not going through the central site, 'UNEQUILIBRATED_THROUGH_CENTER', if
the plane cuts the geometry in two groups of points with different numbers of points on each side, and is going
through the centre, 'UNEQUILIBRATED', if the plane cuts the geometry in two groups of points with different
numbers of points on each side, and is not going through the centre, 'EQUILIBRATED_THROUGH_CENTER', if the
plane cuts the geometry in two groups of points of the same size, is going through the centre but is not a
mirror plane, 'EQUILIBRATED', if the plane cuts the geometry in two groups of points of the same size, is not
going through the centre and is not a mirror plane.
"""
super().__init__(algorithm_type=SEPARATION_PLANE)
self.mirror_plane = mirror_plane
self.plane_points = plane_points
self.point_groups = point_groups
if len(point_groups[0]) > len(point_groups[1]):
raise RuntimeError(
"The number of points in the first group should be\n"
"less than or equal to the number of points in the second group")
self._hash = 10000 * len(plane_points) + 100 * len(
point_groups[0]) + len(point_groups[1])
self.ordered_plane = ordered_plane
self.ordered_point_groups = [False,
False] if ordered_point_groups is None else ordered_point_groups
self._ordered_indices = list(point_groups[0])
self._ordered_indices.extend(plane_points)
self._ordered_indices.extend(point_groups[1])
self._inv_ordered_indices = np.argsort(self._ordered_indices)
self._point_groups_permutations = point_groups_permutations
self.explicit_permutations = explicit_permutations
self.explicit_optimized_permutations = explicit_optimized_permutations
self._safe_permutations = None
if self.explicit_optimized_permutations is not None:
self._permutations = self.explicit_optimized_permutations
elif self.explicit_permutations is not None:
self._permutations = self.explicit_permutations
self.multiplicity = multiplicity
self.other_plane_points = other_plane_points
self.minimum_number_of_points = minimum_number_of_points
self.maximum_number_of_points = len(self.plane_points)
self._ref_separation_perm = list(self.point_groups[0])
self._ref_separation_perm.extend(list(self.plane_points))
self._ref_separation_perm.extend(list(self.point_groups[1]))
self._argsorted_ref_separation_perm = list(
np.argsort(self._ref_separation_perm))
self.separation = (len(point_groups[0]), len(plane_points), len(point_groups[1]))
@property
def ordered_indices(self):
return self._ordered_indices
@property
def inv_ordered_indices(self):
return self._inv_ordered_indices
@property
def permutations(self):
return self._permutations
@property
def ref_separation_perm(self):
return self._ref_separation_perm
@property
def argsorted_ref_separation_perm(self):
return self._argsorted_ref_separation_perm
# def safe_plane_permutations(self, ordered_plane=False,
# ordered_point_groups=None):
# ordered_point_groups = [False,
# False] if ordered_point_groups is None else ordered_point_groups
# rotate = lambda s, n: s[-n:] + s[:-n]
# if ordered_plane and self.ordered_plane:
# plane_perms = [rotate(self.plane_points, ii) for ii in
# range(len(self.plane_points))]
# invplanepoints = self.plane_points[::-1]
# plane_perms.extend([rotate(invplanepoints, ii) for ii in
# range(len(self.plane_points) - 1, -1, -1)])
# else:
# plane_perms = list(itertools.permutations(self.plane_points))
# if ordered_point_groups[0] and self.ordered_point_groups[0]:
# s0_perms = [rotate(self.point_groups[0], ii) for ii in
# range(len(self.point_groups[0]))]
# invpg0 = self.point_groups[0][::-1]
# s0_perms.extend([rotate(invpg0, ii) for ii in range(len(invpg0))])
# else:
# s0_perms = list(itertools.permutations(self.point_groups[0]))
# if ordered_point_groups[1] and self.ordered_point_groups[1]:
# s2_perms = [rotate(self.point_groups[1], ii) for ii in
# range(len(self.point_groups[1]))]
# invpg2 = self.point_groups[1][::-1]
# s2_perms.extend([rotate(invpg2, ii) for ii in range(len(invpg2))])
# else:
# s2_perms = list(itertools.permutations(self.point_groups[1]))
# add_opposite = False
# if self._safe_permutations is None:
# self._safe_permutations = []
# for perm_side1 in s0_perms:
# for perm_sep_plane in plane_perms:
# for perm_side2 in s2_perms:
# perm = list(perm_side1)
# perm.extend(list(perm_sep_plane))
# perm.extend(list(perm_side2))
# self._safe_permutations.append(perm)
# if add_opposite:
# perm = list(perm_side2)
# perm.extend(list(perm_sep_plane))
# perm.extend(list(perm_side1))
# self._safe_permutations.append(perm)
# return self._safe_permutations
def safe_separation_permutations(self, ordered_plane=False,
ordered_point_groups=None,
add_opposite=False):
# Use lists rather than range objects so the rotate lambda below can slice
# and concatenate them.
s0 = list(range(len(self.point_groups[0])))
plane = list(range(len(self.point_groups[0]),
len(self.point_groups[0]) + len(self.plane_points)))
s1 = list(range(len(self.point_groups[0]) + len(self.plane_points),
len(self.point_groups[0]) + len(self.plane_points) + len(
self.point_groups[1])))
ordered_point_groups = [False,
False] if ordered_point_groups is None else ordered_point_groups
rotate = lambda s, n: s[-n:] + s[:-n]
if ordered_plane and self.ordered_plane:
plane_perms = [rotate(plane, ii) for ii in range(len(plane))]
inv_plane = plane[::-1]
plane_perms.extend(
[rotate(inv_plane, ii) for ii in range(len(inv_plane))])
else:
plane_perms = list(itertools.permutations(plane))
if ordered_point_groups[0] and self.ordered_point_groups[0]:
s0_perms = [rotate(s0, ii) for ii in range(len(s0))]
inv_s0 = s0[::-1]
s0_perms.extend([rotate(inv_s0, ii) for ii in range(len(inv_s0))])
else:
s0_perms = list(itertools.permutations(s0))
if ordered_point_groups[1] and self.ordered_point_groups[1]:
s1_perms = [rotate(s1, ii) for ii in range(len(s1))]
inv_s1 = s1[::-1]
s1_perms.extend([rotate(inv_s1, ii) for ii in range(len(inv_s1))])
else:
s1_perms = list(itertools.permutations(s1))
if self._safe_permutations is None:
self._safe_permutations = []
for perm_side1 in s0_perms:
for perm_sep_plane in plane_perms:
for perm_side2 in s1_perms:
perm = list(perm_side1)
perm.extend(list(perm_sep_plane))
perm.extend(list(perm_side2))
self._safe_permutations.append(perm)
if add_opposite:
perm = list(perm_side2)
perm.extend(list(perm_sep_plane))
perm.extend(list(perm_side1))
self._safe_permutations.append(perm)
return self._safe_permutations
@property
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"plane_points": self.plane_points,
"mirror_plane": self.mirror_plane,
"ordered_plane": self.ordered_plane,
"point_groups": self.point_groups,
"ordered_point_groups": self.ordered_point_groups,
"point_groups_permutations": self._point_groups_permutations,
"explicit_permutations": [eperm.tolist() for eperm in self.explicit_permutations]
if self.explicit_permutations is not None else None,
"explicit_optimized_permutations": [eoperm.tolist()
for eoperm in self.explicit_optimized_permutations]
if self.explicit_optimized_permutations is not None else None,
"multiplicity": self.multiplicity,
"other_plane_points": self.other_plane_points,
"minimum_number_of_points": self.minimum_number_of_points}
@classmethod
def from_dict(cls, dd):
eop = ([np.array(eoperm) for eoperm in dd['explicit_optimized_permutations']]
if dd.get('explicit_optimized_permutations') is not None else None)
return cls(plane_points=dd['plane_points'],
mirror_plane=dd['mirror_plane'],
ordered_plane=dd['ordered_plane'],
point_groups=dd['point_groups'],
ordered_point_groups=dd['ordered_point_groups'],
point_groups_permutations=dd['point_groups_permutations'],
explicit_permutations=[np.array(eperm) for eperm in dd['explicit_permutations']],
explicit_optimized_permutations=eop,
multiplicity=dd[
'multiplicity'] if 'multiplicity' in dd else None,
other_plane_points=dd[
'other_plane_points'] if 'other_plane_points' in dd else None,
minimum_number_of_points=dd['minimum_number_of_points'])
def __str__(self):
out = 'Separation plane algorithm with the following reference separation :\n'
out += '[{}] | [{}] | [{}]'.format(
'-'.join(str(pp) for pp in self.point_groups[0]),
'-'.join(str(pp) for pp in self.plane_points),
'-'.join(str(pp) for pp in self.point_groups[1]),
)
return out
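# Hedged usage sketch (illustration only, not from the original module): how a
# SeparationPlane for a hypothetical 6-coordinated geometry could be declared.
# The index lists below are made up to show the argument layout and are not
# taken from any packaged coordination geometry file.
#
#   sep_plane = SeparationPlane(plane_points=[1, 2, 3, 4],
#                               point_groups=[[0], [5]],
#                               mirror_plane=True)
#   sep_plane.separation   # -> (1, 4, 1)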
class CoordinationGeometry:
"""
Class used to store the ideal representation of a chemical environment or "coordination geometry"
"""
CSM_SKIP_SEPARATION_PLANE_ALGO = 10.0 # Default value of continuous symmetry measure below which no further
# search is performed for the separation plane algorithms
class NeighborsSetsHints:
ALLOWED_HINTS_TYPES = ['single_cap', 'double_cap', 'triple_cap']
def __init__(self, hints_type, options):
if hints_type not in self.ALLOWED_HINTS_TYPES:
raise ValueError('Type "{}" for NeighborsSetsHints is not allowed'.format(hints_type))
self.hints_type = hints_type
self.options = options
def hints(self, hints_info):
if hints_info['csm'] > self.options['csm_max']:
return []
return object.__getattribute__(self, '{}_hints'.format(self.hints_type))(hints_info)
def single_cap_hints(self, hints_info):
cap_index_perfect = self.options['cap_index']
nb_set = hints_info['nb_set']
permutation = hints_info['permutation']
nb_set_voronoi_indices_perfect_aligned = nb_set.get_neighb_voronoi_indices(permutation=permutation)
cap_voronoi_index = nb_set_voronoi_indices_perfect_aligned[cap_index_perfect]
new_site_voronoi_indices = list(nb_set.site_voronoi_indices)
new_site_voronoi_indices.remove(cap_voronoi_index)
return [new_site_voronoi_indices]
def double_cap_hints(self, hints_info):
first_cap_index_perfect = self.options['first_cap_index']
second_cap_index_perfect = self.options['second_cap_index']
nb_set = hints_info['nb_set']
permutation = hints_info['permutation']
nb_set_voronoi_indices_perfect_aligned = nb_set.get_neighb_voronoi_indices(permutation=permutation)
first_cap_voronoi_index = nb_set_voronoi_indices_perfect_aligned[first_cap_index_perfect]
second_cap_voronoi_index = nb_set_voronoi_indices_perfect_aligned[second_cap_index_perfect]
new_site_voronoi_indices1 = list(nb_set.site_voronoi_indices)
new_site_voronoi_indices2 = list(nb_set.site_voronoi_indices)
new_site_voronoi_indices3 = list(nb_set.site_voronoi_indices)
new_site_voronoi_indices1.remove(first_cap_voronoi_index)
new_site_voronoi_indices2.remove(second_cap_voronoi_index)
new_site_voronoi_indices3.remove(first_cap_voronoi_index)
new_site_voronoi_indices3.remove(second_cap_voronoi_index)
return [new_site_voronoi_indices1, new_site_voronoi_indices2, new_site_voronoi_indices3]
def triple_cap_hints(self, hints_info):
first_cap_index_perfect = self.options['first_cap_index']
second_cap_index_perfect = self.options['second_cap_index']
third_cap_index_perfect = self.options['third_cap_index']
nb_set = hints_info['nb_set']
permutation = hints_info['permutation']
nb_set_voronoi_indices_perfect_aligned = nb_set.get_neighb_voronoi_indices(permutation=permutation)
first_cap_voronoi_index = nb_set_voronoi_indices_perfect_aligned[first_cap_index_perfect]
second_cap_voronoi_index = nb_set_voronoi_indices_perfect_aligned[second_cap_index_perfect]
third_cap_voronoi_index = nb_set_voronoi_indices_perfect_aligned[third_cap_index_perfect]
new_site_voronoi_indices1 = list(nb_set.site_voronoi_indices)
new_site_voronoi_indices2 = list(nb_set.site_voronoi_indices)
new_site_voronoi_indices3 = list(nb_set.site_voronoi_indices)
new_site_voronoi_indices4 = list(nb_set.site_voronoi_indices)
new_site_voronoi_indices5 = list(nb_set.site_voronoi_indices)
new_site_voronoi_indices6 = list(nb_set.site_voronoi_indices)
new_site_voronoi_indices7 = list(nb_set.site_voronoi_indices)
new_site_voronoi_indices1.remove(first_cap_voronoi_index)
new_site_voronoi_indices2.remove(second_cap_voronoi_index)
new_site_voronoi_indices3.remove(third_cap_voronoi_index)
new_site_voronoi_indices4.remove(second_cap_voronoi_index)
new_site_voronoi_indices4.remove(third_cap_voronoi_index)
new_site_voronoi_indices5.remove(first_cap_voronoi_index)
new_site_voronoi_indices5.remove(third_cap_voronoi_index)
new_site_voronoi_indices6.remove(first_cap_voronoi_index)
new_site_voronoi_indices6.remove(second_cap_voronoi_index)
new_site_voronoi_indices7.remove(first_cap_voronoi_index)
new_site_voronoi_indices7.remove(second_cap_voronoi_index)
new_site_voronoi_indices7.remove(third_cap_voronoi_index)
return [new_site_voronoi_indices1, new_site_voronoi_indices2, new_site_voronoi_indices3,
new_site_voronoi_indices4, new_site_voronoi_indices5, new_site_voronoi_indices6,
new_site_voronoi_indices7]
def as_dict(self):
return {'hints_type': self.hints_type,
'options': self.options}
@classmethod
def from_dict(cls, dd):
return cls(hints_type=dd['hints_type'],
options=dd['options'])
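# Hedged usage sketch (illustration only): how a single-cap hint might be
# declared for a capped geometry. The cap_index and csm_max values below are
# illustrative assumptions, not taken from a packaged geometry file.
#
#   hint = CoordinationGeometry.NeighborsSetsHints(
#       hints_type='single_cap', options={'cap_index': 6, 'csm_max': 8.0})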
def __init__(self, mp_symbol, name, alternative_names=None,
IUPAC_symbol=None, IUCr_symbol=None, coordination=None,
central_site=np.zeros(3), points=None, solid_angles=None,
permutations_safe_override=False,
plane_ordering_override=True, deactivate=False, faces=None,
edges=None,
plane_safe_permutations=False, algorithms=None,
equivalent_indices=None,
neighbors_sets_hints=None):
"""
Initializes one "coordination geometry" according to [Pure Appl. Chem., Vol. 79, No. 10, pp. 1779--1799, 2007]
and [Acta Cryst. A, Vol. 46, No. 1, pp. 1--11, 1990].
:param mp_symbol: Symbol used internally for the coordination geometry.
:param name: Name of the coordination geometry.
:param alternative_names: Alternative names for this coordination geometry.
:param IUPAC_symbol: The IUPAC symbol of this coordination geometry.
:param IUCr_symbol: The IUCr symbol of this coordination geometry.
:param coordination: The coordination number of this coordination geometry (number of neighboring atoms).
:param central_site: The coordinates of the central site of this coordination geometry.
:param points: The list of the coordinates of all the points of this coordination geometry.
:param separation_planes: List of separation facets to help set up the permutations
:param permutations_safe_override: Computes all the permutations if set to True (overrides the plane separation
algorithms or any other algorithm, for testing purposes)
:param plane_ordering_override: Computes all the permutations of the plane separation algorithm if set to False
otherwise, uses the anticlockwise ordering of the separation facets (for testing purposes)
:param deactivate: deactivates this coordination geometry in the search
:param faces: list of the faces with their vertices given in a clockwise or anticlockwise order, for drawing
purposes
:param edges: list of edges, for drawing purposes
"""
self._mp_symbol = mp_symbol
self.name = name
self.alternative_names = alternative_names if alternative_names is not None else []
self.IUPACsymbol = IUPAC_symbol
self.IUCrsymbol = IUCr_symbol
self.coordination = coordination
self.central_site = np.array(central_site)
self.points = points
self._solid_angles = solid_angles
self.permutations_safe_override = permutations_safe_override
self.plane_ordering_override = plane_ordering_override
self.plane_safe_permutations = plane_safe_permutations
# self.setup_permutations(permutations)
self.deactivate = deactivate
self._faces = faces
self._edges = edges
self._algorithms = algorithms
if points is not None:
self.centroid = np.mean(np.array(points), axis=0)
else:
self.centroid = None
self.equivalent_indices = equivalent_indices
self.neighbors_sets_hints = neighbors_sets_hints
self._pauling_stability_ratio = None
def as_dict(self):
return {'mp_symbol': self._mp_symbol,
'name': self.name,
'alternative_names': self.alternative_names,
'IUPAC_symbol': self.IUPACsymbol,
'IUCr_symbol': self.IUCrsymbol,
'coordination': self.coordination,
'central_site': [float(xx) for xx in self.central_site],
'points': [[float(xx) for xx in pp] for pp in
self.points] if self.points is not None else None,
'solid_angles': [float(ang) for ang in
self._solid_angles] if self._solid_angles is not None else None,
'deactivate': self.deactivate,
'_faces': self._faces,
'_edges': self._edges,
'_algorithms': [algo.as_dict for algo in
self._algorithms] if self._algorithms is not None else None,
'equivalent_indices': self.equivalent_indices,
'neighbors_sets_hints': [nbsh.as_dict() for nbsh in self.neighbors_sets_hints]
if self.neighbors_sets_hints is not None else None}
@classmethod
def from_dict(cls, dd):
dec = MontyDecoder()
return cls(mp_symbol=dd['mp_symbol'],
name=dd['name'],
alternative_names=dd['alternative_names'],
IUPAC_symbol=dd['IUPAC_symbol'],
IUCr_symbol=dd['IUCr_symbol'],
coordination=dd['coordination'],
central_site=dd['central_site'],
points=dd['points'],
solid_angles=(dd['solid_angles'] if 'solid_angles' in dd
else [4.0 * np.pi / dd['coordination']] * dd[
'coordination']),
deactivate=dd['deactivate'],
faces=dd['_faces'],
edges=dd['_edges'],
algorithms=[dec.process_decoded(algo_d)
for algo_d in dd['_algorithms']] if dd['_algorithms'] is not None else None,
equivalent_indices=dd[
'equivalent_indices'] if 'equivalent_indices' in dd else None,
neighbors_sets_hints=[cls.NeighborsSetsHints.from_dict(nbshd)
for nbshd in dd['neighbors_sets_hints']]
if ('neighbors_sets_hints' in dd and dd['neighbors_sets_hints'] is not None) else None)
def __str__(self):
symbol = ''
if self.IUPAC_symbol is not None:
symbol += ' (IUPAC: {s}'.format(s=self.IUPAC_symbol)
if self.IUCr_symbol is not None:
symbol += ' || IUCr: {s})'.format(s=self.IUCr_symbol)
else:
symbol += ')'
elif self.IUCr_symbol is not None:
symbol += ' (IUCr: {s})'.format(s=self.IUCr_symbol)
outs = ['Coordination geometry type : {n}{s}\n'.format(n=self.name,
s=symbol),
' - coordination number : {c}'.format(c=self.coordination)]
if self.points is None:
outs.append('... not yet implemented')
else:
outs.append(' - list of points :')
for pp in self.points:
outs.append(' - {p}'.format(p=pp))
outs.append(
'------------------------------------------------------------')
outs.append('')
return '\n'.join(outs)
def __repr__(self):
symbol = ''
if self.IUPAC_symbol is not None:
symbol += ' (IUPAC: {s}'.format(s=self.IUPAC_symbol)
if self.IUCr_symbol is not None:
symbol += ' || IUCr: {s})'.format(s=self.IUCr_symbol)
else:
symbol += ')'
elif self.IUCr_symbol is not None:
symbol += ' (IUCr: {s})'.format(s=self.IUCr_symbol)
outs = ['Coordination geometry type : {n}{s}\n'.format(n=self.name,
s=symbol),
' - coordination number : {c}'.format(c=self.coordination)]
outs.append(
'------------------------------------------------------------')
outs.append('')
return '\n'.join(outs)
def __len__(self):
return self.coordination
def set_permutations_safe_override(self, permutations_safe_override):
self.permutations_safe_override = permutations_safe_override
# self.setup_permutations()
@property
def csm_skip_algo(self):
return self.CSM_SKIP_SEPARATION_PLANE_ALGO
@property
def distfactor_max(self):
dists = [np.linalg.norm(pp - self.central_site) for pp in self.points]
return np.max(dists) / np.min(dists)
@property
def coordination_number(self):
"""
Returns the coordination number of this coordination geometry.
"""
return self.coordination
@property
def pauling_stability_ratio(self):
"""
Returns the theoretical Pauling stability ratio (rC/rA) for this environment.
"""
if self._pauling_stability_ratio is None:
if self.ce_symbol in ['S:1', 'L:2']:
self._pauling_stability_ratio = 0.0
else:
mindist_anions = 1000000.0
mindist_cation_anion = 1000000.0
for ipt1 in range(len(self.points)):
pt1 = np.array(self.points[ipt1])
mindist_cation_anion = min(mindist_cation_anion,
np.linalg.norm(pt1-self.central_site))
for ipt2 in range(ipt1+1, len(self.points)):
pt2 = np.array(self.points[ipt2])
mindist_anions = min(mindist_anions,
np.linalg.norm(pt1-pt2))
anion_radius = mindist_anions / 2.0
cation_radius = mindist_cation_anion - anion_radius
self._pauling_stability_ratio = cation_radius / anion_radius
return self._pauling_stability_ratio
@property
def mp_symbol(self):
"""
Returns the MP symbol of this coordination geometry.
"""
return self._mp_symbol
@property
def ce_symbol(self):
"""
Returns the symbol of this coordination geometry.
"""
return self._mp_symbol
def get_coordination_number(self):
"""
Returns the coordination number of this coordination geometry.
"""
return self.coordination
def is_implemented(self):
"""
Returns True if this coordination geometry is implemented.
"""
return bool(self.points)
def get_name(self):
"""
Returns the name of this coordination geometry.
"""
return self.name
@property
def IUPAC_symbol(self):
"""
Returns the IUPAC symbol of this coordination geometry.
"""
return self.IUPACsymbol
@property
def IUPAC_symbol_str(self):
"""
Returns a string representation of the IUPAC symbol of this coordination geometry.
"""
return str(self.IUPACsymbol)
@property
def IUCr_symbol(self):
"""
Returns the IUCr symbol of this coordination geometry.
"""
return self.IUCrsymbol
@property
def IUCr_symbol_str(self):
"""
Returns a string representation of the IUCr symbol of this coordination geometry.
"""
return str(self.IUCrsymbol)
@property
def number_of_permutations(self):
"""
Returns the number of permutations of this coordination geometry.
"""
if self.permutations_safe_override:
return factorial(self.coordination)
elif self.permutations is None:
return factorial(self.coordination)
return len(self.permutations)
def ref_permutation(self, permutation):
perms = []
for eqv_indices in self.equivalent_indices:
perms.append(tuple([permutation[ii] for ii in eqv_indices]))
perms.sort()
return perms[0]
@property
def algorithms(self):
"""
Returns the list of algorithms that are used to identify this coordination geometry.
"""
return self._algorithms
def get_central_site(self):
"""
Returns the central site of this coordination geometry.
"""
return self.central_site
def faces(self, sites, permutation=None):
"""
Returns the list of faces of this coordination geometry. Each face is given as a
list of its vertices coordinates.
"""
if permutation is None:
coords = [site.coords for site in sites]
else:
coords = [sites[ii].coords for ii in permutation]
return [[coords[ii] for ii in f] for f in self._faces]
def edges(self, sites, permutation=None, input='sites'):
"""
Returns the list of edges of this coordination geometry. Each edge is given as a
list of its end vertices coordinates.
"""
if input == 'sites':
coords = [site.coords for site in sites]
elif input == 'coords':
coords = sites
# if permutation is None:
# coords = [site.coords for site in sites]
# else:
# coords = [sites[ii].coords for ii in permutation]
if permutation is not None:
coords = [coords[ii] for ii in permutation]
return [[coords[ii] for ii in e] for e in self._edges]
def solid_angles(self, permutation=None):
"""
Returns the list of "perfect" solid angles of this coordination geometry,
one per neighboring point, optionally reordered according to the given permutation.
"""
if permutation is None:
return self._solid_angles
else:
return [self._solid_angles[ii] for ii in permutation]
def get_pmeshes(self, sites, permutation=None):
"""
Returns the pmesh strings used for jmol to show this geometry.
"""
pmeshes = []
# _vertices = [site.coords for site in sites]
if permutation is None:
_vertices = [site.coords for site in sites]
else:
_vertices = [sites[ii].coords for ii in permutation]
_face_centers = []
number_of_faces = 0
for face in self._faces:
if len(face) in [3, 4]:
number_of_faces += 1
else:
number_of_faces += len(face)
_face_centers.append(np.array([np.mean([_vertices[face_vertex][ii]
for face_vertex in face])
for ii in range(3)]))
out = '{}\n'.format(len(_vertices) + len(_face_centers))
for vv in _vertices:
out += '{:15.8f} {:15.8f} {:15.8f}\n'.format(vv[0], vv[1], vv[2])
for fc in _face_centers:
out += '{:15.8f} {:15.8f} {:15.8f}\n'.format(fc[0], fc[1], fc[2])
out += '{:d}\n'.format(number_of_faces)
for iface, face in enumerate(self._faces):
if len(face) == 3:
out += '4\n'
elif len(face) == 4:
out += '5\n'
else:
for ii in range(len(face)):
out += '4\n'
out += '{:d}\n'.format(len(_vertices) + iface)
out += '{:d}\n'.format(face[ii])
out += '{:d}\n'.format(face[np.mod(ii + 1, len(face))])
out += '{:d}\n'.format(len(_vertices) + iface)
if len(face) in [3, 4]:
for face_vertex in face:
out += '{:d}\n'.format(face_vertex)
out += '{:d}\n'.format(face[0])
pmeshes.append({"pmesh_string": out})
return pmeshes
class AllCoordinationGeometries(dict):
"""
    Class used to store all the reference "coordination geometries" (a list of
    instances of the CoordinationGeometry class).
"""
def __init__(self, permutations_safe_override=False, only_symbols=None):
"""
Initializes the list of Coordination Geometries
:param permutations_safe_override:
:param only_symbols:
"""
dict.__init__(self)
self.cg_list = list()
if only_symbols is None:
            with open('{}/coordination_geometries_files/allcg.txt'.format(module_dir),
                      'r') as f:
                data = f.readlines()
for line in data:
cg_file = '{}/{}'.format(module_dir, line.strip())
                with open(cg_file, 'r') as f:
                    dd = json.load(f)
self.cg_list.append(CoordinationGeometry.from_dict(dd))
else:
for symbol in only_symbols:
fsymbol = symbol.replace(':', '#')
cg_file = '{}/coordination_geometries_files/{}.json'.format(
module_dir, fsymbol)
                with open(cg_file, 'r') as f:
                    dd = json.load(f)
self.cg_list.append(CoordinationGeometry.from_dict(dd))
self.cg_list.append(CoordinationGeometry(UNKNOWN_ENVIRONMENT_SYMBOL,
"Unknown environment",
deactivate=True))
self.cg_list.append(CoordinationGeometry(UNCLEAR_ENVIRONMENT_SYMBOL,
"Unclear environment",
deactivate=True))
if permutations_safe_override:
for cg in self.cg_list:
cg.set_permutations_safe_override(True)
self.minpoints = {}
self.maxpoints = {}
self.separations_cg = {}
for cn in range(6, 14):
for cg in self.get_implemented_geometries(coordination=cn):
if only_symbols is not None and cg.ce_symbol not in only_symbols:
continue
if cn not in self.separations_cg:
self.minpoints[cn] = 1000
self.maxpoints[cn] = 0
self.separations_cg[cn] = {}
for algo in cg.algorithms:
sep = (len(algo.point_groups[0]),
len(algo.plane_points),
len(algo.point_groups[1]))
if sep not in self.separations_cg[cn]:
self.separations_cg[cn][sep] = []
self.separations_cg[cn][sep].append(cg.mp_symbol)
self.minpoints[cn] = min(self.minpoints[cn], algo.minimum_number_of_points)
self.maxpoints[cn] = max(self.maxpoints[cn], algo.maximum_number_of_points)
self.maxpoints_inplane = {cn: max([sep[1] for sep in seps.keys()]) for cn, seps in self.separations_cg.items()}
def __getitem__(self, key):
return self.get_geometry_from_mp_symbol(key)
def __repr__(self):
"""
Returns a string with the list of coordination geometries.
"""
outs = ['', '#=================================#',
'# List of coordination geometries #',
'#=================================#', '']
for cg in self.cg_list:
outs.append(repr(cg))
return '\n'.join(outs)
def __str__(self):
"""
Returns a string with the list of coordination geometries that are implemented.
"""
outs = ['', '#=======================================================#',
'# List of coordination geometries currently implemented #',
'#=======================================================#', '']
for cg in self.cg_list:
if cg.is_implemented():
outs.append(str(cg))
return '\n'.join(outs)
def get_geometries(self, coordination=None, returned='cg'):
"""
Returns a list of coordination geometries with the given coordination number.
        :param coordination: The coordination number for which the list of coordination geometries is returned.
        :param returned: Type of the returned items: 'cg' for CoordinationGeometry instances, 'mp_symbol' for their mp symbols.
"""
geom = list()
if coordination is None:
for gg in self.cg_list:
if returned == 'cg':
geom.append(gg)
elif returned == 'mp_symbol':
geom.append(gg.mp_symbol)
else:
for gg in self.cg_list:
if gg.get_coordination_number() == coordination:
if returned == 'cg':
geom.append(gg)
elif returned == 'mp_symbol':
geom.append(gg.mp_symbol)
return geom
def get_symbol_name_mapping(self, coordination=None):
geom = {}
if coordination is None:
for gg in self.cg_list:
geom[gg.mp_symbol] = gg.name
else:
for gg in self.cg_list:
if gg.get_coordination_number() == coordination:
geom[gg.mp_symbol] = gg.name
return geom
def get_symbol_cn_mapping(self, coordination=None):
geom = {}
if coordination is None:
for gg in self.cg_list:
geom[gg.mp_symbol] = gg.coordination_number
else:
for gg in self.cg_list:
if gg.get_coordination_number() == coordination:
geom[gg.mp_symbol] = gg.coordination_number
return geom
def get_implemented_geometries(self, coordination=None, returned='cg',
include_deactivated=False):
"""
Returns a list of the implemented coordination geometries with the given coordination number.
        :param coordination: The coordination number for which the list of implemented coordination geometries
            is returned.
"""
geom = list()
if coordination is None:
for gg in self.cg_list:
if gg.points is not None and (
(not gg.deactivate) or include_deactivated):
if returned == 'cg':
geom.append(gg)
elif returned == 'mp_symbol':
geom.append(gg.mp_symbol)
else:
for gg in self.cg_list:
if gg.get_coordination_number() == coordination and gg.points is not None and \
((not gg.deactivate) or include_deactivated):
if returned == 'cg':
geom.append(gg)
elif returned == 'mp_symbol':
geom.append(gg.mp_symbol)
return geom
def get_not_implemented_geometries(self, coordination=None,
returned='mp_symbol'):
"""
        Returns a list of the coordination geometries that are not implemented, with the given coordination number.
        :param coordination: The coordination number for which the list of non-implemented coordination geometries
            is returned.
"""
geom = list()
if coordination is None:
for gg in self.cg_list:
if gg.points is None:
if returned == 'cg':
geom.append(gg)
elif returned == 'mp_symbol':
geom.append(gg.mp_symbol)
else:
for gg in self.cg_list:
if gg.get_coordination_number() == coordination and gg.points is None:
if returned == 'cg':
geom.append(gg)
elif returned == 'mp_symbol':
geom.append(gg.mp_symbol)
return geom
def get_geometry_from_name(self, name):
"""
Returns the coordination geometry of the given name.
:param name: The name of the coordination geometry.
"""
for gg in self.cg_list:
if gg.name == name or name in gg.alternative_names:
return gg
raise LookupError(
'No coordination geometry found with name "{name}"'.format(
name=name))
def get_geometry_from_IUPAC_symbol(self, IUPAC_symbol):
"""
Returns the coordination geometry of the given IUPAC symbol.
:param IUPAC_symbol: The IUPAC symbol of the coordination geometry.
"""
for gg in self.cg_list:
if gg.IUPAC_symbol == IUPAC_symbol:
return gg
raise LookupError(
'No coordination geometry found with IUPAC symbol "{symbol}"'.format(
symbol=IUPAC_symbol))
def get_geometry_from_IUCr_symbol(self, IUCr_symbol):
"""
Returns the coordination geometry of the given IUCr symbol.
:param IUCr_symbol: The IUCr symbol of the coordination geometry.
"""
for gg in self.cg_list:
if gg.IUCr_symbol == IUCr_symbol:
return gg
raise LookupError(
'No coordination geometry found with IUCr symbol "{symbol}"'.format(
symbol=IUCr_symbol))
def get_geometry_from_mp_symbol(self, mp_symbol):
"""
Returns the coordination geometry of the given mp_symbol.
:param mp_symbol: The mp_symbol of the coordination geometry.
"""
for gg in self.cg_list:
if gg.mp_symbol == mp_symbol:
return gg
raise LookupError(
'No coordination geometry found with mp_symbol "{symbol}"'.format(
symbol=mp_symbol))
def is_a_valid_coordination_geometry(self, mp_symbol=None,
IUPAC_symbol=None, IUCr_symbol=None,
name=None, cn=None):
"""
Checks whether a given coordination geometry is valid (exists) and whether the parameters are coherent with
each other.
        :param mp_symbol: The mp_symbol of the coordination geometry.
        :param IUPAC_symbol: The IUPAC symbol of the coordination geometry.
        :param IUCr_symbol: The IUCr symbol of the coordination geometry.
        :param name: The name of the coordination geometry (lookup by name is not implemented).
        :param cn: The coordination number of the coordination geometry.
"""
if name is not None:
raise NotImplementedError(
'is_a_valid_coordination_geometry not implemented for the name')
if mp_symbol is None and IUPAC_symbol is None and IUCr_symbol is None:
raise SyntaxError(
'missing argument for is_a_valid_coordination_geometry : at least one of mp_symbol, '
'IUPAC_symbol and IUCr_symbol must be passed to the function')
if mp_symbol is not None:
try:
cg = self.get_geometry_from_mp_symbol(mp_symbol)
if IUPAC_symbol is not None:
if IUPAC_symbol != cg.IUPAC_symbol:
return False
if IUCr_symbol is not None:
if IUCr_symbol != cg.IUCr_symbol:
return False
if cn is not None:
if int(cn) != int(cg.coordination_number):
return False
return True
except LookupError:
return False
elif IUPAC_symbol is not None:
try:
cg = self.get_geometry_from_IUPAC_symbol(IUPAC_symbol)
if IUCr_symbol is not None:
if IUCr_symbol != cg.IUCr_symbol:
return False
if cn is not None:
if cn != cg.coordination_number:
return False
return True
except LookupError:
return False
elif IUCr_symbol is not None:
try:
cg = self.get_geometry_from_IUCr_symbol(IUCr_symbol)
if cn is not None:
if cn != cg.coordination_number:
return False
return True
except LookupError:
                return False
raise Exception('Should not be here !')
def pretty_print(self, type='implemented_geometries', maxcn=8, additional_info=None):
if type == 'all_geometries_latex_images':
mystring = ''
for cn in range(1, maxcn + 1):
mystring += '\\section*{{Coordination {cn}}}\n\n'.format(cn=cn)
for cg in self.get_implemented_geometries(coordination=cn,
returned='cg'):
mystring += '\\subsubsection*{{{mp} : {name}}}\n\n'.format(
mp=cg.mp_symbol, name=cg.get_name())
mystring += 'IUPAC : {iupac}\n\nIUCr : {iucr}\n\n'.format(
iupac=cg.IUPAC_symbol, iucr=cg.IUCr_symbol)
mystring += '\\begin{center}\n'
mystring += '\\includegraphics[scale=0.15]{{images/{let}_{cif}.png}}\n'.format(
let=cg.mp_symbol.split(':')[0],
cif=cg.mp_symbol.split(':')[1])
mystring += '\\end{center}\n\n'
for cg in self.get_not_implemented_geometries(cn,
returned='cg'):
mystring += '\\subsubsection*{{{mp} : {name}}}\n\n'.format(
mp=cg.mp_symbol, name=cg.get_name())
mystring += 'IUPAC : {iupac}\n\nIUCr : {iucr}\n\n'.format(
iupac=cg.IUPAC_symbol, iucr=cg.IUCr_symbol)
elif type == 'all_geometries_latex':
mystring = ''
for cn in range(1, maxcn + 1):
mystring += '\\subsection*{{Coordination {cn}}}\n\n'.format(
cn=cn)
mystring += '\\begin{itemize}\n'
for cg in self.get_implemented_geometries(coordination=cn,
returned='cg'):
mystring += '\\item {mp} $\\rightarrow$ {name} '.format(
mp=cg.mp_symbol.replace('_',
'\\_'),
name=cg.get_name())
mystring += '(IUPAC : {iupac} - IUCr : {iucr})\n'.format(
iupac=cg.IUPAC_symbol_str,
iucr=cg.IUCr_symbol_str.replace('[', '$[$').replace(']',
'$]$'))
for cg in self.get_not_implemented_geometries(cn,
returned='cg'):
mystring += '\\item {mp} $\\rightarrow$ {name} '.format(
mp=cg.mp_symbol.replace('_',
'\\_'),
name=cg.get_name())
mystring += '(IUPAC : {iupac} - IUCr : {iucr})\n'.format(
iupac=cg.IUPAC_symbol_str,
iucr=cg.IUCr_symbol_str.replace('[', '$[$').replace(']',
'$]$'))
mystring += '\\end{itemize}\n\n'
else:
mystring = '+-------------------------+\n| Coordination geometries |\n+-------------------------+\n\n'
for cn in range(1, maxcn + 1):
mystring += '==>> CN = {cn} <<==\n'.format(cn=cn)
if type == 'implemented_geometries':
for cg in self.get_implemented_geometries(coordination=cn):
if additional_info is not None:
if 'nb_hints' in additional_info:
if cg.neighbors_sets_hints is not None:
addinfo = ' *'
else:
addinfo = ''
else:
addinfo = ''
else:
addinfo = ''
mystring += ' - {mp} : {name}{addinfo}\n'.format(mp=cg.mp_symbol,
name=cg.get_name(),
addinfo=addinfo)
elif type == 'all_geometries':
for cg in self.get_geometries(coordination=cn):
mystring += ' - {mp} : {name}\n'.format(mp=cg.mp_symbol,
name=cg.get_name())
mystring += '\n'
return mystring
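
# -----------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module): builds the
# full set of reference geometries, looks one up by its mp_symbol and prints the
# implemented geometries. The symbol 'O:6' (octahedron) is assumed to be present
# in the packaged geometry files; adjust it if your data files differ.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    all_cgs = AllCoordinationGeometries()
    octahedron = all_cgs.get_geometry_from_mp_symbol('O:6')
    print(octahedron.get_name(), octahedron.get_coordination_number())
    print(all_cgs.pretty_print(type='implemented_geometries', maxcn=8))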
|
dongsenfo/pymatgen
|
pymatgen/analysis/chemenv/coordination_environments/coordination_geometries.py
|
Python
|
mit
| 52,397
|
"""ControlPrestamo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from prestamoapp import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^prestamos/', include('prestamoapp.urls'))
]
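# A minimal sketch (hypothetical, for illustration only) of what the included
# 'prestamoapp/urls.py' might contain; the view names used here (views.index,
# views.detalle) are assumptions, not taken from the actual app:
#
# from django.conf.urls import url
# from . import views
#
# urlpatterns = [
#     url(r'^$', views.index, name='index'),
#     url(r'^(?P<prestamo_id>[0-9]+)/$', views.detalle, name='detalle'),
# ]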
|
marvinAlvarenga/ControlPrestamo
|
ControlPrestamo/urls.py
|
Python
|
gpl-2.0
| 865
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v8.enums.types import (
response_content_type as gage_response_content_type,
)
from google.ads.googleads.v8.resources.types import (
feed_mapping as gagr_feed_mapping,
)
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v8.services",
marshal="google.ads.googleads.v8",
manifest={
"GetFeedMappingRequest",
"MutateFeedMappingsRequest",
"FeedMappingOperation",
"MutateFeedMappingsResponse",
"MutateFeedMappingResult",
},
)
class GetFeedMappingRequest(proto.Message):
r"""Request message for
[FeedMappingService.GetFeedMapping][google.ads.googleads.v8.services.FeedMappingService.GetFeedMapping].
Attributes:
resource_name (str):
Required. The resource name of the feed
mapping to fetch.
"""
resource_name = proto.Field(proto.STRING, number=1,)
class MutateFeedMappingsRequest(proto.Message):
r"""Request message for
[FeedMappingService.MutateFeedMappings][google.ads.googleads.v8.services.FeedMappingService.MutateFeedMappings].
Attributes:
customer_id (str):
Required. The ID of the customer whose feed
mappings are being modified.
operations (Sequence[google.ads.googleads.v8.services.types.FeedMappingOperation]):
Required. The list of operations to perform
on individual feed mappings.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
response_content_type (google.ads.googleads.v8.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
"""
customer_id = proto.Field(proto.STRING, number=1,)
operations = proto.RepeatedField(
proto.MESSAGE, number=2, message="FeedMappingOperation",
)
partial_failure = proto.Field(proto.BOOL, number=3,)
validate_only = proto.Field(proto.BOOL, number=4,)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
class FeedMappingOperation(proto.Message):
r"""A single operation (create, remove) on a feed mapping.
Attributes:
create (google.ads.googleads.v8.resources.types.FeedMapping):
Create operation: No resource name is
expected for the new feed mapping.
remove (str):
Remove operation: A resource name for the removed feed
mapping is expected, in this format:
``customers/{customer_id}/feedMappings/{feed_id}~{feed_mapping_id}``
"""
create = proto.Field(
proto.MESSAGE,
number=1,
oneof="operation",
message=gagr_feed_mapping.FeedMapping,
)
remove = proto.Field(proto.STRING, number=3, oneof="operation",)
class MutateFeedMappingsResponse(proto.Message):
r"""Response message for a feed mapping mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v8.services.types.MutateFeedMappingResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE, number=3, message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE, number=2, message="MutateFeedMappingResult",
)
class MutateFeedMappingResult(proto.Message):
r"""The result for the feed mapping mutate.
Attributes:
resource_name (str):
Returned for successful operations.
feed_mapping (google.ads.googleads.v8.resources.types.FeedMapping):
The mutated feed mapping with only mutable fields after
mutate. The field will only be returned when
response_content_type is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(proto.STRING, number=1,)
feed_mapping = proto.Field(
proto.MESSAGE, number=2, message=gagr_feed_mapping.FeedMapping,
)
__all__ = tuple(sorted(__protobuf__.manifest))
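
# -----------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the generated module): shows how
# the request messages defined above can be instantiated directly. The customer
# ID and feed-mapping resource name below are placeholder values.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    remove_op = FeedMappingOperation(
        remove="customers/1234567890/feedMappings/111~222",
    )
    request = MutateFeedMappingsRequest(
        customer_id="1234567890",
        operations=[remove_op],
        partial_failure=False,
        validate_only=True,
    )
    print(request)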
|
googleads/google-ads-python
|
google/ads/googleads/v8/services/types/feed_mapping_service.py
|
Python
|
apache-2.0
| 5,553
|
from .service import Fullcontact
|
ducksboard/libsaas
|
libsaas/services/fullcontact/__init__.py
|
Python
|
mit
| 33
|
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.oval_5.defs.windows.ObjectType import ObjectType
logger = logging.getLogger(__name__)
class RegKeyEffectiveRights53ObjectElement(ObjectType):
MODEL_MAP = {
'tag_name': 'regkeyeffectiverights53_object',
'elements': [
{'tag_name': 'behaviors', 'class': 'RegkeyEffectiveRights53Behaviors', 'min': 0},
{'tag_name': 'hive', 'class': 'EntityObjectRegistryHiveType', 'min': 0},
{'tag_name': 'key', 'class': 'scap.model.oval_5.defs.EntityObjectType', 'nillable': True, 'min': 0},
{'tag_name': 'trustee_sid', 'class': 'scap.model.oval_5.defs.EntityObjectType', 'min': 0},
],
}
|
cjaymes/pyscap
|
src/scap/model/oval_5/defs/windows/RegKeyEffectiveRights53ObjectElement.py
|
Python
|
gpl-3.0
| 1,362
|
import os
import sys
import pandas as pd
import numpy as np
from numpy.random import poisson, uniform
from numpy import mean
import time
import math
po = True
teamsheetpath = sys.path[0] + '/teamcsvs/'
compstat = {'TDF': 'TDA', 'TDA': 'TDF', #Dictionary to use to compare team stats with opponent stats
'FGF': 'FGA', 'FGA': 'FGF',
'SFF': 'SFA', 'SFA': 'SFF',
'PAT1%F': 'PAT1%A', 'PAT1%A': 'PAT1%F',
'PAT2%F': 'PAT2%A', 'PAT2%A': 'PAT2%F'}
def get_opponent_stats(opponent): #Gets summaries of statistics for opponent each week
opponent_stats = {}
global teamsheetpath
    opp_stats = pd.read_csv(teamsheetpath + opponent + '.csv', index_col=0)
for stat in opp_stats.columns:
if stat in ['TDF', 'FGF', 'SFF', 'TDA', 'FGA', 'SFA']:
opponent_stats.update({stat: opp_stats[stat].mean()})
try:
opponent_stats.update({'PAT1%F': float(opp_stats['PAT1FS'].sum()) / opp_stats['PAT1FA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT1%F': .99})
try:
opponent_stats.update({'PAT2%F': float(opp_stats['PAT2FS'].sum()) / opp_stats['PAT2FA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT2%F': .5})
try:
opponent_stats.update({'PAT1%A': float(opp_stats['PAT1AS'].sum()) / opp_stats['PAT1AA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT1%A': .99})
try:
opponent_stats.update({'PAT2%A': float(opp_stats['PAT2AS'].sum()) / opp_stats['PAT2AA'].sum()})
except ZeroDivisionError:
opponent_stats.update({'PAT2%A': .5})
return opponent_stats
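# For reference, the dictionary returned by get_opponent_stats has one entry per
# scoring statistic, e.g. (values are placeholders, not real data):
# {'TDF': 2.4, 'TDA': 1.9, 'FGF': 1.6, 'FGA': 1.8, 'SFF': 0.1, 'SFA': 0.1,
#  'PAT1%F': 0.97, 'PAT2%F': 0.5, 'PAT1%A': 0.95, 'PAT2%A': 0.5}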
def get_residual_performance(team): #Get how each team has done compared to the average performance of their opponents
global teamsheetpath
    score_df = pd.read_csv(teamsheetpath + team + '.csv', index_col=0)
residual_stats = {}
score_df['PAT1%F'] = np.nan
score_df['PAT2%F'] = np.nan
score_df['PAT1%A'] = np.nan
score_df['PAT2%A'] = np.nan
for week in score_df.index:
try:
score_df['PAT1%F'][week] = float(score_df['PAT1FS'][week]) / score_df['PAT1FA'][week]
except ZeroDivisionError:
score_df['PAT1%F'][week] = 0.99
#print ('For: ' + str(score_df['PAT1%F'][week]))
try:
score_df['PAT2%F'][week] = float(score_df['PAT2FS'][week]) / score_df['PAT2FA'][week]
except ZeroDivisionError:
score_df['PAT2%F'][week] = 0.5
try:
score_df['PAT1%A'][week] = float(score_df['PAT1AS'][week]) / score_df['PAT1AA'][week]
except ZeroDivisionError:
score_df['PAT1%A'][week] = 0.99
#print ('Against: ' + str(score_df['PAT1%F'][week]))
try:
score_df['PAT2%A'][week] = float(score_df['PAT2AS'][week]) / score_df['PAT2AA'][week]
except ZeroDivisionError:
score_df['PAT2%A'][week] = 0.5
opponent_stats = get_opponent_stats(score_df['OPP'][week])
for stat in opponent_stats:
if week == 1:
score_df['OPP_' + stat] = np.nan
score_df['OPP_' + stat][week] = opponent_stats[stat]
for stat in opponent_stats:
score_df['R_' + stat] = score_df[stat] - score_df['OPP_' + compstat[stat]]
if stat in ['TDF', 'FGF', 'SFF', 'TDA', 'FGA', 'SFA']:
residual_stats.update({stat: score_df['R_' + stat].mean()})
elif stat == 'PAT1%F':
residual_stats.update({stat: (score_df['R_PAT1%F'].multiply(score_df['PAT1FA'])).sum() / score_df['PAT1FA'].sum()})
elif stat == 'PAT2%F':
residual_stats.update({stat: (score_df['R_PAT2%F'].multiply(score_df['PAT2FA'])).sum() / score_df['PAT2FA'].sum()})
elif stat == 'PAT1%A':
residual_stats.update({stat: (score_df['R_PAT1%A'].multiply(score_df['PAT1AA'])).sum() / score_df['PAT1AA'].sum()})
elif stat == 'PAT2%A':
residual_stats.update({stat: (score_df['R_PAT2%A'].multiply(score_df['PAT2AA'])).sum() / score_df['PAT2AA'].sum()})
try:
residual_stats.update({'GOFOR2': float(score_df['PAT2FA'].sum()) / score_df['TDF'].sum()})
except ZeroDivisionError:
residual_stats.update({'GOFOR2': .1})
#print team
#print residual_stats
return residual_stats
def get_score(expected_scores): #Get the score for a team based on expected scores
score = 0
if expected_scores['TD'] > 0:
tds = poisson(expected_scores['TD'])
else:
tds = poisson(0.01)
score = score + 6 * tds
if expected_scores['FG'] > 0:
fgs = poisson(expected_scores['FG'])
else:
fgs = poisson(0.01)
score = score + 3 * fgs
if expected_scores['S'] > 0:
sfs = poisson(expected_scores['S'])
else:
sfs = poisson(0.01)
score = score + 2 * sfs
for td in range(tds):
go_for_2_determinant = uniform(0, 1)
if go_for_2_determinant <= expected_scores['GOFOR2']: #Going for 2
successful_pat_determinant = uniform(0, 1)
if successful_pat_determinant <= expected_scores['PAT2PROB']:
score = score + 2
else:
continue
else: #Going for 1
#print(expected_scores['PAT1PROB'])
successful_pat_determinant = uniform(0, 1)
if successful_pat_determinant <= expected_scores['PAT1PROB']:
score = score + 1
else:
continue
return score
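# get_score expects a dictionary with the keys produced by get_expected_scores
# below: 'TD', 'FG' and 'S' (expected touchdowns, field goals and safeties per
# game), 'GOFOR2' (probability of attempting a 2-point conversion) and
# 'PAT1PROB'/'PAT2PROB' (success probabilities of 1- and 2-point conversions).
# A hypothetical call (the values are made up for illustration):
# get_score({'TD': 2.5, 'FG': 1.5, 'S': 0.05, 'GOFOR2': 0.1,
#            'PAT1PROB': 0.98, 'PAT2PROB': 0.48})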
def game(team_1, team_2,
expected_scores_1, expected_scores_2,
playoff): #Get two scores and determine a winner
score_1 = get_score(expected_scores_1)
score_2 = get_score(expected_scores_2)
if score_1 > score_2:
win_1 = 1
win_2 = 0
draw_1 = 0
draw_2 = 0
elif score_2 > score_1:
win_1 = 0
win_2 = 1
draw_1 = 0
draw_2 = 0
else:
if playoff:
win_1 = 0.5
win_2 = 0.5
draw_1 = 0
draw_2 = 0
else:
win_1 = 0
win_2 = 0
draw_1 = 1
draw_2 = 1
summary = {team_1: [win_1, draw_1, score_1]}
summary.update({team_2: [win_2, draw_2, score_2]})
return summary
def get_expected_scores(team_1_stats, team_2_stats, team_1_df, team_2_df): #Get the expected scores for a matchup based on the previous teams' performances
expected_scores = {}
for stat in team_1_stats:
expected_scores.update({'TD': mean([team_1_stats['TDF'] + team_2_df['TDA'].mean(),
team_2_stats['TDA'] + team_1_df['TDF'].mean()])})
expected_scores.update({'FG': mean([team_1_stats['FGF'] + team_2_df['FGA'].mean(),
team_2_stats['FGA'] + team_1_df['FGF'].mean()])})
expected_scores.update({'S': mean([team_1_stats['SFF'] + team_2_df['SFA'].mean(),
team_2_stats['SFA'] + team_1_df['SFF'].mean()])})
#print mean([team_1_stats['PAT1%F'] + team_2_df['PAT1AS'].astype('float').sum() / team_2_df['PAT1AA'].sum(),
# team_2_stats['PAT1%A'] + team_1_df['PAT1FS'].astype('float').sum() / team_1_df['PAT1FA'].sum()])
expected_scores.update({'GOFOR2': team_1_stats['GOFOR2']})
pat1prob = mean([team_1_stats['PAT1%F'] + team_2_df['PAT1AS'].astype('float').sum() / team_2_df['PAT1AA'].sum(),
team_2_stats['PAT1%A'] + team_1_df['PAT1FS'].astype('float').sum() / team_1_df['PAT1FA'].sum()])
if not math.isnan(pat1prob):
expected_scores.update({'PAT1PROB': pat1prob})
else:
expected_scores.update({'PAT1PROB': 0.99})
#print(expected_scores['PAT1PROB'])
pat2prob = mean([team_1_stats['PAT2%F'] + team_2_df['PAT2AS'].astype('float').sum() / team_2_df['PAT2AA'].sum(),
team_2_stats['PAT2%A'] + team_1_df['PAT2FS'].astype('float').sum() / team_1_df['PAT2FA'].sum()])
if not math.isnan(pat2prob):
expected_scores.update({'PAT2PROB': pat2prob})
else:
expected_scores.update({'PAT2PROB': 0.5})
#print(expected_scores)
return expected_scores
def matchup(team_1, team_2):
ts = time.time()
    team_1_season = pd.read_csv(teamsheetpath + team_1 + '.csv', index_col=0)
    team_2_season = pd.read_csv(teamsheetpath + team_2 + '.csv', index_col=0)
stats_1 = get_residual_performance(team_1)
stats_2 = get_residual_performance(team_2)
expected_scores_1 = get_expected_scores(stats_1, stats_2, team_1_season, team_2_season)
expected_scores_2 = get_expected_scores(stats_2, stats_1, team_2_season, team_1_season)
team_1_wins = 0
team_2_wins = 0
team_1_draws = 0
team_2_draws = 0
team_1_scores = []
team_2_scores = []
i = 0
error = 1
while error > 0.000001 or i < 5000000: #Run until convergence after 5 million iterations
summary = game(team_1, team_2,
expected_scores_1, expected_scores_2,
po)
team_1_prev_wins = team_1_wins
team_1_wins += summary[team_1][0]
team_2_wins += summary[team_2][0]
team_1_draws += summary[team_1][1]
team_2_draws += summary[team_2][1]
team_1_scores.append(summary[team_1][2])
team_2_scores.append(summary[team_2][2])
team_1_prob = float(team_1_wins) / len(team_1_scores)
team_2_prob = float(team_2_wins) / len(team_2_scores)
if i > 0:
team_1_prev_prob = float(team_1_prev_wins) / i
            error = abs(team_1_prob - team_1_prev_prob)
i = i + 1
if i == 5000000:
print('Probability converged within 5 million iterations')
else:
print('Probability converged after ' + str(i) + ' iterations')
    games = pd.DataFrame({team_1: team_1_scores, team_2: team_2_scores})
summaries = games.describe(percentiles = [0.025, 0.1, 0.25, 0.5, 0.75, 0.9, 0.975])
output = {'ProbWin': {team_1: team_1_prob, team_2: team_2_prob}, 'Scores': summaries}
print(team_1 + '/' + team_2 + ' score distributions computed in ' + str(round(time.time() - ts, 1)) + ' seconds')
return output
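
# -----------------------------------------------------------------------------
# Usage sketch (editor's addition): runs a single matchup simulation. The team
# codes below are placeholders; each must correspond to a '<TEAM>.csv' file in
# the teamcsvs/ directory next to this script.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    results = matchup('NE', 'SEA')
    print(results['ProbWin'])
    print(results['Scores'])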
|
JoeJimFlood/NFLPrediction2014
|
matchup.py
|
Python
|
mit
| 10,272
|