text stringlengths 957 885k |
|---|
<filename>regular_language/unit_tests/test_ast_AST_expand_phrases.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Unit Tests for ast.AST.expand_phrases() """
import unittest
import nlpregex.regular_language.ast
from nlpregex.regular_language.unit_tests.test_ast_helper import test_AST_helper
class test_ast_AST_expand_phrases( unittest.TestCase ):
    """Unit tests for ast.AST.expand_phrases().

    Every case follows the same pattern: build an AST from a compact spec
    string, optionally decorate some nodes with pre/post out-tokens, expand
    the phrases, and compare the joined result against an expected spec.
    The shared _run_case() helper implements that pattern once.
    """

    # Canonical token decorations reused across the cases.  The first pair is
    # attached to the node under test, the second/third pairs to child nodes.
    PRE_1  = ('token01', 'token02')
    POST_1 = ('token03', 'token04')
    PRE_2  = ('token05', 'token06')
    POST_2 = ('token07', 'token08')
    PRE_3  = ('token09', 'token10')
    POST_3 = ('token11', 'token12')

    def __init__( self, *args, **kwargs ):
        unittest.TestCase.__init__(self, *args, **kwargs)
        self.helper = test_AST_helper()

    def construct_ast_from_spec( self, spec01 ):
        return self.helper.construct_ast_from_spec(spec01)

    def display_tree( self, ast01 ):
        return self.helper.display_tree(ast01)

    def compare_specs( self, spec01, spec02 ):
        return self.helper.compare_specs( spec01, spec02 )

    def _attach_tokens(self, ast01, node_name, pre_tokens, post_tokens):
        # Decorate the named node with pre/post out-tokens.
        node = self.helper.get_node(ast01, node_name)
        for tok in pre_tokens:
            node.append_out_token_pre(tok)
        for tok in post_tokens:
            node.append_out_token_post(tok)

    def _run_case(self, spec, expected, token_specs=()):
        """Build AST from *spec*, apply *token_specs*, expand and compare.

        token_specs: iterable of (node_name, pre_tokens, post_tokens).
        """
        ast01 = self.construct_ast_from_spec(spec)
        for node_name, pre_tokens, post_tokens in token_specs:
            self._attach_tokens(ast01, node_name, pre_tokens, post_tokens)
        str01 = '\n'.join(ast01.expand_phrases(True))
        self.assertEqual(self.compare_specs(str01, expected), True)

    # --- empty / epsilon -------------------------------------------------

    def test_0001(self):
        self._run_case('', '')

    def test_0002(self):
        self._run_case('E_E01', '')

    # --- single terminal / nonterminal ----------------------------------

    def test_0003(self):
        self._run_case('T_T01', 'T01')

    def test_0003a(self):
        # Only the post tokens appear in the expansion of a bare terminal.
        self._run_case(
            'T_T01',
            '( T01 [ token03 token04 ] )',
            [('T_T01', self.PRE_1, self.POST_1)],
        )

    def test_0004(self):
        self._run_case('N_N01', 'N01')

    def test_0004a(self):
        self._run_case(
            'N_N01',
            '( N01 [ token03 token04 ] )',
            [('N_N01', self.PRE_1, self.POST_1)],
        )

    # --- sequence --------------------------------------------------------

    def test_0005(self):
        self._run_case('S_S01:T_T01', 'T01')

    def test_0005a(self):
        self._run_case(
            'S_S01:T_T01',
            '( [ token02 token01 ] ( T01 ) [ token03 token04 ] )',
            [('S_S01', self.PRE_1, self.POST_1)],
        )

    def test_0005b(self):
        self._run_case(
            'S_S01:T_T01',
            '( T01 [ token03 token04 ] )',
            [('T_T01', self.PRE_1, self.POST_1)],
        )

    def test_0005c(self):
        self._run_case(
            'S_S01:T_T01',
            '( [ token02 token01 ] ( ( T01 [ token07 token08 ] ) ) [ token03 token04 ] )',
            [('S_S01', self.PRE_1, self.POST_1),
             ('T_T01', self.PRE_2, self.POST_2)],
        )

    def test_0006(self):
        self._run_case('S_S01:T_T01 N_N02', 'T01 N02')

    def test_0006a(self):
        self._run_case(
            'S_S01:T_T01 N_N02',
            '( [ token02 token01 ] ( ( T01 [ token07 token08 ] ) ( N02 [ token11 token12 ] ) ) [ token03 token04 ] )',
            [('S_S01', self.PRE_1, self.POST_1),
             ('T_T01', self.PRE_2, self.POST_2),
             ('N_N02', self.PRE_3, self.POST_3)],
        )

    # --- union -----------------------------------------------------------

    def test_0007(self):
        self._run_case('|_U01:T_T01', 'T01')

    def test_0007a(self):
        self._run_case(
            '|_U01:T_T01',
            '( [ token02 token01 ] T01 [ token03 token04 ] )',
            [('|_U01', self.PRE_1, self.POST_1)],
        )

    def test_0007b(self):
        self._run_case(
            '|_U01:T_T01',
            '( T01 [ token03 token04 ] )',
            [('T_T01', self.PRE_1, self.POST_1)],
        )

    def test_0007c(self):
        self._run_case(
            '|_U01:T_T01',
            '( [ token02 token01 ] ( T01 [ token07 token08 ] ) [ token03 token04 ] )',
            [('|_U01', self.PRE_1, self.POST_1),
             ('T_T01', self.PRE_2, self.POST_2)],
        )

    def test_0008(self):
        self._run_case('|_U01:T_T01 T_T02', '''
            T01
            T02
        ''')

    def test_0008a(self):
        # Union tokens are distributed onto every alternative phrase.
        self._run_case(
            '|_U01:T_T01 T_T02',
            '''
            ( [ token02 token01 ] T01 [ token03 token04 ] )
            ( [ token02 token01 ] T02 [ token03 token04 ] )
            ''',
            [('|_U01', self.PRE_1, self.POST_1)],
        )

    def test_0008b(self):
        self._run_case(
            '|_U01:T_T01 T_T02',
            '''
            ( [ token02 token01 ] ( T01 [ token07 token08 ] ) [ token03 token04 ] )
            ( [ token02 token01 ] ( T02 [ token11 token12 ] ) [ token03 token04 ] )
            ''',
            [('|_U01', self.PRE_1, self.POST_1),
             ('T_T01', self.PRE_2, self.POST_2),
             ('T_T02', self.PRE_3, self.POST_3)],
        )

    def test_0009(self):
        self._run_case('|_U01:T_T01 N_N02 T_T03', '''
            T01
            N02
            T03
        ''')

    # --- optional --------------------------------------------------------

    def test_0010(self):
        self._run_case('?_R01:T_T01', '''
            T01
        ''')

    def test_0010a(self):
        # The optional branch contributes an __EPS__ phrase as well.
        self._run_case(
            '?_R01:T_T01',
            '''
            ( [ token02 token01 ] __EPS__ [ token03 token04 ] )
            ( [ token02 token01 ] T01 [ token03 token04 ] )
            ''',
            [('?_R01', self.PRE_1, self.POST_1)],
        )

    def test_0010b(self):
        self._run_case(
            '?_R01:T_T01',
            '''
            ( [ token02 token01 ] __EPS__ [ token03 token04 ] )
            ( [ token02 token01 ] ( T01 [ token07 token08 ] ) [ token03 token04 ] )
            ''',
            [('?_R01', self.PRE_1, self.POST_1),
             ('T_T01', self.PRE_2, self.POST_2)],
        )

    # --- bounded repetition ----------------------------------------------

    def test_0011(self):
        self._run_case('{0,3}_R01:T_T01', '''
            T01{ 0, 3 }
        ''')

    def test_0011a(self):
        self._run_case(
            '{0,3}_R01:T_T01',
            '''
            ( [ token02 token01 ] ( T01{ 0, 3 } ) [ token03 token04 ] )
            ''',
            [('{0,3}_R01', self.PRE_1, self.POST_1)],
        )

    def test_0011b(self):
        self._run_case(
            '{0,3}_R01:T_T01',
            '''
            ( [ token02 token01 ] ( ( T01 [ token07 token08 ] ){ 0, 3 } ) [ token03 token04 ] )
            ''',
            [('{0,3}_R01', self.PRE_1, self.POST_1),
             ('T_T01', self.PRE_2, self.POST_2)],
        )

    def test_0012(self):
        self._run_case('{2,7}_R01:T_T01', '''
            T01{ 2, 7 }
        ''')

    def test_0012a(self):
        self._run_case(
            '{2,7}_R01:T_T01',
            '''
            ( [ token02 token01 ] ( T01{ 2, 7 } ) [ token03 token04 ] )
            ''',
            [('{2,7}_R01', self.PRE_1, self.POST_1)],
        )

    def test_0012b(self):
        self._run_case(
            '{2,7}_R01:T_T01',
            '''
            ( [ token02 token01 ] ( ( T01 [ token07 token08 ] ){ 2, 7 } ) [ token03 token04 ] )
            ''',
            [('{2,7}_R01', self.PRE_1, self.POST_1),
             ('T_T01', self.PRE_2, self.POST_2)],
        )

    # --- star / plus repetition ------------------------------------------

    def test_0013(self):
        self._run_case('*_R01:T_T01', '''
            T01*
        ''')

    def test_0013a(self):
        self._run_case(
            '*_R01:T_T01',
            '''
            ( [ token02 token01 ] ( T01* ) [ token03 token04 ] )
            ''',
            [('*_R01', self.PRE_1, self.POST_1)],
        )

    def test_0013b(self):
        self._run_case(
            '*_R01:T_T01',
            '''
            ( [ token02 token01 ] ( ( T01 [ token07 token08 ] )* ) [ token03 token04 ] )
            ''',
            [('*_R01', self.PRE_1, self.POST_1),
             ('T_T01', self.PRE_2, self.POST_2)],
        )

    def test_0014(self):
        self._run_case('+_R01:T_T01', '''
            T01+
        ''')

    def test_0014a(self):
        self._run_case(
            '+_R01:T_T01',
            '''
            ( [ token02 token01 ] ( T01+ ) [ token03 token04 ] )
            ''',
            [('+_R01', self.PRE_1, self.POST_1)],
        )

    def test_0014b(self):
        self._run_case(
            '+_R01:T_T01',
            '''
            ( [ token02 token01 ] ( ( T01 [ token07 token08 ] )+ ) [ token03 token04 ] )
            ''',
            [('+_R01', self.PRE_1, self.POST_1),
             ('T_T01', self.PRE_2, self.POST_2)],
        )
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
"""
court_directory.py - Download and cache a court directory for Texas
Build URLs at:
https://card.txcourts.gov/DirectorySearch.aspx
Copyright (c) 2020 by <NAME>, J.D. All Rights Reserved.
"""
import csv
import json
import requests
from datetime import date, time
URL = 'https://card.txcourts.gov/ExcelExportPublic.aspx?type=P&export=E&SortBy=tblCounty.Sort_ID,%20tblCourt.Court_Identifier&Active_Flg=true&Court_Type_CD=0&Court_Sub_Type_CD=0&County_ID=0&City_CD=0&Court=&DistrictPrimaryLocOnly=1&AdminJudicialRegion=0&COADistrictId=0' # noqa
URL = 'https://card.txcourts.gov/ExcelExportPublic.aspx?type=P&export=E&CommitteeID=0&Court=&SortBy=tblCounty.Sort_ID,%20Last_Name&Active_Flg=true&Last_Name=&First_Name=&Court_Type_CD=0&Court_Sub_Type_CD=0&County_ID=0&City_CD=0&Address_Type_CD=0&Annual_Report_CD=0&PersonnelType1=&PersonnelType2=&DistrictPrimaryLocOnly=0&AdminJudicialRegion=0&COADistrictId=0' # noqa
DIRECTORY_FILE = 'util/data/directory.json'
CACHE_FILE = 'util/data/court_directory_cache.tsv'
class Entry(object):
    """One personnel row from the court-directory TSV export.

    Not every row in the data source has all the columns we reference --
    some rows are mal-formed or irregularly formed.  Any field whose index
    is missing from *fields* is therefore set to None, so every instance
    of this class always has every attribute defined.  ``state`` is not a
    source column; it is always "TX" (the directory is Texas-only).
    """

    # Attribute name for each positional column of the source row, in order.
    _FIELD_NAMES = (
        'court_type',    # 0
        'court',         # 1
        'county',        # 2
        'prefix',        # 3
        'first_name',    # 4
        'middle_name',   # 5
        'last_name',     # 6
        'suffix',        # 7
        'title',         # 8
        'address',       # 9
        'city',          # 10
        'postal_code',   # 11
        'telephone',     # 12
        'email',         # 13
    )

    def __init__(self, fields: list) -> None:
        """Map the positional *fields* row onto named attributes.

        Args:
            fields: One row of the TSV export; may be shorter than the
                full column set, in which case the trailing attributes
                are left as None.
        """
        for index, name in enumerate(self._FIELD_NAMES):
            # Length guard replaces the original try/except IndexError:
            # missing trailing columns simply become None.
            setattr(self, name, fields[index] if index < len(fields) else None)
        self.state = "TX"
class CourtDirectory(object):
    """Cached, nested view of the Texas court directory.

    The directory is held as a class-level nested dict:
    county -> court_type -> court -> [personnel entries].
    It is loaded once from DIRECTORY_FILE and shared by all instances.
    """

    directory = None

    def __init__(self):
        """
        Load the cached court directory (only on first instantiation).
        """
        if not CourtDirectory.directory:
            with open(DIRECTORY_FILE, 'r', encoding='utf-8') as fp:
                CourtDirectory.directory = json.load(fp)

    def get_counties(self) -> list:
        """
        Get a list of counties.
        """
        return list(CourtDirectory.directory.keys())

    def get_county_tuples(self) -> list:
        """
        Get a list of counties as (value, label) tuples.
        """
        counties = self.get_counties()
        return [(c, c) for c in counties]

    def get_court_types(self, county: str) -> list:
        """
        Get a list of types of courts for the given county.
        Unknown counties yield an empty list.
        """
        return list(CourtDirectory.directory.get(county, {}).keys())

    def get_court_type_tuples(self, county: str) -> list:
        """
        Get a list of court types as a list of (value, label) tuples.
        """
        court_types = self.get_court_types(county)
        return [(c, c) for c in court_types]

    def get_courts(self, county: str, court_type: str) -> list:
        """
        Get a list of courts of the given type for the given county.
        Unknown county/court_type combinations yield an empty list.
        """
        try:
            return list(CourtDirectory.directory[county][court_type].keys())
        except KeyError:
            return list()

    def get_court_tuples(self, county: str, court_type: str) -> list:
        """
        Get a list of courts as a list of (value, label) tuples.
        """
        courts = self.get_courts(county, court_type)
        return [(c, c) for c in courts]

    def get_court_personnel(self, county: str, court_type: str, court: str) -> list:
        """
        Get a list of people who work in the given court.
        Unknown keys yield an empty list.
        """
        try:
            return list(CourtDirectory.directory[county][court_type][court])
        except KeyError:
            return list()

    @staticmethod
    def process():
        """
        Download a refreshed personnel list and update the cache files.
        """
        CourtDirectory.retrieve()
        directory = CourtDirectory.parse()
        CourtDirectory.save(directory)

    @staticmethod
    def retrieve():
        """
        Retrieve the court directory and save it to the cache file.
        """
        result = requests.get(URL)
        # Fail loudly rather than caching an error page.
        result.raise_for_status()
        # Write the fully-decoded text in one shot.  Decoding per 1024-byte
        # chunk (as before) corrupts multi-byte UTF-8 characters that
        # straddle a chunk boundary.
        with open(CACHE_FILE, 'w', encoding='utf-8') as fp:
            fp.write(result.text)

    @staticmethod
    def parse():
        """
        Parse the cached TSV export into the nested county/type/court dict.
        """
        counties = {}
        with open(CACHE_FILE, newline='', encoding='utf-8') as tsvfile:
            reader = csv.reader(tsvfile, delimiter='\t', quotechar='"')
            for row in reader:
                # Skip blank rows
                if not row:
                    continue
                # Create object with named fields
                entry = Entry(row)
                # Skip rows with no county or no usable court name.
                if not entry.county or not entry.court or len(entry.court.strip()) == 0:
                    continue
                # Update list of counties if we have not seen this one before
                if entry.county and entry.county not in counties:
                    counties[entry.county] = {}
                # Add to the court types for this county
                if entry.court_type not in counties[entry.county]:
                    counties[entry.county][entry.court_type] = {}
                # Add to the courts of this type for this county
                if entry.court not in counties[entry.county][entry.court_type]:
                    counties[entry.county][entry.court_type][entry.court] = []
                # Add to this court's personnel
                counties[entry.county][entry.court_type][entry.court].append(entry)
        return counties

    @staticmethod
    def save(directory: dict):
        """
        Serialize the nested directory dict to DIRECTORY_FILE as JSON.
        Entry objects are handled by the module-level serialize() hook.
        """
        with open(DIRECTORY_FILE, 'w', encoding='utf-8') as fp:
            json.dump(directory, fp, indent=4, default=serialize, sort_keys=True)
def serialize(obj):
    """json.dump default hook: ISO-format dates/times, else the __dict__.

    date and time instances become their isoformat() string; anything else
    (e.g. an Entry) is serialized via its attribute dictionary.
    """
    if isinstance(obj, (date, time)):
        return obj.isoformat()
    return obj.__dict__
# Script entry point: refresh the cache from the network, then print a few
# sample lookups (Collin County district courts) as a smoke test.
if __name__ == '__main__':
    print("Processing . . .", end='')
    CourtDirectory.process()
    print("Done")
    mydir = CourtDirectory()
    print("COUNTIES".center(80, "="))
    print(mydir.get_counties())
    print("COURT TYPES".center(80, "="))
    print(mydir.get_court_types('Collin'))
    print("COURTS".center(80, "="))
    print(mydir.get_courts('Collin', 'District'))
    print("PERSONNEL".center(80, "="))
    # json.dumps relies on the personnel entries being JSON-serializable
    # (they are plain dicts/lists after the cache round-trip).
    print(json.dumps(mydir.get_court_personnel('Collin', 'District', '416th District Court'), indent=4))
|
<gh_stars>0
#!/usr/bin/env python
import argparse
import json
import logging
from time import sleep
import pika
import requests
from requests import ConnectionError
import yaml
from subprocess import check_output
from subprocess import CalledProcessError
import boto3
import datetime as dt
class ClusterDaemon(object):
    """Daemon that auto-starts/stops a Spark cluster based on queue activity.

    Polls a RabbitMQ queue; when messages are waiting and the cluster is
    down, it spins the cluster up via ansible playbooks, and when the queue
    is empty and no job is running it stops the cluster near the end of the
    EC2 billing hour.  Status is reported to Slack and, on fatal errors,
    by email via SES.
    """

    def __init__(self, ansible_config_path, aws_key_name=None, interval=60,
                 qname='sm_annotate', debug=False):
        """
        Args:
            ansible_config_path: path to the ansible group_vars YAML file.
            aws_key_name: AWS key name; defaults to the value in the config.
            interval: polling interval in seconds, capped at 1200.
            qname: RabbitMQ queue to watch.
            debug: enable debug logging and suppress Slack posts.
        """
        with open(ansible_config_path) as fp:
            # safe_load: the config is plain data; yaml.load without an
            # explicit Loader is deprecated and can execute arbitrary tags.
            self.ansible_config = yaml.safe_load(fp)
        self.interval = min(interval, 1200)
        self.aws_key_name = aws_key_name or self.ansible_config['aws_key_name']
        self.master_hostgroup = self.ansible_config['cluster_configuration']['instances']['master']['hostgroup']
        self.slave_hostgroup = self.ansible_config['cluster_configuration']['instances']['slave']['hostgroup']
        self.stage = self.ansible_config['stage']
        self.qname = qname
        self.debug = debug
        self._setup_logger()
        self.ec2 = boto3.resource('ec2', self.ansible_config['aws_region'])

    def _resolve_spark_master(self):
        """Return the first EC2 instance tagged as the master hostgroup, or None."""
        self.logger.debug('Resolving spark master ip...')
        spark_master_instances = list(self.ec2.instances.filter(
            Filters=[{'Name': 'tag:hostgroup', 'Values': [self.master_hostgroup]},
                     {'Name': 'instance-state-name', 'Values': ['running', 'stopped', 'pending']}]))
        return spark_master_instances[0] if spark_master_instances else None

    @property
    def spark_master_public_ip(self):
        """Public IP of the Spark master instance, or None if not found."""
        spark_master = self._resolve_spark_master()
        return spark_master.public_ip_address if spark_master else None

    @property
    def spark_master_private_ip(self):
        """Private IP of the Spark master instance, or None if not found."""
        spark_master = self._resolve_spark_master()
        return spark_master.private_ip_address if spark_master else None

    def _setup_logger(self):
        """Configure a stream logger; DEBUG level when self.debug is set."""
        self.logger = logging.getLogger('sm_cluster_auto_start')
        handler = logging.StreamHandler()
        formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        self.logger.setLevel(logging.DEBUG if self.debug else logging.INFO)

    def _send_email(self, email, subj, body):
        """Send a plain-text email through AWS SES (eu-west-1)."""
        ses = boto3.client('ses', 'eu-west-1')
        resp = ses.send_email(
            Source='<EMAIL>',
            Destination={
                'ToAddresses': [email]
            },
            Message={
                'Subject': {
                    'Data': subj
                },
                'Body': {
                    'Text': {
                        'Data': body
                    }
                }
            }
        )
        if resp['ResponseMetadata']['HTTPStatusCode'] == 200:
            self.logger.info('Email with "{}" subject was sent to {}'.format(subj, email))
        else:
            # Typo fix: the sending service is SES, not "SEM".
            self.logger.warning('SES failed to send email to {}'.format(email))

    def _send_rest_request(self, address):
        """GET *address*; True on a 2xx/3xx response, False on any failure."""
        try:
            resp = requests.get(address)
        except ConnectionError as e:
            # Expected while the cluster is down; log quietly.
            self.logger.debug('{} - {}'.format(address, e))
            return False
        except Exception as e:
            self.logger.warning('{} - {}'.format(address, e))
            return False
        else:
            self.logger.debug(resp)
            return resp.ok

    def queue_empty(self):
        """True when the annotation queue has no messages.

        On any connection/declare error we conservatively report the queue
        as empty so the daemon does not spin up a cluster it cannot verify.
        """
        try:
            creds = pika.PlainCredentials(self.ansible_config['rabbitmq_user'],
                                          self.ansible_config['rabbitmq_password'])
            conn = pika.BlockingConnection(pika.ConnectionParameters(host=self.ansible_config['rabbitmq_host'],
                                                                     credentials=creds))
            ch = conn.channel()
            m = ch.queue_declare(queue=self.qname, durable=True, arguments={'x-max-priority': 3})
            self.logger.debug('Messages in the queue: {}'.format(m.method.message_count))
            return m.method.message_count == 0
        except Exception as e:
            self.logger.warning(e, exc_info=True)
            return True

    def cluster_up(self):
        """True when the Spark master REST API (port 8080) responds."""
        return self._send_rest_request('http://{}:8080/api/v1/applications'.format(self.spark_master_public_ip))

    def job_running(self):
        """True when a Spark application UI (port 4040) responds."""
        return self._send_rest_request('http://{}:4040/api/v1/applications'.format(self.spark_master_public_ip))

    def _local(self, command, success_msg=None, failed_msg=None):
        """Run *command* locally; log and re-raise on non-zero exit."""
        try:
            res = check_output(command)
            self.logger.debug(res)
            self.logger.info(success_msg)
        except CalledProcessError as e:
            self.logger.warning(e.output)
            self.logger.error(failed_msg)
            raise e

    def cluster_start(self):
        """Start master and slave instances via the aws_start playbook."""
        self.logger.info('Spinning up the cluster...')
        self._local(['ansible-playbook', '-i', self.stage, '-f', '1', 'aws_start.yml', '-e components=master,slave'],
                    'Cluster is spun up', 'Failed to spin up the cluster')

    def cluster_stop(self):
        """Stop master and slave instances via the aws_stop playbook."""
        self.logger.info('Stopping the cluster...')
        self._local(['ansible-playbook', '-i', self.stage, '-f', '1', 'aws_stop.yml', '-e', 'components=master,slave'],
                    'Cluster is stopped successfully', 'Failed to stop the cluster')

    def cluster_setup(self):
        """Configure the freshly started cluster via the setup playbook."""
        self.logger.info('Setting up the cluster...')
        self._local(['ansible-playbook', '-i', self.stage, '-f', '1', 'aws_cluster_setup.yml'],
                    'Cluster setup is finished', 'Failed to set up the cluster')

    def sm_engine_deploy(self):
        """Deploy the SM engine code via the deploy playbook."""
        self.logger.info('Deploying SM engine code...')
        self._local(['ansible-playbook', '-i', self.stage, '-f', '1', 'deploy/engine.yml'],
                    'The SM engine is deployed', 'Failed to deploy the SM engine')

    def _post_to_slack(self, emoji, msg):
        """Post *msg* to the configured Slack webhook (suppressed in debug mode)."""
        if not self.debug and self.ansible_config['slack_webhook_url']:
            msg = {
                "channel": self.ansible_config['slack_channel'],
                "username": "webhookbot",
                "text": ":{}: {}".format(emoji, msg),
                "icon_emoji": ":robot_face:"
            }
            requests.post(self.ansible_config['slack_webhook_url'], json=msg)

    def _ec2_hour_over(self):
        """True when the oldest cluster instance is near the end of a billing hour.

        NOTE(review): the modulo arithmetic appears to fire within the last
        few minutes of each whole hour since launch (window scales with the
        polling interval, at least 5 minutes) -- confirm before changing.
        """
        spark_instances = list(self.ec2.instances.filter(
            Filters=[{'Name': 'tag:hostgroup', 'Values': [self.master_hostgroup, self.slave_hostgroup]},
                     {'Name': 'instance-state-name', 'Values': ['running', 'pending']}]))
        launch_time = min([i.launch_time for i in spark_instances])
        now_time = dt.datetime.utcnow()
        self.logger.debug('launch: {} now: {}'.format(launch_time, now_time))
        return 0 < (60 + (launch_time.minute - now_time.minute)) % 60 <= max(5, 2 * self.interval / 60)

    def _try_start_setup_deploy(self, setup_failed_max=5):
        """Start, set up and deploy the cluster, retrying up to *setup_failed_max* times.

        Re-raises the last error once the retry budget is exhausted.
        """
        setup_failed = 0
        while True:
            try:
                self.logger.info('Queue is not empty. Starting the cluster (%s attempt)...', setup_failed + 1)
                self.cluster_start()
                m = {
                    'master': self.ansible_config['cluster_configuration']['instances']['master'],
                    'slave': self.ansible_config['cluster_configuration']['instances']['slave']
                }
                self._post_to_slack('rocket', "[v] Cluster started: {}".format(m))
                self.cluster_setup()
                self.sm_engine_deploy()
                self._post_to_slack('motorway', "[v] Cluster setup finished, SM engine deployed")
                # Give the cluster a minute to settle before the next poll.
                sleep(60)
            except Exception as e:
                self.logger.warning('Failed to start/setup/deploy cluster: %s', e)
                setup_failed += 1
                if setup_failed >= setup_failed_max:
                    raise e
            else:
                break

    def start(self):
        """Main poll loop: start the cluster on demand, stop it when idle.

        On any unhandled error, notify Slack and email, then exit.
        """
        self.logger.info('Started the SM cluster auto-start daemon (interval=%dsec)...', self.interval)
        try:
            while True:
                if not self.queue_empty():
                    if not self.cluster_up():
                        self._try_start_setup_deploy()
                else:
                    # Only stop when idle AND close to the end of a billing hour.
                    if self.cluster_up() and not self.job_running() and self._ec2_hour_over():
                        self.logger.info('Queue is empty. No jobs running. Stopping the cluster...')
                        self.cluster_stop()
                        self._post_to_slack('checkered_flag', "[v] Cluster stopped")
                sleep(self.interval)
        except Exception as e:
            self._post_to_slack('sos', "[v] Something went wrong: {}".format(e))
            self._send_email('<EMAIL>', 'Cluster auto start daemon ({}) failed'.format(self.stage), str(e))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Daemon for auto starting SM cluster')
parser.add_argument('--ansible-config', dest='ansible_config_path', default='dev/group_vars/all.yml', type=str,
help='Ansible config path')
parser.add_argument('--interval', type=int, default=120, help='Cluster status check interval in sec (<1200)')
parser.add_argument('--debug', dest='debug', action='store_true', help='Run in debug mode')
args = parser.parse_args()
cluster_daemon = ClusterDaemon(args.ansible_config_path, interval=args.interval,
qname='sm_annotate', debug=args.debug)
cluster_daemon.start()
|
import dmc2gym
import matplotlib.pyplot as plt
from tqdm import tqdm
import numpy as np
import gym
from collections import deque
class FrameStack(gym.Wrapper):
    """Gym wrapper that stacks the k most recent observations along axis 0.

    The wrapped observation space repeats the first dimension k times;
    reset() seeds the stack with k copies of the initial observation.
    """

    def __init__(self, env, k):
        gym.Wrapper.__init__(self, env)
        self._k = k
        # Bounded deque: appending automatically drops the oldest frame.
        self._frames = deque([], maxlen=k)
        base_shape = env.observation_space.shape
        stacked_shape = (base_shape[0] * k,) + base_shape[1:]
        self.observation_space = gym.spaces.Box(
            low=0,
            high=1,
            shape=stacked_shape,
            dtype=env.observation_space.dtype
        )
        self._max_episode_steps = env._max_episode_steps

    def reset(self):
        """Reset the env and fill the stack with k copies of the first frame."""
        first = self.env.reset()
        self._frames.extend([first] * self._k)
        return self._get_obs()

    def step(self, action):
        """Step the env, push the new frame, and return the stacked view."""
        frame, reward, done, info = self.env.step(action)
        self._frames.append(frame)
        return self._get_obs(), reward, done, info

    def _get_obs(self):
        # The stack must be full before an observation is produced.
        assert len(self._frames) == self._k
        return np.concatenate(list(self._frames), axis=0)
def imshow(obs):
    """Display an observation; a 9-channel image is shown as three RGB panels."""
    if obs.shape[2] == 9:
        # Stack of three RGB frames: draw each 3-channel slice side by side.
        for panel, channels in zip((131, 132, 133),
                                   (slice(0, 3), slice(3, 6), slice(6, None))):
            plt.subplot(panel)
            plt.imshow(obs[:, :, channels])
    else:
        plt.imshow(obs)
    plt.axis('off')
    plt.tight_layout()
    plt.pause(0.1)
    plt.show(block=False)
def main_dmc2gym():
    """Roll out a random policy in a dm_control task (via dmc2gym) and visualize it.

    Builds either a pixel-based environment (frame-stacked, distracting
    background options) or a state-based one, then steps it with random
    actions, printing rewards and rendering each frame.
    """
    # Per-domain action repeat (frame skip) used for the pixel-based env.
    action_repeat = dict(
        cartpole=8,
        walker=2,
        cheetah=4,
        finger=2,
        reacher=4,
        ball_in_cup=4,
        hopper=4,
        fish=4,
        pendulum=4,
        quadruped=4
    )
    # Per-domain camera; only passed in the state-based branch below.
    camera_id = dict(
        cartpole=0,
        walker=0,
        cheetah=0,
        finger=0,
        reacher=0,
        ball_in_cup=0,
        hopper=0,
        fish=0,
        pendulum=0,
        quadruped=2
    )
    img_size = 84
    n_steps = 50
    # Uncomment one of the [domain, task] pairs below to switch tasks.
    # env_name = ['quadruped', 'walk']
    # env_name = ['quadruped', 'run']
    # env_name = ['dog', 'run']
    # env_name = ['cheetah', 'run']
    # env_name = ['walker', 'stand']
    env_name = ['walker', 'walk']
    # env_name = ['walker', 'run']
    # env_name = ['finger', 'spin'] # Sparse
    # env_name = ['finger', 'turn_easy'] # Sparse
    # env_name = ['finger', 'turn_hard'] # Sparse
    # env_name = ['reacher', 'easy'] # Sparse
    # env_name = ['reacher', 'hard'] # Sparse
    # env_name = ['hopper', 'stand']
    # env_name = ['hopper', 'hop']
    # env_name = ['cartpole', 'swingup']
    # env_name = ['cartpole', 'balance']
    # env_name = ['cartpole', 'balance_sparse']
    # env_name = ['cartpole', 'swingup_sparse']
    # env_name = ['ball_in_cup', 'catch'] # Sparse
    # env_name = ['fish', 'upright']
    # env_name = ['fish', 'swim']
    # env_name = ['pendulum', 'swingup']
    from_image = True
    if from_image:
        # Pixel observations: distracting-background options plus frame stacking.
        env = dmc2gym.make(
            domain_name=env_name[0],
            task_name=env_name[1],
            difficulty='easy',
            background_dataset_path='../dmc2gym/dmc2gym/videos/DAVIS/JPEGImages/480p',
            dynamic=False,
            default_background=False,
            default_camera=True,
            default_color=True,
            seed=1,
            visualize_reward=False,
            from_pixels=from_image,
            height=img_size,
            width=img_size,
            frame_skip=action_repeat[env_name[0]]
        )
        env = FrameStack(env, k=3)
    else:
        # Low-dimensional state observations; rendering is done separately below.
        env = dmc2gym.make(
            domain_name=env_name[0],
            task_name=env_name[1],
            seed=1,
            visualize_reward=False,
            from_pixels=False,
            frame_skip=1,
            camera_id=camera_id[env_name[0]],
        )
    print('[INFO] Observation space: ', env.observation_space)
    print('[INFO] Action space: ', env.action_space)
    o = env.reset()
    reset_step = 10
    for i in tqdm(range(n_steps)):
        a = env.action_space.sample()
        o, r, done, _ = env.step(a)
        print('Reward: ', r)
        if from_image:
            # Observation is channel-first (C, H, W); imshow expects (H, W, C).
            imshow(o.transpose(1, 2, 0))
        else:
            im = env.render(mode='rgb_array')
            imshow(im)
        # Periodic reset to visit multiple episodes within n_steps.
        if done or (i != 0 and i % reset_step == 0):
            env.reset()
if __name__ == '__main__':
    main_dmc2gym()
|
# Let's import some dependences
import pandas as pd
import concurrent.futures as cf
from yahoofinancials import YahooFinancials
import re
import ast
import time
import requests
import bs4 as bs
from bs4 import BeautifulSoup
# I use Wikipedia to see which companies are in the S&P500 index
sp500 = 'http://en.wikipedia.org/wiki/List_of_S%26P_500_companies'
# Fetch the page and locate the constituents table. The original code used
# `table` without ever defining it, which raised a NameError at L1009.
resp = requests.get(sp500)
soup = bs.BeautifulSoup(resp.text, 'html.parser')
table = soup.find('table', {'class': 'wikitable sortable'})
# I create an empty list to fill in with data
tickers = []
for row in table.findAll('tr')[1:]:
    # First cell of each row is the ticker symbol; strip the trailing newline
    # Wikipedia cells carry.
    ticker = row.findAll('td')[0].text.strip()
    tickers.append(ticker)
print(tickers)
# I create three dictionaries to use for storing information
balanceSheet = {}
incomeStatement = {}
cashStatement = {}
# Let's iterate through each ticker in the list of tickers
def retrieve_stock_data(ticker):
    """Fetch annual balance/income/cash-flow statements for *ticker*.

    Results are stored in the module-level dicts ``balanceSheet``,
    ``incomeStatement`` and ``cashStatement`` keyed by ticker.
    Failures are logged and swallowed so a batch run can continue.
    """
    try:
        print(ticker)
        yahoo_financials = YahooFinancials(ticker)
        balance_sheet_data = yahoo_financials.get_financial_stmts('annual', 'balance')
        income_statement_data = yahoo_financials.get_financial_stmts('annual', 'income')
        cash_statement_data = yahoo_financials.get_financial_stmts('annual', 'cash')
        balanceSheet[ticker] = balance_sheet_data['balanceSheetHistory'][ticker]
        incomeStatement[ticker] = income_statement_data['incomeStatementHistory'][ticker]
        cashStatement[ticker] = cash_statement_data['cashflowStatementHistory'][ticker]
    except Exception as e:
        # The original bare `except:` also swallowed KeyboardInterrupt/SystemExit
        # and gave no hint which ticker failed or why.
        print('error with retrieving stock data for {}: {}'.format(ticker, e))
# Compute per-ticker ROE and EPS-growth series from the statement dicts.
# NOTE(review): zipping the two dicts assumes balanceSheet and incomeStatement
# share the same insertion order of tickers (true when both are filled by
# retrieve_stock_data); the `keyB == keyI` guard protects against mismatches.
roe_dict, epsg_dict = {}, {}
count_missing, count_cond, count_eps_0 = 0, 0, 0
for (keyB, valB), (keyI, valI) in zip(balanceSheet.items(), incomeStatement.items()):
    try:
        if keyB == keyI:
            # Fiscal-year date keys from each statement list; only proceed when
            # both statements cover exactly the same years.
            yearsI = [k for year in valI for k, v in year.items()]
            yearsB = [k for year in valB for k, v in year.items()]
            if yearsI == yearsB:
                count_cond += 1
                equity = [v['totalStockholderEquity'] for year in valB for k, v in year.items()]
                commonStock = [v['commonStock'] for year in valB for k, v in year.items()]
                profit = [v['grossProfit'] for year in valI for k, v in year.items()]
                revenue = [v['totalRevenue'] for year in valI for k, v in year.items()]
                netIncome = [v['netIncome'] for year in valI for k, v in year.items()]
                # ROE per year (%) and its average: (avg, yearly list).
                roe = [round(netin/equity*100,2) for netin, equity in zip(netIncome, equity)]
                roe_dict[keyB] = (round(sum(roe)/len(roe),2), roe)
                # NOTE(review): "EPS" here is grossProfit / commonStock, not the
                # usual netIncome-based EPS — confirm this is intentional.
                eps = [round(earn/stono,2) for earn, stono in zip(profit, commonStock)]
                try:
                    # Growth rates between years; entries are newest-first, so
                    # eps[ep-1]/eps[ep] compares a year with the one before it.
                    epsg = []
                    for ep in range(len(eps)):
                        if ep == 0:
                            continue
                        elif ep == 1:
                            epsg.append(round(100*((eps[ep-1]/eps[ep])-1),2))
                        elif ep == 2:
                            # Annualized 2-year growth, then 1-year growth.
                            epsg.append(round(100*((eps[ep-2]/eps[ep])**(1/2)-1),2))
                            epsg.append(round(100*((eps[ep-1]/eps[ep])-1),2))
                        elif ep == 3:
                            # Annualized 3-year growth, then 1-year growth.
                            epsg.append(round(100*((eps[ep-3]/eps[ep])**(1/3)-1),2))
                            epsg.append(round(100*((eps[ep-1]/eps[ep])-1),2))
                        else:
                            print('More than 4 years of FY data')
                    epsg_dict[keyB] = (round(sum(epsg)/len(epsg),2), epsg)
                except:
                    # Division by a zero EPS (or empty epsg) lands here; record
                    # a zero-growth placeholder so the ticker is not lost.
                    # print(keyB, 'eps contains 0')
                    count_eps_0 += 1
                    epsg_dict[keyB] = (0, eps)
    except:
        # Missing statement fields (KeyError etc.) — skip the ticker.
        # print(keyB, 'data missing')
        count_missing += 1
print('Yearly data avail',count_cond, 'out of', len(balanceSheet))
print('Some key data missing', count_missing, 'out of', len(balanceSheet))
print('EPS Growth NaN', count_eps_0, 'out of', len(balanceSheet))
# Apply the screening conditions to the two dictionaries.
ROE_req = 10  # ROE above 10%
EPSG_req = 10  # EPS growth above 10%
print('-'*50, 'RETURN ON EQUITY','-'*50)
# Keep companies whose average ROE (v[0]) meets the requirement and whose
# yearly ROE series (v[1]) contains no negative entries.
roe_crit = {k:v for (k,v) in roe_dict.items() if v[0] >= ROE_req and sum(n < 0 for n in v[1])==0}
print('The number of companies have a ROE greater than 10% are: ' + str(len(roe_crit)))
#print(roe_crit)
print('-'*50, 'EARNINGS PER SHARE GROWTH','-'*50)
eps_crit = {k:v for (k,v) in epsg_dict.items() if v[0] >= EPSG_req and sum(n < 0 for n in v[1])==0}
print('The number of companies have a EPS growth greater than 10% are: ' + str(len(eps_crit)))
#print(eps_crit)
print('-'*50, 'ROE & EPS Growth Critera','-'*50)
# Dict membership is O(1), so this is O(n) instead of the original O(n*m)
# double loop over both key sets; iteration order (roe_crit order) and the
# resulting list are identical.
both = [key for key in roe_crit if key in eps_crit]
print('The number of companies have both criteria are: ' + str(len(both)))
print(both)
|
"""
FreeRTOS
Copyright (C) 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
http://aws.amazon.com/freertos
http://www.FreeRTOS.org
"""
class OtaTestResult:
    """Object representing a OTA test result.
    Attributes
        result(str): The result (PASS | FAIL | ERROR) of this testcase.
        board(str): The board name of this testcase.
        testName(str): The name of this testcase.
        jobStatus(str): The job status from AWS IoT, containing both status and status reason provided by devices.
        summary(str): The summary for the test result.
    Methods:
        testResultFromJobStatus(testName, jobStatus, isPositive, summary)
    Example:
        # NOTE: the constructor is keyword-only, so arguments must be named.
        passingResult = OtaTestResult(result=OtaTestResult.PASS, board='TI', testName='OtaTestGreaterVersion', jobStatus='SUCCEEDED (accepted v0.9.1)', summary='accepted v0.9.1')
        failingResult = OtaTestResult(result=OtaTestResult.FAIL, board='TI', testName='OtaTestGreaterVersion')
        errorResult = OtaTestResult(result=OtaTestResult.ERROR, board='TI')
        errorResult = OtaTestResult(result=OtaTestResult.ERROR, board='TI', testName='OtaTestGreaterVersion')
    """
    PASS = 'PASS'    # The test executed and passed.
    FAIL = 'FAIL'    # The test executed but failed.
    ERROR = 'ERROR'  # The test may or may not execute due to test script error.

    # ANSI escape sequences used for colored terminal output.
    __HEADER = '\033[95m'
    __OKBLUE = '\033[94m'
    __OKGREEN = '\033[92m'
    __WARNING = '\033[93m'
    __FAIL = '\033[91m'
    __ENDC = '\033[0m'
    __BOLD = '\033[1m'
    __UNDERLINE = '\033[4m'
    # Maps a result value to the color it is printed in.
    __RESULT_COLOR = {
        PASS: __OKGREEN,
        FAIL: __FAIL,
        ERROR: __WARNING,
    }

    def __init__(self, *, result, board='', testName='', jobStatus=None, summary=''):
        self.result = result
        self.board = board
        self.testName = testName
        self.jobStatus = jobStatus
        self.summary = summary

    def print(self, elapsed):
        """Pretty-print this result (colored) plus the elapsed time in seconds."""
        print(self.__RESULT_COLOR[self.result] + 'OTA E2E TEST RESULT: ' + self.result)
        print(self.__OKBLUE + 'IOT JOB STATUS: ' + (self.jobStatus if self.jobStatus else 'No IoT Job Status'))
        print(self.__OKBLUE + 'OTA E2E TEST RESULT SUMMARY: ' + (self.summary if self.summary else 'No Test Summary') + self.__ENDC)
        print(self.__BOLD + 'Time Elapsed: ' + str(int(elapsed / 60)) + " Minutes and " + str(int(elapsed % 60)) + " Seconds" + self.__ENDC)

    @staticmethod
    def testResultFromJobStatus(testName, jobStatus, isPositive, summary):
        """Quickly turn the Test result from OtaAwsAgent into a OtaTest Result.
        Args:
            testName(str): The name of the test case.
            jobStatus(nametuple(status reason)): This is typically from OtaAwsAgent.pollOtaUpdateCompletion() or OtaAwsAgent.__getJobStatus(). May be None.
            isPositive(bool): The flag tells the test case is happy case or not.
        Returns an OtaTestResult.
        """
        # jobStatus may be None/empty when no IoT job status was retrieved; the
        # original code dereferenced jobStatus.status before its own None-check,
        # which raised AttributeError in exactly that case.
        succeeded = bool(jobStatus) and jobStatus.status == 'SUCCEEDED'
        if isPositive:
            passOrFail = OtaTestResult.PASS if succeeded else OtaTestResult.FAIL
        else:
            passOrFail = OtaTestResult.FAIL if succeeded else OtaTestResult.PASS
        statusText = jobStatus.status + ' (' + jobStatus.reason + ')' if jobStatus else ""
        return OtaTestResult(result=passOrFail, testName=testName, jobStatus=statusText, summary=summary)
|
from collections import OrderedDict
import os
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from apps.app.models import App
from apps.auth.models import User
from apps.award.models import Awardee
from apps.common.tests import GetResponseMixin
class AwardRESTTestCase(APITestCase, GetResponseMixin):
    """REST tests for the awardee-import endpoint (``importawardees-list``).

    Verifies that only the app owner may import awardees from an uploaded
    Excel file, and checks the import response payload and database effects.
    """
    fixtures = [
        "apps/common/fixtures/tests/images.json",
        "apps/app/fixtures/tests/app_types.json",
        "apps/app/fixtures/tests/apps.json",
        "apps/auth/fixtures/tests/departments.json",
        "apps/auth/fixtures/tests/users.json",
        "apps/user_group/fixtures/tests/user_groups.json",
        "apps/award/fixtures/tests/awards.json",
    ]

    def setUp(self):
        self.normal_user = User.objects.get(username='normal_user')
        self.admin_user = User.objects.get(username='admin_user')
        self.app_owner_user = User.objects.get(username='app_owner_user')
        self.app = App.objects.get(pk=1)
        # Test spreadsheet shipped next to this module.
        self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'info.xlsx')

    def _post_import(self, user, award_id=1):
        """Authenticate as *user* and POST the test spreadsheet to the import endpoint."""
        self.client.force_authenticate(user=user)
        url = reverse('importawardees-list', kwargs={'award_id': award_id})
        with open(self.file_path, 'rb') as fp:
            return self.client.post(url, data={'upload': fp})

    def test_normal_user_could_not_import_awardees(self):
        response = self._post_import(self.normal_user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self._get_business_status_code(response), status.HTTP_403_FORBIDDEN)

    def test_admin_user_could_not_import_awardees(self):
        response = self._post_import(self.admin_user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self._get_business_status_code(response), status.HTTP_403_FORBIDDEN)

    def test_app_owner_could_import_awardees(self):
        response = self._post_import(self.app_owner_user)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self._get_business_status_code(response), status.HTTP_200_OK)

    def test_app_owner_import_awardees_response(self):
        # First import: one row fails validation, the rest succeed.
        response = self._post_import(self.app_owner_user)
        self.assertEqual(response.data, OrderedDict([('status', 200), ('msg', '成功'),
                                                     ('data',
                                                      {'fail': 1, 'totalCount': 5, 'success': 4,
                                                       'failed_detail': ['第5行 abcdefg: 奖励描述需要不为空并字数不超过10']
                                                       })]))
        # Second import of the same file: every previously-imported row is
        # rejected as a duplicate.
        response = self._post_import(self.app_owner_user)
        self.assertEqual(response.data, OrderedDict([('status', 200), ('msg', '成功'),
                                                     ('data',
                                                      {'totalCount': 5,
                                                       'failed_detail': ['第2行 normal_user: 对应用户已经被加入此次奖励',
                                                                         '第3行 admin_user: 对应用户已经被加入此次奖励',
                                                                         '第4行 22222222222222: 对应用户已经被加入此次奖励',
                                                                         '第5行 abcdefg: 奖励描述需要不为空并字数不超过10',
                                                                         '第6行 app_owner_user: 对应用户已经被加入此次奖励'],
                                                       'fail': 5, 'success': 0})]))

    def test_app_owner_import_awardees_effect(self):
        self._post_import(self.app_owner_user)
        self.assertTrue(Awardee.objects.filter(user__username='normal_user').exists())
        self.assertTrue(Awardee.objects.filter(user__username='admin_user').exists())
        self.assertTrue(Awardee.objects.filter(user__username='app_owner_user').exists())
        # this one only have phone info in excel file
        self.assertTrue(Awardee.objects.filter(user__username='manong').exists())

    def test_import_with_target_award_not_exist(self):
        response = self._post_import(self.app_owner_user, award_id=10000000)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(self._get_business_status_code(response), status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data, OrderedDict([('status', 400), ('msg', 'Not found.')]))
|
import numpy as np
import sys
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import os
from scipy import integrate
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class XfluxXequation(Calculus, SetAxisLimit, Tools, Errors, object):
    def __init__(self, filename, ig, ieos, fext, inuc, element, bconv, tconv, tke_diss, tauL, hp, intc, nsdim, data_prefix):
        """Build the Xi-flux equation terms and flux models from averaged data.

        Loads Reynolds-averaged mean fields for nuclear species `inuc` from
        `filename` (at time slice `intc`), constructs every term of the Xi
        flux equation plus several diffusion/flux models, and stores the
        results as attributes for the plot_* methods.
        """
        super(XfluxXequation, self).__init__(ig)
        # load data to structured array
        eht = self.customLoad(filename)
        # load grid
        xzn0 = self.getRAdata(eht, 'xzn0')
        nx = self.getRAdata(eht, 'nx')
        # pick equation-specific Reynolds-averaged mean fields according to:
        # https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
        dd = self.getRAdata(eht, 'dd')[intc]
        ux = self.getRAdata(eht, 'ux')[intc]
        uy = self.getRAdata(eht, 'uy')[intc]
        uz = self.getRAdata(eht, 'uz')[intc]
        pp = self.getRAdata(eht, 'pp')[intc]
        gg = self.getRAdata(eht, 'gg')[intc]
        xi = self.getRAdata(eht, 'x' + inuc)[intc]
        uxy = self.getRAdata(eht, 'uxy')[intc]
        uxz = self.getRAdata(eht, 'uxz')[intc]
        ddux = self.getRAdata(eht, 'ddux')[intc]
        dduy = self.getRAdata(eht, 'dduy')[intc]
        dduz = self.getRAdata(eht, 'dduz')[intc]
        ddgg = self.getRAdata(eht, 'ddgg')[intc]
        dduxux = self.getRAdata(eht, 'dduxux')[intc]
        dduyuy = self.getRAdata(eht, 'dduyuy')[intc]
        dduzuz = self.getRAdata(eht, 'dduzuz')[intc]
        uxux = self.getRAdata(eht, 'uxux')[intc]
        uxuy = self.getRAdata(eht, 'uxuy')[intc]
        uxuz = self.getRAdata(eht, 'uxuz')[intc]
        uyuy = self.getRAdata(eht, 'uyuy')[intc]
        uzuz = self.getRAdata(eht, 'uzuz')[intc]
        ddxi = self.getRAdata(eht, 'ddx' + inuc)[intc]
        xiux = self.getRAdata(eht, 'x' + inuc + 'ux')[intc]
        xisq = self.getRAdata(eht, 'x' + inuc + 'sq')[intc]
        ddxiux = self.getRAdata(eht, 'ddx' + inuc + 'ux')[intc]
        ddxidot = self.getRAdata(eht, 'ddx' + inuc + 'dot')[intc]
        # print(ddxidot)
        # print("-------------------")
        # print(ddgg)
        ddxidotux = self.getRAdata(eht, 'ddx' + inuc + 'dotux')[intc]
        ddxiuxux = self.getRAdata(eht, 'ddx' + inuc + 'uxux')[intc]
        ddxiuyuy = self.getRAdata(eht, 'ddx' + inuc + 'uyuy')[intc]
        ddxiuzuz = self.getRAdata(eht, 'ddx' + inuc + 'uzuz')[intc]
        xiddgg = self.getRAdata(eht, 'x' + inuc + 'ddgg')[intc]
        xigradxpp = self.getRAdata(eht, 'x' + inuc + 'gradxpp')[intc]
        # Reynolds-averaged mean fields for flux modelling:
        ddcp = self.getRAdata(eht, 'ddcp')[intc]
        ddtt = self.getRAdata(eht, 'ddtt')[intc]
        ddhh = self.getRAdata(eht, 'ddhh')[intc]
        ddhhux = self.getRAdata(eht, 'ddhhux')[intc]
        ddttsq = self.getRAdata(eht, 'ddttsq')[intc]
        uxdivu = self.getRAdata(eht, 'uxdivu')[intc]
        divu = self.getRAdata(eht, 'divu')[intc]
        gamma1 = self.getRAdata(eht, 'gamma1')[intc]
        gamma3 = self.getRAdata(eht, 'gamma3')[intc]
        # NOTE(review): gamma1/gamma3 are immediately overwritten with 'ux'
        # data below, making the two loads above dead code. This looks like a
        # debugging leftover -- confirm before relying on `cnst = gamma1`
        # further down.
        gamma1 = self.getRAdata(eht, 'ux')[intc]
        gamma3 = self.getRAdata(eht, 'ux')[intc]
        fht_rxx = dduxux - ddux * ddux / dd
        fdil = (uxdivu - ux * divu)
        # store time series for time derivatives
        t_timec = self.getRAdata(eht, 'timec')
        t_dd = self.getRAdata(eht, 'dd')
        t_ddux = self.getRAdata(eht, 'ddux')
        t_ddxi = self.getRAdata(eht, 'ddx' + inuc)
        t_ddxiux = self.getRAdata(eht, 'ddx' + inuc + 'ux')
        ##################
        # Xi FLUX EQUATION
        ##################
        # construct equation-specific mean fields
        t_fxi = t_ddxiux - t_ddxi * t_ddux / t_dd
        fht_ux = ddux / dd
        fht_xi = ddxi / dd
        rxx = dduxux - ddux * ddux / dd
        fxi = ddxiux - ddxi * ddux / dd
        fxxi = ddxiuxux - (ddxi / dd) * dduxux - 2. * (ddux / dd) * ddxiux + 2. * ddxi * ddux * ddux / (dd * dd)
        # this is for Rogers et al.1989 model
        fxi1 = fxi / dd
        fxi2 = xiux - xi * ux
        # LHS -dq/dt
        self.minus_dt_fxi = -self.dt(t_fxi, xzn0, t_timec, intc)
        # LHS -div(dduxfxi)
        self.minus_div_fht_ux_fxi = -self.Div(fht_ux * fxi, xzn0)
        # RHS -div fxxi
        self.minus_div_fxxi = -self.Div(fxxi, xzn0)
        # RHS -fxi gradx fht_ux
        self.minus_fxi_gradx_fht_ux = -fxi * self.Grad(fht_ux, xzn0)
        # RHS -rxx d_r xi
        self.minus_rxx_gradx_fht_xi = -rxx * self.Grad(fht_xi, xzn0)
        # RHS - X''i gradx P - X''_i gradx P'
        self.minus_xiff_gradx_pp_minus_xiff_gradx_ppff = \
            -(xi * self.Grad(pp, xzn0) - fht_xi * self.Grad(pp, xzn0)) - (xigradxpp - xi * self.Grad(pp, xzn0))
        # RHS +uxff_eht_dd_xidot
        self.plus_uxff_eht_dd_xidot = +(ddxidotux - (ddux / dd) * ddxidot)
        # RHS +gi (geometric terms)
        # NOTE(review): the bare `2. * (dduy / dd)` and `2. * (dduz / dd)`
        # terms look like they are missing a `* ddxiuy` / `* ddxiuz` factor
        # (dimensionally inconsistent with their neighbours) -- confirm
        # against the ransX implementation guide.
        self.plus_gi = \
            -(ddxiuyuy - (ddxi / dd) * dduyuy - 2. * (dduy / dd) + 2. * ddxi * dduy * dduy / (dd * dd)) / xzn0 - \
            (ddxiuzuz - (ddxi / dd) * dduzuz - 2. * (dduz / dd) + 2. * ddxi * dduz * dduz / (dd * dd)) / xzn0 + \
            (ddxiuyuy - (ddxi / dd) * dduyuy) / xzn0 + \
            (ddxiuzuz - (ddxi / dd) * dduzuz) / xzn0
        # -res
        self.minus_resXiFlux = -(self.minus_dt_fxi + self.minus_div_fht_ux_fxi + self.minus_div_fxxi +
                                 self.minus_fxi_gradx_fht_ux + self.minus_rxx_gradx_fht_xi +
                                 self.minus_xiff_gradx_pp_minus_xiff_gradx_ppff +
                                 self.plus_uxff_eht_dd_xidot + self.plus_gi)
        ######################
        # END Xi FLUX EQUATION
        ######################
        # -eht_xiddgg + fht_xx eht_ddgg
        self.minus_xiddgg = -xiddgg
        self.plus_fht_xi_eht_ddgg = fht_xi * ddgg
        self.minus_xiddgg_plus_fht_xi_eht_ddgg = -xiddgg + fht_xi * ddgg
        # -res (alternative residual using the gravity form of the pressure term)
        self.minus_resXiFlux2 = -(self.minus_dt_fxi + self.minus_div_fht_ux_fxi + self.minus_div_fxxi +
                                  self.minus_fxi_gradx_fht_ux + self.minus_rxx_gradx_fht_xi +
                                  self.minus_xiddgg_plus_fht_xi_eht_ddgg +
                                  self.plus_uxff_eht_dd_xidot + self.plus_gi)
        # variance of temperature fluctuations
        sigmatt = (ddttsq - ddtt * ddtt / dd) / dd
        # enthalpy flux
        fhh = ddhhux - ddhh * ddux / dd
        # heat capacity
        fht_cp = ddcp / dd
        # mlt velocity
        alphae = 1.
        u_mlt = fhh / (alphae * fht_cp * sigmatt)
        # size of convection zone in pressure scale height
        cnvzsize = tconv - bconv
        # DIFFUSION gradient models
        # model 1
        alpha0 = 1.5
        Dumlt = (1. / 3.) * u_mlt * alpha0 * cnvzsize
        self.minus_Dumlt_gradx_fht_xi = -Dumlt * self.Grad(fht_xi, xzn0)
        self.model_1 = self.minus_Dumlt_gradx_fht_xi
        # model 2 (diffusivity from Reynolds stress)
        # self.model_2 = self.minus_rxx_gradx_fht_xi ???
        self.plus_gradx_fxi = +self.Grad(fxi, xzn0)
        # NOTE(review): cnst inherits the 'ux'-overwritten gamma1 from above.
        cnst = gamma1
        self.minus_cnst_dd_fxi_fdil_o_fht_rxx = -cnst * dd * fxi * fdil / fht_rxx
        # read TYCHO's initial model
        dir_model = os.path.join(os.path.realpath('.'), 'DATA_D', 'INIMODEL', 'imodel.tycho')
        data = np.loadtxt(dir_model, skiprows=26)
        nxmax = 500
        rr = data[1:nxmax, 2]
        vmlt = data[1:nxmax, 8]
        # interpolate the initial-model MLT velocity onto the simulation grid
        u_mltini = np.interp(xzn0, rr, vmlt)
        # model 3 (diffusivity calculated with u_MLT from initial model)
        # NOTE(review): Dumltini2/alpha1 are computed but unused; only the
        # alpha2 variant feeds model_3.
        alpha1 = 1.5
        Dumltini2 = (1. / 3.) * u_mltini * alpha1 * cnvzsize
        alpha2 = 1.6
        Dumltini3 = (1. / 3.) * u_mltini * alpha2 * cnvzsize
        self.model_3 = -Dumltini3 * self.Grad(fht_xi, xzn0)
        # model 4 (Dgauss gradient model)
        ampl = max(Dumltini3)
        xx0 = (bconv + 0.46e8 + tconv) / 2.
        width = 4.e7
        Dgauss = self.gauss(xzn0, ampl, xx0, width)
        self.model_4 = -Dgauss * self.Grad(fht_xi, xzn0)
        # model isotropic turbulence
        uxffuxff = (dduxux / dd - ddux * ddux / (dd * dd))
        uyffuyff = (dduyuy / dd - dduy * dduy / (dd * dd))
        uzffuzff = (dduzuz / dd - dduz * dduz / (dd * dd))
        uxfuxf = (uxux - ux * ux)
        uyfuyf = (uyuy - uy * uy)
        uzfuzf = (uzuz - uz * uz)
        uxfuyf = (uxuy - ux * uy)
        uxfuzf = (uxuz - ux * uz)
        cd1 = 100.  # assumption
        cd2 = 10.
        # q = uxffuxff + uyffuyff + uzffuzff
        q = uxfuxf + uyfuyf + uzfuzf
        self.model_5 = -(dd / (3. * cd1)) * ((q ** 2) / tke_diss) * self.Grad(fht_xi, xzn0)
        Drr = +(tauL / cd2) * uxfuxf + uxy * tauL * (tauL / cd2 ** 2) * (-uxfuyf)
        Drt = +(tauL / cd2) * uxfuyf - uxy * tauL * (tauL / cd2 ** 2) * (uyfuyf)
        Drp = +(tauL / cd2) * uxfuzf - uxy * tauL * (tauL / cd2 ** 2) * (uzfuzf)
        # NOTE(review): Drr2 pairs uxz with uxfuyf (not uxfuzf) -- possibly a
        # copy-paste slip; confirm against the intended Rogers (1989) form.
        Drr1 = +(tauL / cd1) * uxfuxf + uxy * tauL * (tauL / cd1 ** 2) * (-uxfuyf)
        Drr2 = +(tauL / cd1) * uxfuxf + uxz * tauL * (tauL / cd1 ** 2) * (-uxfuyf)
        self.model_6 = dd * (Drr + Drt + Drp) * self.Grad(fht_xi, xzn0)
        self.model_1_rogers1989 = -Drr1 * self.Grad(xi, xzn0)
        self.model_2_rogers1989 = -Drr2 * self.Grad(xi, xzn0)
        # turbulent thermal diffusion model (the extra term)
        self.model_1_rogers1989_minus_turb_thermal_diff = self.model_1_rogers1989 - (Drr1*self.Grad(dd, xzn0)/dd)*xi
        self.turb_thermal_diff = (Drr1*self.Grad(dd, xzn0)/dd)*xi
        # integral model
        intFii = integrate.cumtrapz(self.minus_rxx_gradx_fht_xi+self.minus_xiff_gradx_pp_minus_xiff_gradx_ppff,xzn0,initial=0)
        cD = 1.e-16
        intFi = cD*integrate.cumtrapz(intFii,xzn0,initial=0)
        # NOTE(review): plot_XfluxX2 reads self.fhtflxineut ... self.fhtflxiar36,
        # which stay undefined while the lines below are commented out -- calling
        # that method will raise AttributeError until these are restored.
        #self.fhtflxineut = self.getRAdata(eht, 'ddx0001ux')[intc] - self.getRAdata(eht, 'ddx0001')[intc] * ddux / dd
        #self.fhtflxiprot = self.getRAdata(eht, 'ddx0002ux')[intc] - self.getRAdata(eht, 'ddx0002')[intc] * ddux / dd
        #self.fhtflxihe4 = self.getRAdata(eht, 'ddx0003ux')[intc] - self.getRAdata(eht, 'ddx0003')[intc] * ddux / dd
        #self.fhtflxic12 = self.getRAdata(eht, 'ddx0004ux')[intc] - self.getRAdata(eht, 'ddx0004')[intc] * ddux / dd
        #self.fhtflxio16 = self.getRAdata(eht, 'ddx0005ux')[intc] - self.getRAdata(eht, 'ddx0005')[intc] * ddux / dd
        #self.fhtflxine20 = self.getRAdata(eht, 'ddx0006ux')[intc] - self.getRAdata(eht, 'ddx0006')[intc] * ddux / dd
        #self.fhtflxina23 = self.getRAdata(eht, 'ddx0007ux')[intc] - self.getRAdata(eht, 'ddx0007')[intc] * ddux / dd
        #self.fhtflximg24 = self.getRAdata(eht, 'ddx0008ux')[intc] - self.getRAdata(eht, 'ddx0008')[intc] * ddux / dd
        #self.fhtflxisi28 = self.getRAdata(eht, 'ddx0009ux')[intc] - self.getRAdata(eht, 'ddx0009')[intc] * ddux / dd
        #self.fhtflxip31 = self.getRAdata(eht, 'ddx0010ux')[intc] - self.getRAdata(eht, 'ddx0010')[intc] * ddux / dd
        #self.fhtflxis32 = self.getRAdata(eht, 'ddx0011ux')[intc] - self.getRAdata(eht, 'ddx0011')[intc] * ddux / dd
        #self.fhtflxis34 = self.getRAdata(eht, 'ddx0012ux')[intc] - self.getRAdata(eht, 'ddx0012')[intc] * ddux / dd
        #self.fhtflxicl35 = self.getRAdata(eht, 'ddx0013ux')[intc] - self.getRAdata(eht, 'ddx0013')[intc] * ddux / dd
        #self.fhtflxiar36 = self.getRAdata(eht, 'ddx0014ux')[intc] - self.getRAdata(eht, 'ddx0014')[intc] * ddux / dd
        #
        pp = self.getRAdata(eht, 'pp')[intc]
        tt = self.getRAdata(eht, 'tt')[intc]
        mu = self.getRAdata(eht, 'abar')[intc]
        chim = self.getRAdata(eht, 'chim')[intc]
        chit = self.getRAdata(eht, 'chit')[intc]
        gamma2 = self.getRAdata(eht, 'gamma2')[intc]
        # print(chim,chit,gamma2)
        # override gamma for ideal gas eos (need to be fixed in PROMPI later)
        if ieos == 1:
            cp = self.getRAdata(eht, 'cp')[intc]
            cv = self.getRAdata(eht, 'cv')[intc]
            gamma2 = cp / cv  # gamma1,gamma2,gamma3 = gamma = cp/cv Cox & Giuli 2nd Ed. page 230, Eq.9.110
        lntt = np.log(tt)
        lnpp = np.log(pp)
        lnmu = np.log(mu)
        # calculate temperature gradients
        self.nabla = self.deriv(lntt, lnpp)
        self.nabla_ad = (gamma2 - 1.) / gamma2
        # data for alphaX
        self.rfxi = xiux - xi*ux
        self.ux_rms = (uxux - ux*ux)**0.5
        self.xi_rms = (xisq - xi*xi)**0.5
        # assign global data to be shared across whole class
        self.data_prefix = data_prefix
        self.xzn0 = xzn0
        self.nx = nx
        self.inuc = inuc
        self.element = element
        self.fxi = fxi
        self.fxi1 = fxi1
        self.fxi2 = fxi2
        self.bconv = bconv
        self.tconv = tconv
        self.dd = dd
        self.intFi = intFi
        self.intFii = intFii
        self.fxxi = fxxi
        self.fext = fext
        self.nsdim = nsdim
def plot_XfluxX(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
"""Plot Xflux stratification in the model"""
if self.ig != 1 and self.ig != 2:
print("ERROR(XfluxXEquation.py):" + self.errorGeometry(self.ig))
sys.exit()
# convert nuc ID to string
xnucid = str(self.inuc)
element = self.element
# load x GRID
grd1 = self.xzn0
# load and calculate DATA to plot
plt1 = self.fxi
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [plt1]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title('Xflux X for ' + self.element)
plt.plot(grd1, plt1, color='k', label=r'f')
# convective boundary markers
plt.axvline(self.bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
idxl, idxr = self.idx_bndry(self.bconv, self.tconv)
self.nabla[0:idxl] = 0.
self.nabla[idxr:self.nx] = 0.
self.nabla_ad[0:idxl] = 0.
self.nabla_ad[idxr:self.nx] = 0.
ind = np.where((self.nabla > self.nabla_ad))[0] # superadiabatic region
xzn0inc = self.xzn0[ind[0]]
xzn0outc = self.xzn0[ind[-1]]
# convective boundary markers - only superadiatic regions
plt.axvline(xzn0inc, linestyle=':', linewidth=0.7, color='k')
plt.axvline(xzn0outc, linestyle=':', linewidth=0.7, color='k')
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r'x (cm)'
setylabel = r"$\overline{\rho} \widetilde{X''_i u''_x}$ (g cm$^{-2}$ s$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r'r (cm)'
setylabel = r"$\overline{\rho} \widetilde{X''_i u''_r}$ (g cm$^{-2}$ s$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 18})
# display PLOT
plt.show(block=False)
# save PLOT
plt.savefig('RESULTS/' + self.data_prefix + 'mean_XfluxX_' + element + '.png')
    def plot_XfluxX2(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
        """Plot the Xi fluxes of many species, each scaled by its max inside the convection zone.

        NOTE(review): this method reads self.fhtflxineut ... self.fhtflxiar36,
        which are only assigned in commented-out lines of __init__; calling it
        as-is raises AttributeError until those assignments are restored.
        """
        if self.ig != 1 and self.ig != 2:
            print("ERROR(XfluxXEquation.py):" + self.errorGeometry(self.ig))
            sys.exit()
        # convert nuc ID to string
        xnucid = str(self.inuc)
        element = self.element
        # load x GRID
        grd1 = self.xzn0
        # indices of the grid cells closest to the convection-zone boundaries;
        # fluxes are normalized by their max magnitude inside [il:ib]
        xzn0 = np.asarray(grd1)
        xlm = np.abs(xzn0 - self.bconv)
        xrm = np.abs(xzn0 - self.tconv)
        il = int(np.where(xlm == xlm.min())[0][0])
        ib = int(np.where(xrm == xrm.min())[0][0])
        plt0 = self.fhtflxineut/np.max(np.abs(self.fhtflxineut[il:ib]))
        plt1 = self.fhtflxiprot/np.max(np.abs(self.fhtflxiprot[il:ib]))
        plt2 = self.fhtflxihe4/np.max(np.abs(self.fhtflxihe4[il:ib]))
        plt3 = self.fhtflxic12/np.max(np.abs(self.fhtflxic12[il:ib]))
        plt4 = self.fhtflxio16/np.max(np.abs(self.fhtflxio16[il:ib]))
        plt5 = self.fhtflxine20/np.max(np.abs(self.fhtflxine20[il:ib]))
        plt6 = self.fhtflxina23/np.max(np.abs(self.fhtflxina23[il:ib]))
        plt7 = self.fhtflximg24/np.max(np.abs(self.fhtflximg24[il:ib]))
        plt8 = self.fhtflxisi28/np.max(np.abs(self.fhtflxisi28[il:ib]))
        plt9 = self.fhtflxip31/np.max(np.abs(self.fhtflxip31[il:ib]))
        plt10 = self.fhtflxis32/np.max(np.abs(self.fhtflxis32[il:ib]))
        plt11 = self.fhtflxis34/np.max(np.abs(self.fhtflxis34[il:ib]))
        plt12 = self.fhtflxicl35/np.max(np.abs(self.fhtflxicl35[il:ib]))
        plt13 = self.fhtflxiar36/np.max(np.abs(self.fhtflxiar36[il:ib]))
        # unscaled alternative:
        #plt0 = self.fhtflxineut
        #plt1 = self.fhtflxiprot
        #plt2 = self.fhtflxihe4
        #plt3 = self.fhtflxic12
        #plt4 = self.fhtflxio16
        #plt5 = self.fhtflxine20
        #plt6 = self.fhtflxina23
        #plt7 = self.fhtflximg24
        #plt8 = self.fhtflxisi28
        #plt9 = self.fhtflxip31
        #plt10 = self.fhtflxis32
        #plt11 = self.fhtflxis34
        #plt12 = self.fhtflxicl35
        #plt13 = self.fhtflxiar36
        fig, ax1 = plt.subplots(figsize=(7, 6))
        to_plot = [plt0,plt1,plt2,plt3,plt4,plt5,plt6,plt7,plt8,plt9,plt10,plt11,plt12,plt13]
        self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
        # plot DATA (heavier species are computed above but not drawn)
        plt.title('scaled Xflux X')
        plt.plot(grd1, plt0, label=r"neut")
        plt.plot(grd1, plt1, label=r"$^{1}$H")
        plt.plot(grd1, plt2, label=r"$^{4}$He")
        plt.plot(grd1, plt3, label=r"$^{12}$C")
        plt.plot(grd1, plt4, label=r"$^{16}$O")
        plt.plot(grd1, plt5, label=r"$^{20}$Ne")
        plt.plot(grd1, plt6, label=r"$^{23}$Na")
        #plt.plot(grd1, plt7, label=r"$^{24}$Mg")
        #plt.plot(grd1, plt8, label=r"$^{28}$Si")
        #plt.plot(grd1, plt9, label=r"$^{31}$P")
        #plt.plot(grd1, plt10, label=r"$^{32}$S")
        #plt.plot(grd1, plt11, label=r"$^{34}$S")
        #plt.plot(grd1, plt12, label=r"$^{35}$Cl")
        #plt.plot(grd1, plt13, label=r"$^{36}$Ar")
        # convective boundary markers
        plt.axvline(self.bconv, linestyle='--', linewidth=0.7, color='k')
        plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
        # define and show x/y LABELS
        if self.ig == 1:
            setxlabel = r'x (cm)'
            setylabel = r"$\overline{\rho} \widetilde{X''_i u''_x}$ (g cm$^{-2}$ s$^{-1}$)"
            plt.xlabel(setxlabel)
            plt.ylabel(setylabel)
        elif self.ig == 2:
            setxlabel = r'r (cm)'
            setylabel = r"$\frac{f_X}{max(|f_X|)}$"
            plt.xlabel(setxlabel)
            plt.ylabel(setylabel)
        # show LEGEND
        plt.legend(loc=1, prop={'size': 14},ncol=2)
        # display PLOT
        plt.show(block=False)
        # save PLOT
        plt.savefig('RESULTS/' + self.data_prefix + 'mean_XfluxX2_1' + element + '.png')
        plt.savefig('RESULTS/' + self.data_prefix + 'mean_XfluxX2_1' + element + '.eps')
def plot_XfluxxX(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
"""Plot Xflux stratification in the model"""
if self.ig != 1 and self.ig != 2:
print("ERROR(XfluxXEquation.py):" + self.errorGeometry(self.ig))
sys.exit()
# convert nuc ID to string
xnucid = str(self.inuc)
element = self.element
# load x GRID
grd1 = self.xzn0
# load and calculate DATA to plot
plt1 = self.fxxi
plt_model_int = self.intFii
# create FIGURE
plt.figure(figsize=(7, 6))
# format AXIS, make sure it is exponential
plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
# set plot boundaries
to_plot = [plt1, plt_model_int]
self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
# plot DATA
plt.title('flux of Xflux X for ' + self.element + " " + str(self.nsdim) + "D")
plt.plot(grd1, plt1, color='k', label=r'f')
plt.plot(grd1, plt_model_int, color='r', label=r'int model')
# convective boundary markers
plt.axvline(self.bconv, linestyle='--', linewidth=0.7, color='k')
plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
# define and show x/y LABELS
if self.ig == 1:
setxlabel = r'x (cm)'
setylabel = r"$\overline{\rho} \widetilde{X''_i u''_x u''_x}$ (g cm$^{-2}$ s$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
elif self.ig == 2:
setxlabel = r'r (cm)'
setylabel = r"$\overline{\rho} \widetilde{X''_i u''_r u''_r}$ (g cm$^{-2}$ s$^{-1}$)"
plt.xlabel(setxlabel)
plt.ylabel(setylabel)
# show LEGEND
plt.legend(loc=ilg, prop={'size': 18})
# display PLOT
plt.show(block=False)
# save PLOT
plt.savefig('RESULTS/' + self.data_prefix + 'mean_XfluxxX_' + element + '.png')
def plot_XfluxXRogers1989(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
    """Plot the X flux against several diffusion-type closure models,
    including the Rogers (1989) variants.

    Parameters
    ----------
    LAXIS, xbl, xbr, ybu, ybd
        Axis-limit controls forwarded verbatim to ``self.set_plt_axis``
        (presumably a limits-mode flag plus x/y boundaries -- TODO confirm).
    ilg
        Matplotlib legend location code, used as ``plt.legend(loc=ilg)``.
    """
    # only geometries 1 ('x (cm)' labels) and 2 ('r (cm)' labels) are supported
    if self.ig != 1 and self.ig != 2:
        print("ERROR(XfluxXEquation.py):" + self.errorGeometry(self.ig))
        sys.exit()
    # convert nuc ID to string
    # NOTE(review): xnucid is computed but never used in this method
    xnucid = str(self.inuc)
    element = self.element
    # load x GRID
    grd1 = self.xzn0
    # load and calculate DATA to plot (all multiplied by mean density self.dd)
    plt1 = self.dd*self.fxi1
    plt2 = self.dd*self.fxi2
    plt_model_Dumlt = self.dd*self.model_1 # self.model_1 = self.minus_Dumlt_gradx_fht_xi ; Dumlt = (1./3.)*u_mlt*alpha0*cnvzsize
    # plt_model_Drxx = self.dd*self.model_2 # self.model_2 = self.minus_rxx_gradx_fht_xi
    plt_model_Dumltini = self.dd*self.model_3 # self.model_3 = -Dumltini3 * self.Grad(fht_xi, xzn0)
    plt_model_Dgauss = self.dd*self.model_4 # self.model_4 = -Dgauss * self.Grad(fht_xi, xzn0)
    plt_model_1_rogers1989 = self.dd*self.model_1_rogers1989
    plt_model_2_rogers1989 = self.dd*self.model_2_rogers1989
    plt_model_1_rogers1989_minus_turb_thermal_diff = self.dd*self.model_1_rogers1989_minus_turb_thermal_diff
    # plt_model_int = self.intFi
    # create FIGURE
    plt.figure(figsize=(7, 6))
    # format AXIS, make sure it is exponential
    plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
    # set plot boundaries
    to_plot = [plt1, plt2, plt_model_Dumlt, plt_model_Dumltini,
               plt_model_Dgauss, plt_model_1_rogers1989, plt_model_2_rogers1989,
               plt_model_1_rogers1989_minus_turb_thermal_diff]
    self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
    # plot DATA (some model curves are kept commented out for quick re-enable)
    plt.title('Xflux for ' + self.element)
    plt.plot(grd1, plt1, color='k', label=r"$+\overline{\rho}\widetilde{X''u''_r}$")
    # plt.plot(grd1, plt2, color='r', linestyle='--', label=r"$+\overline{\rho}\overline{X'u'_r}$")
    plt.plot(grd1, plt_model_Dumlt, color='r', label=r"$-D_{MLT} \partial_r \widetilde{X}$")
    # plt.plot(grd1, plt_model_Dumltini, color='pink', label=r"$-D_{MLT}^{ini} \partial_r \widetilde{X}$")
    plt.plot(grd1, plt_model_Dgauss, color='b', label=r"$-D_{MLT}^{gauss} \partial_r \widetilde{X}$")
    plt.plot(grd1, plt_model_1_rogers1989, color='g', label=r"$Rogers (1)$")
    # plt.plot(grd1, plt_model_2_rogers1989, color='y', label=r"$Rogers (2)$")
    plt.plot(grd1, plt_model_1_rogers1989_minus_turb_thermal_diff, color='m', label=r"$Rogers (1) - D (\nabla \rho / \rho) X$")
    # plt.plot(grd1, plt_model_int, color='yellow', label=r"$int model$")
    # convective boundary markers
    plt.axvline(self.bconv, linestyle='--', linewidth=0.7, color='k')
    plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
    # convective boundary markers
    # plt.axvline(self.bconv,linestyle='--',linewidth=0.7,color='k')
    # plt.axvline(self.tconv,linestyle='--',linewidth=0.7,color='k')
    # define and show x/y LABELS
    if self.ig == 1:
        setxlabel = r'x (cm)'
        setylabel = r"$f$ (g cm$^{-2}$ s$^{-1}$)"
        plt.xlabel(setxlabel)
        plt.ylabel(setylabel)
    elif self.ig == 2:
        setxlabel = r'r (cm)'
        setylabel = r"$f$ (g cm$^{-2}$ s$^{-1}$)"
        plt.xlabel(setxlabel)
        plt.ylabel(setylabel)
    # show LEGEND
    plt.legend(loc=ilg, prop={'size': 13}, ncol=2)
    # display PLOT (non-blocking so batch scripts keep running)
    plt.show(block=False)
    # save PLOT in the format selected by self.fext ("png" or "eps")
    if self.fext == "png":
        plt.savefig('RESULTS/' + self.data_prefix + 'mean_XfluxXRogers1989models_' + element + '.png')
    if self.fext == "eps":
        plt.savefig('RESULTS/' + self.data_prefix + 'mean_XfluxXRogers1989models_' + element + '.eps')
def plot_Xflux_gradient(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
    """Plot the radial gradient of the X flux against a model term.

    Draws ``self.plus_gradx_fxi`` (black) and
    ``self.minus_cnst_dd_fxi_fdil_o_fht_rxx`` (red) over the grid
    ``self.xzn0``.

    Parameters
    ----------
    LAXIS, xbl, xbr, ybu, ybd
        Axis-limit controls forwarded verbatim to ``self.set_plt_axis``
        (presumably a limits-mode flag plus x/y boundaries -- TODO confirm).
    ilg
        Matplotlib legend location code, used as ``plt.legend(loc=ilg)``.
    """
    # only geometries 1 ('x (cm)' labels) and 2 ('r (cm)' labels) are supported
    if self.ig != 1 and self.ig != 2:
        print("ERROR(XfluxXEquation.py):" + self.errorGeometry(self.ig))
        sys.exit()
    # convert nuc ID to string
    # NOTE(review): xnucid is computed but never used in this method
    xnucid = str(self.inuc)
    element = self.element
    # load x GRID
    grd1 = self.xzn0
    # load and calculate DATA to plot
    plt1 = self.plus_gradx_fxi
    plt2 = self.minus_cnst_dd_fxi_fdil_o_fht_rxx
    # create FIGURE
    plt.figure(figsize=(7, 6))
    # format AXIS, make sure it is exponential
    plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
    # set plot boundaries
    to_plot = [plt1, plt2]
    self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
    # plot DATA
    plt.title('grad Xflux for ' + self.element)
    plt.plot(grd1, plt1, color='k', label=r"$+\partial_r f$")
    plt.plot(grd1, plt2, color='r', label=r"$.$")
    # convective boundary markers
    # NOTE(review): hard-coded 0.46e8 cm offset on the bottom boundary --
    # looks model-specific; confirm it should not be a parameter
    plt.axvline(self.bconv + 0.46e8, linestyle='--', linewidth=0.7, color='k')
    plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
    # convective boundary markers
    # plt.axvline(self.bconv,linestyle='--',linewidth=0.7,color='k')
    # plt.axvline(self.tconv,linestyle='--',linewidth=0.7,color='k')
    # define and show x/y LABELS
    if self.ig == 1:
        setxlabel = r'x (cm)'
        setylabel = r"$\partial_r \overline{\rho} \widetilde{X''_i u''_r}$ (g cm$^{-3}$ s$^{-1}$)"
        plt.xlabel(setxlabel)
        plt.ylabel(setylabel)
    elif self.ig == 2:
        setxlabel = r'r (cm)'
        setylabel = r"$\partial_r \overline{\rho} \widetilde{X''_i u''_r}$ (g cm$^{-3}$ s$^{-1}$)"
        plt.xlabel(setxlabel)
        plt.ylabel(setylabel)
    # show LEGEND
    plt.legend(loc=ilg, prop={'size': 18})
    # display PLOT (non-blocking so batch scripts keep running)
    plt.show(block=False)
    # save PLOT
    plt.savefig('RESULTS/' + self.data_prefix + 'mean_model_XfluxX_' + element + '.png')
def plot_XfluxX_equation(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
    """Plot all budget terms of the Xi flux transport equation plus the
    residual, with an inset zoom on the inner region.

    Parameters
    ----------
    LAXIS, xbl, xbr, ybu, ybd
        Axis-limit controls forwarded verbatim to ``self.set_plt_axis``
        (presumably a limits-mode flag plus x/y boundaries -- TODO confirm).
    ilg
        Matplotlib legend location code, used as ``plt.legend(loc=ilg)``.
    """
    # only geometries 1 ('x (cm)' labels) and 2 ('r (cm)' labels) are supported
    if self.ig != 1 and self.ig != 2:
        print("ERROR(XfluxXEquation.py):" + self.errorGeometry(self.ig))
        sys.exit()
    # convert nuc ID to string
    # NOTE(review): xnucid is computed but never used in this method
    xnucid = str(self.inuc)
    element = self.element
    # load x GRID
    grd1 = self.xzn0
    # budget terms: lhs = time derivative + advection, rhs = sources/sinks,
    # res = residual of the balance
    lhs0 = self.minus_dt_fxi
    lhs1 = self.minus_div_fht_ux_fxi
    rhs0 = self.minus_div_fxxi
    rhs1 = self.minus_fxi_gradx_fht_ux
    rhs2 = self.minus_rxx_gradx_fht_xi
    rhs3 = self.minus_xiff_gradx_pp_minus_xiff_gradx_ppff
    rhs4 = self.plus_uxff_eht_dd_xidot
    rhs5 = self.plus_gi
    res = self.minus_resXiFlux
    # create FIGURE
    plt.figure(figsize=(7, 6))
    # format AXIS, make sure it is exponential
    plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
    # set plot boundaries
    to_plot = [lhs0, lhs1, rhs0, rhs1, rhs2, rhs3, rhs4, rhs5, res]
    self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
    # plot DATA (labels use x/r notation depending on geometry)
    plt.title('Xflux X equation for ' + self.element + " " + str(self.nsdim) + "D")
    if self.ig == 1:
        plt.plot(grd1, lhs0, color='#8B3626', label=r'$-\partial_t f_i$')
        plt.plot(grd1, lhs1, color='#FF7256', label=r'$-\nabla_x (\widetilde{u}_x f)$')
        plt.plot(grd1, rhs0, color='b', label=r'$-\nabla_x f^x_i$')
        plt.plot(grd1, rhs1, color='g', label=r'$-f_i \partial_x \widetilde{u}_x$')
        plt.plot(grd1, rhs2, color='r', label=r'$-R_{xx} \partial_x \widetilde{X}$')
        # plt.plot(grd1, rhs3, color='cyan',
        #         label=r"$-\overline{X''} \partial_x \overline{P} - \overline{X'' \partial_x P'}$")
        plt.plot(grd1, rhs3, color='cyan',
                 label=r"$-\overline{X'' \partial_x P}$")
        plt.plot(grd1, rhs4, color='purple', label=r"$+\overline{u''_x \rho \dot{X}}$")
        # plt.plot(grd1,rhs5,color='yellow',label=r'$+G$')
        plt.plot(grd1, res, color='k', linestyle='--', label='res')
    elif self.ig == 2:
        plt.plot(grd1, lhs0, color='#8B3626', label=r'$-\partial_t f_i$')
        plt.plot(grd1, lhs1, color='#FF7256', label=r'$-\nabla_r (\widetilde{u}_r f)$')
        plt.plot(grd1, rhs0, color='b', label=r'$-\nabla_r f^r_i$')
        plt.plot(grd1, rhs1, color='g', label=r'$-f_i \partial_r \widetilde{u}_r$')
        plt.plot(grd1, rhs2, color='r', label=r'$-R_{rr} \partial_r \widetilde{X}$')
        plt.plot(grd1, rhs3, color='cyan',
                 label=r"$-\overline{X''} \partial_r \overline{P} - \overline{X'' \partial_r P'}$")
        plt.plot(grd1, rhs4, color='purple', label=r"$+\overline{u''_r \rho \dot{X}}$")
        plt.plot(grd1, rhs5, color='yellow', label=r'$+G$')
        plt.plot(grd1, res, color='k', linestyle='--', label='res')
    # convective boundary markers
    plt.axvline(self.bconv, linestyle='--', linewidth=0.7, color='k')
    plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
    # define and show x/y LABELS
    if self.ig == 1:
        setxlabel = r'x (cm)'
        setylabel = r"g cm$^{-2}$ s$^{-2}$"
        plt.xlabel(setxlabel)
        plt.ylabel(setylabel)
    elif self.ig == 2:
        setxlabel = r'r (cm)'
        setylabel = r"g cm$^{-2}$ s$^{-2}$"
        plt.xlabel(setxlabel)
        plt.ylabel(setylabel)
    # show LEGEND
    plt.legend(loc=ilg, prop={'size': 12}, ncol=2)
    # this is another inset axes over the main axes
    plt.rc('font', size=12.)
    a = plt.axes([0.24, 0.25, .3, .2])
    # NOTE(review): hard-coded zoom window over grid indices [0, 64);
    # the +2 offsets below skip the first two points of rhs3/res --
    # confirm these indices still match the current grid resolution
    ilft = 0
    irgt = 64
    plt.plot(grd1[ilft:irgt], lhs0[ilft:irgt], color='#8B3626')
    plt.plot(grd1[ilft:irgt], lhs1[ilft:irgt], color='#FF7256')
    plt.plot(grd1[ilft:irgt], rhs0[ilft:irgt], color='b')
    plt.plot(grd1[ilft:irgt], rhs1[ilft:irgt], color='g')
    plt.plot(grd1[ilft:irgt], rhs2[ilft:irgt], color='r')
    plt.plot(grd1[ilft+2:irgt], rhs3[ilft+2:irgt], color='cyan')
    plt.plot(grd1[ilft:irgt], rhs4[ilft:irgt], color='purple')
    # plt.plot(grd1[ilft:irgt], rhs5[ilft:irgt], color='yellow')
    plt.plot(grd1[ilft+2:irgt], res[ilft+2:irgt], color='k', linestyle='--', label='res')
    # plt.xticks([])
    # plt.yticks([])
    # define and show x/y LABELS
    if self.ig == 1:
        setxlabel = r'x (cm)'
        plt.xlabel(setxlabel)
    elif self.ig == 2:
        setxlabel = r'r (cm)'
        plt.xlabel(setxlabel)
    # restore default font size after the inset
    plt.rc('font', size=16.)
    # display PLOT (non-blocking so batch scripts keep running)
    plt.show(block=False)
    # save PLOT in the format selected by self.fext ("png" or "eps")
    if self.fext == "png":
        plt.savefig('RESULTS/' + self.data_prefix + 'mean_XfluxXequation_' + element + '.png')
    if self.fext == "eps":
        plt.savefig('RESULTS/' + self.data_prefix + 'mean_XfluxXequation_' + element + '.eps')
def plot_XfluxX_equation2(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
    """Plot the Xi flux equation budget using the alternative form of the
    pressure term (split into -X*rho*g and +Xtilde*rho*g pieces).

    Parameters
    ----------
    LAXIS, xbl, xbr, ybu, ybd
        Axis-limit controls forwarded verbatim to ``self.set_plt_axis``
        (presumably a limits-mode flag plus x/y boundaries -- TODO confirm).
    ilg
        Matplotlib legend location code, used as ``plt.legend(loc=ilg)``.
    """
    # only geometries 1 ('x (cm)' labels) and 2 ('r (cm)' labels) are supported
    if self.ig != 1 and self.ig != 2:
        print("ERROR(XfluxXEquation.py):" + self.errorGeometry(self.ig))
        sys.exit()
    # convert nuc ID to string
    # NOTE(review): xnucid is computed but never used in this method
    xnucid = str(self.inuc)
    element = self.element
    # load x GRID
    grd1 = self.xzn0
    # budget terms (note rhs3 uses the buoyancy form instead of the
    # pressure-gradient form used in plot_XfluxX_equation)
    lhs0 = self.minus_dt_fxi
    lhs1 = self.minus_div_fht_ux_fxi
    rhs0 = self.minus_div_fxxi
    rhs1 = self.minus_fxi_gradx_fht_ux
    rhs2 = self.minus_rxx_gradx_fht_xi
    # rhs3 = self.minus_xiff_gradx_pp_minus_xiff_gradx_ppff
    # rhs3 = self.minus_xiddgg_plus_fht_xi_eht_ddgg
    rhs3a = self.minus_xiddgg
    rhs3b = self.plus_fht_xi_eht_ddgg
    rhs3 = rhs3a + rhs3b
    rhs4 = self.plus_uxff_eht_dd_xidot
    rhs5 = self.plus_gi
    res = self.minus_resXiFlux2
    # create FIGURE
    plt.figure(figsize=(7, 6))
    # format AXIS, make sure it is exponential
    plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
    # set plot boundaries
    to_plot = [lhs0, lhs1, rhs0, rhs1, rhs2, rhs3, rhs4, rhs5, res]
    self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
    # plot DATA (labels use x/r notation depending on geometry)
    plt.title('Xflux X equation for ' + self.element)
    if self.ig == 1:
        plt.plot(grd1, lhs0, color='#8B3626', label=r'$-\partial_t f_i$')
        plt.plot(grd1, lhs1, color='#FF7256', label=r'$-\nabla_x (\widetilde{u}_x f)$')
        plt.plot(grd1, rhs0, color='b', label=r'$-\nabla_x f^x_i$')
        plt.plot(grd1, rhs1, color='g', label=r'$-f_i \partial_x \widetilde{u}_x$')
        plt.plot(grd1, rhs2, color='r', label=r'$-R_{xx} \partial_x \widetilde{X}$')
        plt.plot(grd1, rhs3, color='cyan', label=r"$-\overline{X \rho g_x} + \widetilde{X} \overline{\rho g_x}$")
        plt.plot(grd1, rhs4, color='purple', label=r"$+\overline{u''_x \rho \dot{X}}$")
        # plt.plot(grd1,rhs5,color='yellow',label=r'$+G$')
        plt.plot(grd1, res, color='k', linestyle='--', label='res')
    elif self.ig == 2:
        plt.plot(grd1, lhs0, color='#8B3626', label=r'$-\partial_t f_i$')
        plt.plot(grd1, lhs1, color='#FF7256', label=r'$-\nabla_r (\widetilde{u}_r f)$')
        plt.plot(grd1, rhs0, color='b', label=r'$-\nabla_r f^r_i$')
        plt.plot(grd1, rhs1, color='g', label=r'$-f_i \partial_r \widetilde{u}_r$')
        plt.plot(grd1, rhs2, color='r', label=r'$-R_{rr} \partial_r \widetilde{X}$')
        plt.plot(grd1, rhs3, color='cyan', label=r"$-\overline{X \rho g_r} - \widetilde{X} \overline{\rho g_r}$")
        # plt.plot(grd1,rhs3a,color='cyan',label=r"$-\overline{X \rho g_r}$")
        # plt.plot(grd1,rhs3b,color='brown',label=r"$+\widetilde{X} \overline{\rho g_r}$")
        plt.plot(grd1, rhs4, color='purple', label=r"$+\overline{u''_r \rho \dot{X}}$")
        plt.plot(grd1, rhs5, color='yellow', label=r'$+G$')
        plt.plot(grd1, res, color='k', linestyle='--', label='res')
    # convective boundary markers
    plt.axvline(self.bconv, linestyle='--', linewidth=0.7, color='k')
    plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
    # define and show x/y LABELS
    if self.ig == 1:
        setxlabel = r'x (cm)'
        setylabel = r"g cm$^{-2}$ s$^{-2}$"
        plt.xlabel(setxlabel)
        plt.ylabel(setylabel)
    elif self.ig == 2:
        setxlabel = r'r (cm)'
        setylabel = r"g cm$^{-2}$ s$^{-2}$"
        plt.xlabel(setxlabel)
        plt.ylabel(setylabel)
    # show LEGEND
    plt.legend(loc=ilg, prop={'size': 10})
    # display PLOT (non-blocking so batch scripts keep running)
    plt.show(block=False)
    # save PLOT
    # NOTE(review): always saves .png; siblings honour self.fext -- confirm
    plt.savefig('RESULTS/' + self.data_prefix + 'mean_XfluxXequation2_' + element + '.png')
def gauss(self, x, a, x0, sigma):
    """Evaluate the Gaussian ``a * exp(-(x - x0)^2 / (2 * sigma^2))``.

    Works elementwise for scalar or array-like ``x``; ``a`` is the peak
    amplitude, ``x0`` the centre and ``sigma`` the standard deviation.
    """
    centered = x - x0
    two_var = 2 * (sigma ** 2)
    return a * np.exp(-centered ** 2 / two_var)
def plot_alphaX(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
    """Plot alphaX stratification: the correlation ``self.rfxi`` against the
    rms product ``self.ux_rms * self.xi_rms``, with convective-boundary and
    superadiabatic-region markers.

    Parameters
    ----------
    LAXIS, xbl, xbr, ybu, ybd
        Axis-limit controls forwarded verbatim to ``self.set_plt_axis``
        (presumably a limits-mode flag plus x/y boundaries -- TODO confirm).
    ilg
        Matplotlib legend location code, used as ``plt.legend(loc=ilg)``.
    """
    # only geometries 1 ('x (cm)' labels) and 2 ('r (cm)' labels) are supported
    if self.ig != 1 and self.ig != 2:
        print("ERROR(XfluxXEquation.py):" + self.errorGeometry(self.ig))
        sys.exit()
    # convert nuc ID to string
    # NOTE(review): xnucid is computed but never used in this method
    xnucid = str(self.inuc)
    element = self.element
    # load x GRID
    grd1 = self.xzn0
    # load and calculate DATA to plot
    plt1 = self.rfxi
    plt2 = self.ux_rms*self.xi_rms
    # create FIGURE
    plt.figure(figsize=(7, 6))
    # format AXIS, make sure it is exponential
    plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))
    # set plot boundaries
    to_plot = [plt1,plt2]
    self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)
    # plot DATA
    plt.title('Xflux X for ' + self.element)
    plt.plot(grd1, plt1, color='k', label=r"$+\overline{X'_i u'_x}$")
    plt.plot(grd1, plt2, color='r', label=r"$+X'_{rms} u'_{rms}$")
    # convective boundary markers
    plt.axvline(self.bconv, linestyle='--', linewidth=0.7, color='k')
    plt.axvline(self.tconv, linestyle='--', linewidth=0.7, color='k')
    idxl, idxr = self.idx_bndry(self.bconv, self.tconv)
    # NOTE(review): this zeroes self.nabla / self.nabla_ad IN PLACE outside
    # the convection zone -- a plotting method mutating instance state is
    # surprising; confirm no later caller relies on the original arrays
    self.nabla[0:idxl] = 0.
    self.nabla[idxr:self.nx] = 0.
    self.nabla_ad[0:idxl] = 0.
    self.nabla_ad[idxr:self.nx] = 0.
    ind = np.where((self.nabla > self.nabla_ad))[0]  # superadiabatic region
    # BUG FIX: guard against an empty superadiabatic region -- the original
    # code indexed ind[0]/ind[-1] unconditionally and raised IndexError
    # whenever nabla never exceeds nabla_ad inside the convection zone
    if ind.size > 0:
        xzn0inc = self.xzn0[ind[0]]
        xzn0outc = self.xzn0[ind[-1]]
        # convective boundary markers - only superadiabatic regions
        plt.axvline(xzn0inc, linestyle=':', linewidth=0.7, color='k')
        plt.axvline(xzn0outc, linestyle=':', linewidth=0.7, color='k')
    # define and show x/y LABELS
    if self.ig == 1:
        setxlabel = r'x (cm)'
        setylabel = r"$\overline{X'_i u'_x}$ (g cm$^{-2}$ s$^{-1}$)"
        plt.xlabel(setxlabel)
        plt.ylabel(setylabel)
    elif self.ig == 2:
        setxlabel = r'r (cm)'
        setylabel = r"$\overline{X'_i u'_r}$ (cm s$^{-1}$)"
        plt.xlabel(setxlabel)
        plt.ylabel(setylabel)
    # show LEGEND
    plt.legend(loc=ilg, prop={'size': 18})
    # display PLOT (non-blocking so batch scripts keep running)
    plt.show(block=False)
    # save PLOT
    plt.savefig('RESULTS/' + self.data_prefix + 'mean_alphaX_' + element + '.png')
<reponame>Stevanus-Christian/tensorflow<gh_stars>0
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sharing datasets across training jobs."""
from absl.testing import parameterized
from tensorflow.python.data.experimental.kernel_tests.service import test_base as data_service_test_base
from tensorflow.python.data.experimental.ops import data_service_ops
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class CrossTrainerCacheTest(data_service_test_base.TestBase,
                            parameterized.TestCase):
  """Tests for sharing datasets across jobs using a cross-trainer cache."""

  # V2-only because in the V1 API, `map` does not preserve cardinality.
  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testEnableCrossTrainerCache(self):
    """Two trainers with distinct IDs read identical data via the cache."""
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    dataset1 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 1"))
    self.assertDatasetProduces(dataset1.take(10), list(range(10)))
    # The second client reads the same data from the cross-trainer cache.
    dataset2 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 2"))
    self.assertDatasetProduces(dataset2.take(10), list(range(10)))

  @combinations.generate(
      combinations.times(test_base.default_test_combinations()))
  def testDisableCrossTrainerCacheByDefault(self):
    """Without a cache, clients of the same job consume disjoint data."""
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    dataset1 = self.make_distributed_dataset(dataset, cluster, job_name="job")
    self.assertDatasetProduces(dataset1.take(10), list(range(10)))
    # The two clients use the same job. The second client can't read the data
    # already read by the first client.
    dataset2 = self.make_distributed_dataset(dataset, cluster, job_name="job")
    output = self.getDatasetOutput(dataset2.take(10))
    self.assertGreaterEqual(output[0], 10)

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testConcurrentReaders(self):
    """Many trainers reading in lockstep all see the same elements."""
    cluster = self._create_cluster(
        num_workers=1, cross_trainer_cache_size_bytes=18000)
    num_readers = 20
    num_elements = 50
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    datasets = []
    iterators = []
    for i in range(num_readers):
      distributed_dataset = self.make_distributed_dataset(
          dataset,
          cluster,
          job_name="job",
          cross_trainer_cache=data_service_ops.CrossTrainerCache(
              trainer_id=f"Trainer {i}"),
          max_outstanding_requests=1)
      iterator = self.getNext(distributed_dataset)
      datasets.append(distributed_dataset)
      iterators.append(iterator)
    for i in range(num_elements):
      # All the readers read the same element in one step.
      for j in range(num_readers):
        self.assertEqual(self.evaluate(iterators[j]()), i)

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testSlowClientSkipsData(self):
    """A trainer arriving after cache eviction misses the earliest data."""
    cluster = self._create_cluster(
        num_workers=1, cross_trainer_cache_size_bytes=500)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    dataset1 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 1"))
    self.assertDatasetProduces(dataset1.take(200), list(range(200)))
    dataset2 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 2"))
    dataset2 = dataset2.take(200)
    output = self.getDatasetOutput(dataset2)
    # When the cache is small, the second trainer couldn't read the beginning of
    # the dataset. It can still read 100 elements from the dataset, because the
    # dataset is infinite.
    self.assertGreater(output[0], 0)
    self.assertEqual(self.evaluate(dataset2.cardinality()), 200)

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testSmallCache(self):
    """Every trainer gets its full element count despite cache eviction."""
    cluster = self._create_cluster(
        num_workers=1, cross_trainer_cache_size_bytes=500)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    num_readers = 20
    for i in range(num_readers):
      # Even if the cache is small and may discard old data, each trainer can
      # still read the required number of elements because the input dataset is
      # infinite.
      distributed_dataset = self.make_distributed_dataset(
          dataset,
          cluster,
          job_name="job",
          cross_trainer_cache=data_service_ops.CrossTrainerCache(
              trainer_id=f"Trainer {i}"))
      output = self.getDatasetOutput(distributed_dataset.take(200))
      self.assertLen(output, 200)

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testShuffleDataset(self):
    """Cached reads reproduce the same (shuffled) order for each trainer."""
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat().shuffle(
        buffer_size=100)
    dataset1 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 1"))
    output1 = self.getDatasetOutput(dataset1.take(10))
    dataset2 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 2"))
    output2 = self.getDatasetOutput(dataset2.take(10))
    self.assertEqual(output1, output2)

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testSameTrainerID(self):
    """Clients sharing one trainer ID do not re-read cached data."""
    # Jobs from the same training cluster do not reuse data from the cache.
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    dataset1 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer ID"))
    self.assertDatasetProduces(dataset1.take(10), list(range(10)))
    dataset2 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer ID"))
    output = self.getDatasetOutput(dataset2.take(10))
    self.assertGreaterEqual(output[0], 10)

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testDifferentJobNames(self):
    """Jobs with different names each read the dataset from the start."""
    # TODO(b/221104308): Disallow this use case because it increases RAM usage.
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    dataset1 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job1",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 1"))
    self.assertDatasetProduces(dataset1.take(10), list(range(10)))
    dataset2 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job2",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 2"))
    self.assertDatasetProduces(dataset2.take(10), list(range(10)))

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testCrossTrainerCacheMemoryLimit(self):
    """Placeholder: memory-limit validation is not implemented yet."""
    # TODO(b/221104308): Add a validation to check enough RAM is available.
    pass

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testDynamicSharding(self):
    """Caching composes with dynamic sharding; trainers share some data."""
    cluster = self._create_cluster(num_workers=2)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    dataset1 = self.make_distributed_dataset(
        dataset,
        cluster,
        processing_mode=data_service_ops.ShardingPolicy.DYNAMIC,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 1"))
    output1 = self.getDatasetOutput(dataset1.take(100))
    # The second client reads the same data from the cross-trainer cache.
    dataset2 = self.make_distributed_dataset(
        dataset,
        cluster,
        processing_mode=data_service_ops.ShardingPolicy.DYNAMIC,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 2"))
    output2 = self.getDatasetOutput(dataset2.take(100))
    # Verifies the intersection is non-empty.
    self.assertTrue(set(output1) & set(output2))

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testNoCompression(self):
    """Caching works when both clients disable compression."""
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    dataset1 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        compression=None,
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 1"))
    self.assertDatasetProduces(dataset1.take(10), list(range(10)))
    dataset2 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        compression=None,
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 2"))
    self.assertDatasetProduces(dataset2.take(10), list(range(10)))

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testCompressionMismatch(self):
    """Mismatched compression settings on one job raise InvalidArgument."""
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    dataset1 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 1"))
    self.assertDatasetProduces(dataset1.take(10), list(range(10)))
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                "Data type mismatch"):
      dataset2 = self.make_distributed_dataset(
          dataset,
          cluster,
          job_name="job",
          compression=None,
          cross_trainer_cache=data_service_ops.CrossTrainerCache(
              trainer_id="Trainer 1"))
      self.getDatasetOutput(dataset2)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations()))
  def testRequiresJobName(self):
    """A cross-trainer cache without a job name raises InvalidArgument."""
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Cross-trainer caching requires named jobs. Got empty `job_name`."):
      dataset = self.make_distributed_dataset(
          dataset,
          cluster,
          job_name=None,
          cross_trainer_cache=data_service_ops.CrossTrainerCache(
              trainer_id="Trainer 1"))
      self.getDatasetOutput(dataset)

  @combinations.generate(
      combinations.times(test_base.default_test_combinations()))
  def testDisallowFiniteDataset(self):
    """Caching a finite dataset raises InvalidArgument."""
    cluster = self._create_cluster(num_workers=1)
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Cross-trainer caching requires the input dataset to be infinite."):
      dataset = self.make_distributed_range_dataset(
          10,
          cluster,
          job_name="job",
          cross_trainer_cache=data_service_ops.CrossTrainerCache(
              trainer_id="Trainer 1"))
      self.getDatasetOutput(dataset)

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager"])))
  def testMultipleIterationsForOneDatasetEagerMode(self):
    """Re-iterating one cached dataset in eager mode is disallowed."""
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    dataset1 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 1"))
    # In the eager mode, each iteration creates a new data service job and does
    # not reuse cached data. We disallow this use case.
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Cross-trainer caching requires infinite datasets and disallows "
        "multiple iterations of the same dataset."):
      self.getDatasetOutput(dataset1.take(10))
      self.getDatasetOutput(dataset1.take(10))
      self.getDatasetOutput(dataset1.take(10))

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["graph"])))
  def testMultipleIterationsForOneDatasetGraphMode(self):
    """In graph mode, repeated iterations continue rather than re-read."""
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    dataset1 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 1"))
    # These clients are assumed to be from the same training cluster. Thus, they
    # do not reuse data from the cross-trainer cache.
    output1 = self.getDatasetOutput(dataset1.take(10))
    output1 += self.getDatasetOutput(dataset1.take(10))
    output1 += self.getDatasetOutput(dataset1.take(10))
    self.assertLen(set(output1), 30)
    dataset2 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 2"))
    # These clients reuse some data from the previous clients (not exactly the
    # same data due to client-side buffering).
    output2 = self.getDatasetOutput(dataset2.take(10))
    output2 += self.getDatasetOutput(dataset2.take(10))
    output2 += self.getDatasetOutput(dataset2.take(10))
    self.assertTrue(set(output1) & set(output2))

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testDisallowCoordinatedRead(self):
    """Combining the cache with coordinated reads raises InvalidArgument."""
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Cross-trainer caching does not support coordinated reads."):
      dataset = self.make_distributed_dataset(
          dataset,
          cluster,
          job_name="job",
          num_consumers=1,
          consumer_index=0,
          cross_trainer_cache=data_service_ops.CrossTrainerCache(
              trainer_id="Trainer 1"))
      self.getDatasetOutput(dataset)

  @combinations.generate(
      combinations.times(
          combinations.combine(tf_api_version=2, mode=["eager", "graph"])))
  def testNamedJobMismatch(self):
    """Joining a cached job without a cache config raises InvalidArgument."""
    cluster = self._create_cluster(num_workers=1)
    dataset = dataset_ops.Dataset.range(10000000).repeat()
    dataset1 = self.make_distributed_dataset(
        dataset,
        cluster,
        job_name="job",
        cross_trainer_cache=data_service_ops.CrossTrainerCache(
            trainer_id="Trainer 1"))
    self.assertDatasetProduces(dataset1.take(10), list(range(10)))
    with self.assertRaisesRegex(
        errors.InvalidArgumentError,
        "Existing cross-trainer cache: <enabled>; got <disabled>"):
      dataset2 = self.make_distributed_dataset(
          dataset, cluster, job_name="job", cross_trainer_cache=None)
      self.getDatasetOutput(dataset2)

  def _create_cluster(self,
                      num_workers,
                      cross_trainer_cache_size_bytes=10 * (2**30)):
    """Create a test cluster with `num_workers` workers, each configured
    with a cross-trainer cache of `cross_trainer_cache_size_bytes` bytes
    (default 10 GiB)."""
    # Start with zero workers so every worker can be created with the
    # custom cache size before being attached to the cluster.
    cluster = data_service_test_base.TestCluster(num_workers=0)
    for _ in range(num_workers):
      worker = data_service_test_base.TestWorker(
          dispatcher_address=cluster.dispatcher_address(),
          shutdown_quiet_period_ms=0,
          cross_trainer_cache_size_bytes=cross_trainer_cache_size_bytes)
      worker.start()
      cluster.workers.append(worker)
    return cluster
# Run the TensorFlow test harness when executed as a script.
if __name__ == "__main__":
  test.main()
|
"""
@brief test log(time=400s)
"""
import sys
import unittest
from logging import getLogger
import numpy
import pandas
from onnxruntime import InferenceSession
from pyquickhelper.pycode import ExtTestCase, skipif_circleci, ignore_warnings
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from skl2onnx.common.data_types import (
StringTensorType, FloatTensorType, Int64TensorType,
BooleanTensorType, DoubleTensorType)
from mlprodict.onnxrt import OnnxInference
from mlprodict.onnx_conv import register_converters, to_onnx
from mlprodict.tools.asv_options_helper import get_ir_version_from_onnx
class TestOnnxrtRuntimeLightGbm(ExtTestCase):
def setUp(self):
    """Silence skl2onnx logging and register the custom converters."""
    skl_logger = getLogger('skl2onnx')
    skl_logger.disabled = True
    register_converters()
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_missing(self):
    """_generate_classes falls back to [0, 1] when num_class <= 1 and
    otherwise enumerates the classes."""
    from mlprodict.onnx_conv.operator_converters.parse_lightgbm import (
        WrappedLightGbmBooster)
    for num_class, expected in ((1, [0, 1]), (3, [0, 1, 2])):
        classes = WrappedLightGbmBooster._generate_classes(  # pylint: disable=W0212
            dict(num_class=num_class))
        self.assertEqual(classes.tolist(), expected)
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_onnxrt_python_lightgbm_categorical(self):
    """Check ONNX conversion of a binary LGBMClassifier: mixed categorical
    inputs must be rejected, and a single-float-feature model must convert
    and run through OnnxInference with the expected output shape."""
    from lightgbm import LGBMClassifier
    X = pandas.DataFrame(
        {"A": numpy.random.permutation(['a', 'b', 'c', 'd'] * 75),  # str
         # int
         "B": numpy.random.permutation([1, 2, 3] * 100),
         # float
         "C": numpy.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),
         # bool
         "D": numpy.random.permutation([True, False] * 150),
         "E": pandas.Categorical(numpy.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),
                                 ordered=True)})  # str and ordered categorical
    y = numpy.random.permutation([0, 1] * 150)
    X_test = pandas.DataFrame(
        {"A": numpy.random.permutation(['a', 'b', 'e'] * 20),  # unseen category
         "B": numpy.random.permutation([1, 3] * 30),
         "C": numpy.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
         "D": numpy.random.permutation([True, False] * 30),
         "E": pandas.Categorical(numpy.random.permutation(['z', 'y'] * 30),
                                 ordered=True)})
    cat_cols_actual = ["A", "B", "C", "D"]
    X[cat_cols_actual] = X[cat_cols_actual].astype('category')
    X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')
    gbm0 = LGBMClassifier().fit(X, y)
    exp = gbm0.predict(X_test, raw_scores=False)
    self.assertNotEmpty(exp)
    # multi-input conversion is expected to fail: converter supports one input
    init_types = [('A', StringTensorType()), ('B', Int64TensorType()),
                  ('C', FloatTensorType()), ('D', BooleanTensorType()),
                  ('E', StringTensorType())]
    self.assertRaise(lambda: to_onnx(gbm0, initial_types=init_types), RuntimeError,
                     "at most 1 input(s) is(are) supported")
    # retrain on the single float column so conversion succeeds
    X = X[['C']].values.astype(numpy.float32)
    X_test = X_test[['C']].values.astype(numpy.float32)
    gbm0 = LGBMClassifier().fit(X, y, categorical_feature=[0])
    exp = gbm0.predict_proba(X_test, raw_scores=False)
    model_def = to_onnx(gbm0, X)
    self.assertIn('ZipMap', str(model_def))
    oinf = OnnxInference(model_def)
    # NOTE(review): `y` is reassigned here and shadows the label vector above
    y = oinf.run({'X': X_test})
    self.assertEqual(list(sorted(y)),
                     ['output_label', 'output_probability'])
    df = pandas.DataFrame(y['output_probability'])
    self.assertEqual(df.shape, (X_test.shape[0], 2))
    self.assertEqual(exp.shape, (X_test.shape[0], 2))
    # self.assertEqualArray(exp, df.values, decimal=6)
    @skipif_circleci('stuck')
    @unittest.skipIf(sys.platform == 'darwin', 'stuck')
    @ignore_warnings((RuntimeWarning, UserWarning))
    def test_onnxrt_python_lightgbm_categorical3(self):
        """Three-class variant of test_onnxrt_python_lightgbm_categorical:
        same pipeline, but the probability output must have 3 columns."""
        from lightgbm import LGBMClassifier
        X = pandas.DataFrame(
            {"A": numpy.random.permutation(['a', 'b', 'c', 'd'] * 75), # str
             # int
             "B": numpy.random.permutation([1, 2, 3] * 100),
             # float
             "C": numpy.random.permutation([0.1, 0.2, -0.1, -0.1, 0.2] * 60),
             # bool
             "D": numpy.random.permutation([True, False] * 150),
             "E": pandas.Categorical(numpy.random.permutation(['z', 'y', 'x', 'w', 'v'] * 60),
                                     ordered=True)}) # str and ordered categorical
        y = numpy.random.permutation([0, 1, 2] * 100)
        X_test = pandas.DataFrame(
            {"A": numpy.random.permutation(['a', 'b', 'e'] * 20), # unseen category
             "B": numpy.random.permutation([1, 3] * 30),
             "C": numpy.random.permutation([0.1, -0.1, 0.2, 0.2] * 15),
             "D": numpy.random.permutation([True, False] * 30),
             "E": pandas.Categorical(numpy.random.permutation(['z', 'y'] * 30),
                                     ordered=True)})
        cat_cols_actual = ["A", "B", "C", "D"]
        X[cat_cols_actual] = X[cat_cols_actual].astype('category')
        X_test[cat_cols_actual] = X_test[cat_cols_actual].astype('category')
        gbm0 = LGBMClassifier().fit(X, y)
        exp = gbm0.predict(X_test, raw_scores=False)
        self.assertNotEmpty(exp)
        init_types = [('A', StringTensorType()),
                      ('B', Int64TensorType()),
                      ('C', FloatTensorType()),
                      ('D', BooleanTensorType()),
                      ('E', StringTensorType())]
        # multi-input conversion is expected to fail with this message
        self.assertRaise(lambda: to_onnx(gbm0, initial_types=init_types), RuntimeError,
                         "at most 1 input(s) is(are) supported")
        # retrain on a single float column so conversion is possible
        X = X[['C']].values.astype(numpy.float32)
        X_test = X_test[['C']].values.astype(numpy.float32)
        gbm0 = LGBMClassifier().fit(X, y, categorical_feature=[0])
        exp = gbm0.predict_proba(X_test, raw_scores=False)
        model_def = to_onnx(gbm0, X)
        self.assertIn('ZipMap', str(model_def))
        oinf = OnnxInference(model_def)
        y = oinf.run({'X': X_test})
        self.assertEqual(list(sorted(y)),
                         ['output_label', 'output_probability'])
        df = pandas.DataFrame(y['output_probability'])
        self.assertEqual(df.shape, (X_test.shape[0], 3))
        self.assertEqual(exp.shape, (X_test.shape[0], 3))
        # self.assertEqualArray(exp, df.values, decimal=6)
    @skipif_circleci('stuck')
    @unittest.skipIf(sys.platform == 'darwin', 'stuck')
    @ignore_warnings((RuntimeWarning, UserWarning))
    def test_onnxrt_python_lightgbm_categorical_iris(self):
        """Integer iris variant, binary labels (y % 2): ONNX probabilities
        must match lightgbm for both the sklearn API and a raw Booster
        trained with categorical features."""
        from lightgbm import LGBMClassifier, Dataset, train as lgb_train
        iris = load_iris()
        X, y = iris.data, iris.target
        X = (X * 10).astype(numpy.int32)
        X_train, X_test, y_train, _ = train_test_split(
            X, y, random_state=11)
        # pad the training set with random rows / extra label values
        other_x = numpy.random.randint(
            0, high=10, size=(1500, X_train.shape[1]))
        X_train = numpy.vstack([X_train, other_x]).astype(dtype=numpy.int32)
        y_train = numpy.hstack(
            [y_train, numpy.zeros(500) + 3, numpy.zeros(500) + 4,
             numpy.zeros(500) + 5]).astype(dtype=numpy.int32)
        self.assertEqual(y_train.shape, (X_train.shape[0], ))
        y_train = y_train % 2
        # Classic
        gbm = LGBMClassifier()
        gbm.fit(X_train, y_train)
        exp = gbm.predict_proba(X_test)
        onx = to_onnx(gbm, initial_types=[
            ('X', Int64TensorType([None, X_train.shape[1]]))])
        self.assertIn('ZipMap', str(onx))
        oif = OnnxInference(onx)
        got = oif.run({'X': X_test})
        values = pandas.DataFrame(got['output_probability']).values
        self.assertEqualArray(exp, values, decimal=5)
        # categorical_feature=[0, 1]
        train_data = Dataset(
            X_train, label=y_train,
            feature_name=['c1', 'c2', 'c3', 'c4'],
            categorical_feature=['c1', 'c2'])
        params = {
            "boosting_type": "gbdt", "learning_rate": 0.05,
            "n_estimators": 2, "objective": "binary",
            "max_bin": 5, "min_child_samples": 100,
            'verbose': -1}
        booster = lgb_train(params, train_data)
        exp = booster.predict(X_test)
        onx = to_onnx(booster, initial_types=[
            ('X', Int64TensorType([None, X_train.shape[1]]))])
        self.assertIn('ZipMap', str(onx))
        oif = OnnxInference(onx)
        got = oif.run({'X': X_test})
        values = pandas.DataFrame(got['output_probability']).values
        # Booster.predict returns positive-class probability only
        self.assertEqualArray(exp, values[:, 1], decimal=5)
    @skipif_circleci('stuck')
    @unittest.skipIf(sys.platform == 'darwin', 'stuck')
    @ignore_warnings((RuntimeWarning, UserWarning))
    def test_onnxrt_python_lightgbm_categorical_iris_booster3(self):
        """Like ..._categorical_iris, but without reducing labels mod 2.

        NOTE(review): labels keep 6 values (0..5) while the Booster
        objective below stays 'binary' — confirm this is intended.
        """
        from lightgbm import LGBMClassifier, Dataset, train as lgb_train
        iris = load_iris()
        X, y = iris.data, iris.target
        X = (X * 10).astype(numpy.int32)
        X_train, X_test, y_train, _ = train_test_split(
            X, y, random_state=11)
        # pad the training set with random rows / extra label values
        other_x = numpy.random.randint(
            0, high=10, size=(1500, X_train.shape[1]))
        X_train = numpy.vstack([X_train, other_x]).astype(dtype=numpy.int32)
        y_train = numpy.hstack(
            [y_train, numpy.zeros(500) + 3, numpy.zeros(500) + 4,
             numpy.zeros(500) + 5]).astype(dtype=numpy.int32)
        self.assertEqual(y_train.shape, (X_train.shape[0], ))
        # Classic
        gbm = LGBMClassifier()
        gbm.fit(X_train, y_train)
        exp = gbm.predict_proba(X_test)
        onx = to_onnx(gbm, initial_types=[
            ('X', Int64TensorType([None, X_train.shape[1]]))])
        self.assertIn('ZipMap', str(onx))
        oif = OnnxInference(onx)
        got = oif.run({'X': X_test})
        values = pandas.DataFrame(got['output_probability']).values
        self.assertEqualArray(exp, values, decimal=5)
        # categorical_feature=[0, 1]
        train_data = Dataset(
            X_train, label=y_train,
            feature_name=['c1', 'c2', 'c3', 'c4'],
            categorical_feature=['c1', 'c2'])
        params = {
            "boosting_type": "gbdt", "learning_rate": 0.05,
            "n_estimators": 2, "objective": "binary",
            "max_bin": 5, "min_child_samples": 100,
            'verbose': -1}
        booster = lgb_train(params, train_data)
        exp = booster.predict(X_test)
        onx = to_onnx(booster, initial_types=[
            ('X', Int64TensorType([None, X_train.shape[1]]))])
        self.assertIn('ZipMap', str(onx))
        oif = OnnxInference(onx)
        got = oif.run({'X': X_test})
        values = pandas.DataFrame(got['output_probability']).values
        # Booster.predict returns positive-class probability only
        self.assertEqualArray(exp, values[:, 1], decimal=5)
    @skipif_circleci('stuck')
    @unittest.skipIf(sys.platform == 'darwin', 'stuck')
    @ignore_warnings((RuntimeWarning, UserWarning))
    def test_onnxrt_python_lightgbm_categorical_iris_booster3_real(self):
        """Float-feature iris: converts the underlying booster of a fitted
        LGBMClassifier, then a multiclass Booster trained directly."""
        from lightgbm import LGBMClassifier, Dataset, train as lgb_train
        iris = load_iris()
        X, y = iris.data, iris.target
        X = (X * 10).astype(numpy.float32)
        X_train, X_test, y_train, _ = train_test_split(
            X, y, random_state=11)
        # Classic
        gbm = LGBMClassifier()
        gbm.fit(X_train, y_train)
        exp = gbm.predict_proba(X_test)
        # convert the raw booster, not the sklearn wrapper
        onx = to_onnx(gbm.booster_, initial_types=[
            ('X', FloatTensorType([None, X_train.shape[1]]))])
        self.assertIn('ZipMap', str(onx))
        oif = OnnxInference(onx)
        got = oif.run({'X': X_test})
        values = pandas.DataFrame(got['output_probability']).values
        self.assertEqualArray(exp, values, decimal=5)
        # categorical_feature=[0, 1]
        train_data = Dataset(
            X_train, label=y_train,
            feature_name=['c1', 'c2', 'c3', 'c4'],
            categorical_feature=['c1', 'c2'])
        params = {
            "boosting_type": "gbdt", "learning_rate": 0.05,
            "n_estimators": 2, "objective": "multiclass",
            "max_bin": 5, "min_child_samples": 100,
            'verbose': -1, 'num_class': 3}
        booster = lgb_train(params, train_data)
        exp = booster.predict(X_test)
        onx = to_onnx(booster, initial_types=[
            ('X', FloatTensorType([None, X_train.shape[1]]))])
        self.assertIn('ZipMap', str(onx))
        oif = OnnxInference(onx)
        got = oif.run({'X': X_test})
        values = pandas.DataFrame(got['output_probability']).values
        self.assertEqualArray(exp, values, decimal=5)
    @skipif_circleci('stuck')
    @unittest.skipIf(sys.platform == 'darwin', 'stuck')
    @ignore_warnings((RuntimeWarning, UserWarning))
    def test_onnxrt_python_lightgbm_categorical_iris_dataframe(self):
        """Booster trained on an all-categorical DataFrame; checks the
        python runtime, onnxruntime, and the 'cast' conversion option."""
        from lightgbm import Dataset, train as lgb_train
        iris = load_iris()
        X, y = iris.data, iris.target
        X = (X * 10).astype(numpy.int32)
        X_train, X_test, y_train, _ = train_test_split(
            X, y, random_state=11)
        # pad the training set with random rows / extra label values
        other_x = numpy.random.randint(
            0, high=10, size=(1500, X_train.shape[1]))
        X_train = numpy.vstack([X_train, other_x]).astype(dtype=numpy.int32)
        y_train = numpy.hstack(
            [y_train, numpy.zeros(500) + 3, numpy.zeros(500) + 4,
             numpy.zeros(500) + 5]).astype(dtype=numpy.int32)
        self.assertEqual(y_train.shape, (X_train.shape[0], ))
        y_train = y_train % 2
        # every column is declared categorical via pandas dtype
        df_train = pandas.DataFrame(X_train)
        df_train.columns = ['c1', 'c2', 'c3', 'c4']
        df_train['c1'] = df_train['c1'].astype('category')
        df_train['c2'] = df_train['c2'].astype('category')
        df_train['c3'] = df_train['c3'].astype('category')
        df_train['c4'] = df_train['c4'].astype('category')
        df_test = pandas.DataFrame(X_test)
        df_test.columns = ['c1', 'c2', 'c3', 'c4']
        df_test['c1'] = df_test['c1'].astype('category')
        df_test['c2'] = df_test['c2'].astype('category')
        df_test['c3'] = df_test['c3'].astype('category')
        df_test['c4'] = df_test['c4'].astype('category')
        # categorical_feature=[0, 1]
        train_data = Dataset(
            df_train, label=y_train)
        params = {
            "boosting_type": "gbdt", "learning_rate": 0.05,
            "n_estimators": 2, "objective": "binary",
            "max_bin": 5, "min_child_samples": 100,
            'verbose': -1}
        booster = lgb_train(params, train_data)
        exp = booster.predict(X_test)
        onx = to_onnx(booster, df_train)
        self.assertIn('ZipMap', str(onx))
        oif = OnnxInference(onx)
        got = oif.run(df_test)
        values = pandas.DataFrame(got['output_probability']).values
        self.assertEqualArray(exp, values[:, 1], decimal=5)
        # same model under onnxruntime
        onx.ir_version = get_ir_version_from_onnx()
        oif = OnnxInference(onx, runtime='onnxruntime1')
        got = oif.run(df_test)
        values = pandas.DataFrame(got['output_probability']).values
        self.assertEqualArray(exp, values[:, 1], decimal=5)
        # conversion with the 'cast' option must insert a Cast node
        onx = to_onnx(booster, df_train,
                      options={booster.__class__: {'cast': True}})
        self.assertIn('op_type: "Cast"', str(onx))
        oif = OnnxInference(onx)
        got = oif.run(df_test)
        values = pandas.DataFrame(got['output_probability']).values
        self.assertEqualArray(exp, values[:, 1], decimal=5)
@skipif_circleci('stuck')
@unittest.skipIf(sys.platform == 'darwin', 'stuck')
@ignore_warnings((RuntimeWarning, UserWarning))
def test_lightgbm_booster_classifier(self):
from lightgbm import Dataset, train as lgb_train
X = numpy.array([[0, 1], [1, 1], [2, 0], [1, 2]], dtype=numpy.float32)
y = [0, 1, 0, 1]
data = Dataset(X, label=y)
model = lgb_train({'boosting_type': 'rf', 'objective': 'binary',
'n_estimators': 3, 'min_child_samples': 1,
'subsample_freq': 1, 'bagging_fraction': 0.5,
'feature_fraction': 0.5},
data)
model_onnx = to_onnx(model, X, verbose=0, rewrite_ops=True)
self.assertNotEmpty(model_onnx)
# missing values
    @staticmethod
    def _predict_with_onnx(model, X):
        """Run ONNX *model* on *X* via onnxruntime; return column 0 of output 0.

        NOTE(review): shadowed by the second ``_predict_with_onnx``
        definition later in this class (which also accepts DataFrames),
        so this earlier definition is effectively dead code.
        """
        session = InferenceSession(model.SerializeToString())
        output_names = [s_output.name for s_output in session.get_outputs()]
        input_names = [s_input.name for s_input in session.get_inputs()]
        if len(input_names) > 1:
            raise RuntimeError(
                "Test expects one input. Found multiple inputs: %r."
                "" % input_names)
        input_name = input_names[0]
        return session.run(output_names, {input_name: X})[0][:, 0]
    def _assert_almost_equal(self, actual, desired, decimal=7, frac=1.0, msg=""):
        """Assert that more than ``frac`` of entries agree within 10**-decimal.

        NOTE(review): ``assertLesser(frac, 1)`` fails for the default
        ``frac=1.0``; all call sites pass frac < 1, so the default looks
        unusable as written — confirm intended.
        """
        self.assertGreater(frac, 0)
        self.assertLesser(frac, 1)
        # number of elements within the absolute tolerance
        success_abs = (abs(actual - desired) <= (10 ** -decimal)).sum()
        success_rel = success_abs / len(actual)
        if success_abs == 0:
            raise AssertionError(
                "Wrong conversion. %s\n-----\n%r\n------\n%r"
                "" % (msg, desired[:5], actual[:5]))
        self.assertGreater(success_rel, frac)
    @skipif_circleci('stuck')
    @unittest.skipIf(sys.platform == 'darwin', 'stuck')
    @ignore_warnings((RuntimeWarning, UserWarning))
    def test_missing_values(self):
        """A NaN in the test input must be scored identically by lightgbm
        and by its ONNX conversion."""
        from lightgbm import LGBMRegressor
        _N_DECIMALS = 5
        _FRAC = 0.9999
        _y = numpy.array([0, 0, 1, 1, 1])
        _X_train = numpy.array([[1.0, 0.0], [1.0, -1.0], [1.0, -1.0],
                                [2.0, -1.0], [2.0, -1.0]],
                               dtype=numpy.float32)
        _X_test = numpy.array([[1.0, numpy.nan]], dtype=numpy.float32)
        _INITIAL_TYPES = [
            ("input", FloatTensorType([None, _X_train.shape[1]]))]
        regressor = LGBMRegressor(
            objective="regression", min_data_in_bin=1, min_data_in_leaf=1,
            n_estimators=1, learning_rate=1)
        regressor.fit(_X_train, _y)
        regressor_onnx = to_onnx(
            regressor, initial_types=_INITIAL_TYPES, rewrite_ops=True)
        y_pred = regressor.predict(_X_test)
        y_pred_onnx = self._predict_with_onnx(regressor_onnx, _X_test)
        self._assert_almost_equal(
            y_pred, y_pred_onnx, decimal=_N_DECIMALS, frac=_FRAC,
            msg="Missing values.")
    @skipif_circleci('stuck')
    @unittest.skipIf(sys.platform == 'darwin', 'stuck')
    @ignore_warnings((RuntimeWarning, UserWarning))
    def test_missing_values_rf(self):
        """Same NaN-handling check as test_missing_values, with the
        random-forest boosting type."""
        from lightgbm import LGBMRegressor
        _N_DECIMALS = 5
        _FRAC = 0.9999
        _y = numpy.array([0, 0, 1, 1, 1])
        _X_train = numpy.array([[1.0, 0.0], [1.0, -1.0], [1.0, -1.0],
                                [2.0, -1.0], [2.0, -1.0]],
                               dtype=numpy.float32)
        _X_test = numpy.array([[1.0, numpy.nan]], dtype=numpy.float32)
        _INITIAL_TYPES = [
            ("input", FloatTensorType([None, _X_train.shape[1]]))]
        regressor = LGBMRegressor(
            objective="regression", boosting_type='rf',
            n_estimators=10, bagging_freq=1, bagging_fraction=0.5)
        regressor.fit(_X_train, _y)
        regressor_onnx = to_onnx(
            regressor, initial_types=_INITIAL_TYPES, rewrite_ops=True)
        y_pred = regressor.predict(_X_test)
        y_pred_onnx = self._predict_with_onnx(regressor_onnx, _X_test)
        self._assert_almost_equal(
            y_pred, y_pred_onnx, decimal=_N_DECIMALS, frac=_FRAC,
            msg="Missing values.")
# objectives
@staticmethod
def _calc_initial_types(X):
_DTYPE_MAP = {"float64": DoubleTensorType,
"float32": FloatTensorType}
dtypes = set(str(dtype) for dtype in X.dtypes)
if len(dtypes) > 1:
raise RuntimeError(
"Test expects homogenous input matrix. Found multiple dtypes: %r." % dtypes)
dtype = dtypes.pop()
tensor_type = _DTYPE_MAP[dtype]
return [("input", tensor_type(X.shape))]
@staticmethod
def _predict_with_onnx(model, X):
session = InferenceSession(model.SerializeToString())
output_names = [s_output.name for s_output in session.get_outputs()]
input_names = [s_input.name for s_input in session.get_inputs()]
if len(input_names) > 1:
raise RuntimeError(
"Test expects one input. Found multiple inputs: %r." % input_names)
input_name = input_names[0]
if hasattr(X, "values"):
return session.run(output_names, {input_name: X.values})[0][:, 0]
return session.run(output_names, {input_name: X})[0][:, 0]
    @skipif_circleci('stuck')
    @unittest.skipIf(sys.platform == 'darwin', 'stuck')
    @ignore_warnings((RuntimeWarning, UserWarning))
    def test_objective(self):
        """Each regression objective (regression, poisson, gamma) must
        survive conversion; predictions compared within tolerance."""
        from lightgbm import LGBMRegressor
        _N_ROWS = 10000
        _N_COLS = 10
        _N_DECIMALS = 5
        _FRAC = 0.9997
        _X = pandas.DataFrame(numpy.random.random(
            size=(_N_ROWS, _N_COLS)).astype(numpy.float32))
        _Y = pandas.Series(numpy.random.random(size=_N_ROWS))
        _objectives = ("regression", "poisson", "gamma")
        for objective in _objectives:
            with self.subTest(X=_X, objective=objective):
                initial_types = self._calc_initial_types(_X)
                regressor = LGBMRegressor(objective=objective)
                regressor.fit(_X, _Y)
                regressor_onnx = to_onnx(
                    regressor, initial_types=initial_types,
                    rewrite_ops=True)
                y_pred = regressor.predict(_X)
                y_pred_onnx = self._predict_with_onnx(regressor_onnx, _X)
                self._assert_almost_equal(
                    y_pred, y_pred_onnx, decimal=_N_DECIMALS, frac=_FRAC,
                    msg="Objective=%r" % objective)
    @skipif_circleci('stuck')
    @unittest.skipIf(sys.platform == 'darwin', 'stuck')
    @ignore_warnings((RuntimeWarning, UserWarning))
    def test_objective_boosting_rf(self):
        """Regression objective with boosting='rf' must survive conversion."""
        from lightgbm import LGBMRegressor
        _N_ROWS = 10000
        _N_COLS = 10
        _N_DECIMALS = 5
        _FRAC = 0.9997
        _X = pandas.DataFrame(numpy.random.random(
            size=(_N_ROWS, _N_COLS)).astype(numpy.float32))
        _Y = pandas.Series(numpy.random.random(size=_N_ROWS))
        _objectives = ("regression",)
        for objective in _objectives:
            with self.subTest(X=_X, objective=objective):
                initial_types = self._calc_initial_types(_X)
                regressor = LGBMRegressor(
                    objective=objective, boosting='rf', bagging_freq=3,
                    bagging_fraction=0.5, n_estimators=10)
                regressor.fit(_X, _Y)
                regressor_onnx = to_onnx(
                    regressor, initial_types=initial_types,
                    rewrite_ops=True)
                y_pred = regressor.predict(_X)
                # NOTE(review): /10 appears to average the raw sum over the
                # 10 rf trees — confirm against the converter's rf handling
                y_pred_onnx = self._predict_with_onnx(regressor_onnx, _X) / 10
                self._assert_almost_equal(
                    y_pred, y_pred_onnx, decimal=_N_DECIMALS, frac=_FRAC,
                    msg="Objective=%r" % objective)
    @ignore_warnings((RuntimeWarning, UserWarning))
    def test_lgbm_regressor10(self):
        """Small LGBMRegressor (4 trees): plain conversion and the
        'split' conversion option must both match sklearn predictions."""
        from lightgbm import LGBMRegressor
        data = load_iris()
        X, y = data.data, data.target
        X = X.astype(numpy.float32)
        X_train, X_test, y_train, _ = train_test_split(X, y, random_state=0)
        reg = LGBMRegressor(max_depth=2, n_estimators=4, seed=0)
        reg.fit(X_train, y_train)
        expected = reg.predict(X_test)
        # float
        onx = to_onnx(reg, X_train, rewrite_ops=True)
        oinf = OnnxInference(onx)
        got1 = oinf.run({'X': X_test})['variable']
        # float split
        onx = to_onnx(reg, X_train, options={'split': 2},
                      rewrite_ops=True)
        oinf = OnnxInference(onx)
        got2 = oinf.run({'X': X_test})['variable']
        # final check
        self.assertEqualArray(expected, got1, decimal=5)
        self.assertEqualArray(expected, got2, decimal=5)
    @ignore_warnings((RuntimeWarning, UserWarning))
    def test_lgbm_regressor(self):
        """LGBMRegressor (100 trees): double conversion must be exact,
        float conversions approximate; the 'split' option must reduce
        the float error below the un-split float conversion."""
        from lightgbm import LGBMRegressor
        data = load_iris()
        X, y = data.data, data.target
        X = X.astype(numpy.float32)
        X_train, X_test, y_train, _ = train_test_split(X, y, random_state=0)
        reg = LGBMRegressor(max_depth=2, n_estimators=100, seed=0)
        reg.fit(X_train, y_train)
        expected = reg.predict(X_test)
        # double
        onx = to_onnx(reg, X_train.astype(numpy.float64),
                      rewrite_ops=True)
        self.assertIn("TreeEnsembleRegressorDouble", str(onx))
        oinf = OnnxInference(onx)
        got0 = oinf.run(
            {'X': X_test.astype(numpy.float64)})['variable']
        self.assertEqualArray(expected, got0)
        # float
        onx = to_onnx(reg, X_train, rewrite_ops=True)
        oinf = OnnxInference(onx)
        got1 = oinf.run({'X': X_test})['variable']
        self.assertEqualArray(expected, got1, decimal=5)
        # float split
        onx = to_onnx(reg, X_train, options={'split': 10},
                      rewrite_ops=True)
        oinf = OnnxInference(onx)
        got2 = oinf.run({'X': X_test})['variable']
        self.assertEqualArray(expected, got2, decimal=5)
        oinf = OnnxInference(onx, runtime='onnxruntime1')
        got3 = oinf.run({'X': X_test})['variable']
        self.assertEqualArray(expected, got3.ravel(), decimal=5)
        # final: double is the most accurate, split beats plain float
        d0 = numpy.abs(expected.ravel() - got0).mean()
        d1 = numpy.abs(expected.ravel() - got1).mean()
        d2 = numpy.abs(expected.ravel() - got2).mean()
        self.assertGreater(d1, d0)
        self.assertGreater(d1, d2)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
|
<reponame>tum-i4/SACPS-robotics-system<gh_stars>0
#!/usr/bin/env python3
from typing import Tuple, List
import math
import numpy
import numpy as np
from math import inf
from scipy import signal
from random import randrange, shuffle
import rospy
from geometry_msgs.msg import Point
from nav_msgs.msg import OccupancyGrid
from std_msgs.msg import Header
from roaming_task.srv import GetRoamingTask, GetRoamingTaskRequest, GetRoamingTaskResponse
from commons.OccupancyMap import OccupancyMap, Cell
from commons_msgs.msg import Goal
class RoamingTask():
    """ROS node that proposes 'roaming' exploration goals from a costmap.

    Zero-valued cells of the convolved combined costmap are candidate
    goals; candidates must map to free cells of the static global costmap,
    and a window of ``window_size`` cells is blanked around each selected
    candidate to spread the goals out.
    """

    def __init__(self):
        # Only run when the cumulative map and global roaming are enabled.
        if rospy.get_param('/use_cum_map', True) and rospy.get_param('/global_roam', True):
            self.threshold = 1
            kernel_size = 3
            self.kernel = np.ones([kernel_size, kernel_size])
            self.window_size = 8
            self.discovered_pub = rospy.Publisher('/discovered', Goal, queue_size=10)
            rospy.init_node('roaming_task')
            msg: OccupancyGrid = rospy.wait_for_message('/custom_layers/combined', OccupancyGrid)
            self.costmap_grid = numpy.asarray(msg.data).reshape([msg.info.width, msg.info.height])
            self.costmap = OccupancyMap.from_message(msg, with_data=False)
            msg = rospy.wait_for_message('/robot_0/move_base/global_costmap/costmap', OccupancyGrid)
            self.static_costmap_grid = numpy.asarray(msg.data).reshape([msg.info.width, msg.info.height])
            self.static_costmap = OccupancyMap.from_message(msg, with_data=False)
            rospy.Subscriber('/custom_layers/combined', OccupancyGrid, self.update_costmap)
            rospy.Timer(rospy.Duration(rospy.get_param('observed_cells_update_freq', 80)), self.publish_roaming_goals)
            rospy.Service('get_roaming_task', GetRoamingTask, self.get_roaming_goal)
            rospy.spin()

    def publish_roaming_goals(self, _=None):
        """Publish every current critical point as a virtual Goal."""
        roaming_points = self.determine_critical_points(self.costmap_grid, self.window_size)
        for x, y in roaming_points:
            print(x, y)
            world_p = self.costmap.costmap2world(Cell(x, y))
            print(world_p)
            self.discovered_pub.publish(Goal(x=world_p.x, y=world_p.y, is_virtual=True,
                                             header=Header(stamp=rospy.get_rostime(), frame_id="map")))

    def get_roaming_goal(self, req: GetRoamingTaskRequest):
        """Return the critical point closest to the requesting robot."""
        robot_position = Point(req.task.x, req.task.y, 0)
        roaming_points = self.determine_critical_points(self.costmap_grid, self.window_size)
        min_p = None
        min_dist = inf
        for x, y in roaming_points:
            world_p = self.costmap.costmap2world(Cell(x, y))
            d = math.sqrt((world_p.x - robot_position.x) ** 2 + (world_p.y - robot_position.y) ** 2)
            if d < min_dist:
                min_dist = d
                min_p = world_p
        if min_p is not None:
            # BUG FIX: previously returned the *last* iterated point
            # (world_p) instead of the closest one (min_p).
            return GetRoamingTaskResponse(task=Goal(None, min_p.x, min_p.y, True), success=True)
        else:
            return GetRoamingTaskResponse(task=Goal(None, 0, 0, True), success=False)

    def convolve(self, face, kernel) -> numpy.array:
        """FFT-convolve the map with the kernel, zeroing sub-threshold cells."""
        convolved = signal.fftconvolve(face, kernel, mode='same')
        convolved[convolved < self.threshold] = 0
        return convolved

    def determine_critical_points(self, costmap, radius: int) -> List[Tuple[int, int]]:
        """Pick spaced-out free cells (value 0 after convolution) as goals.

        Border cells are blanked first; each chosen point blanks a
        ``radius`` window around itself via ``slide``.
        """
        convolved = self.convolve(costmap.T, self.kernel)
        # blank the map border (the grid is assumed to be 51x51 here)
        i = 0
        while i <= 50:
            convolved[i][0] = 1000
            convolved[i][1] = 1000
            convolved[i][50] = 1000
            convolved[i][49] = 1000
            i += 1
        j = 0
        while j <= 50:
            convolved[0][j] = 1000
            convolved[50][j] = 1000
            convolved[1][j] = 1000
            convolved[49][j] = 1000
            j += 1
        numpy.set_printoptions(threshold=np.inf)
        print(convolved)
        result_points: List[Tuple[int, int]] = list()
        while self.all_points_iterated(convolved):
            # BUG FIX: reset per pass; previously a pass with no valid
            # candidate re-appended the point found in the previous pass.
            x_result = y_result = None
            result = np.where(convolved == 0)
            indices = list(range(result[0].shape[0]))
            shuffle(indices)
            for i in indices:
                x = result[0][i]
                y = result[1][i]
                world_point = self.costmap.costmap2world(Cell(x, y))
                static_costmap_point = self.static_costmap.world2costmap(world_point)
                # accept only cells that are free in the static map
                if self.static_costmap_grid.T[static_costmap_point.x, static_costmap_point.y] <= 0:
                    print(
                        f'{world_point} static map at {static_costmap_point} :'
                        f'{self.static_costmap_grid[static_costmap_point.x, static_costmap_point.y]}')
                    x_result = x
                    y_result = y
                    break
                else:
                    convolved[x][y] = 1000
            if x_result is not None:
                result_points.append((x_result, y_result))
                convolved = self.slide(convolved, x_result, y_result, radius)
            else:
                break
        return result_points

    @staticmethod
    def slide(convolved, x: int, y: int, radius: int):
        """Mark a (2*radius+1)^2 window around (x, y) as visited (1000).

        NOTE(review): window cells at negative indices wrap around via
        Python indexing and the interior branch skips index 0 — confirm
        the intended border behaviour.
        """
        i = -radius
        while i <= radius:
            j = -radius
            while j <= radius:
                if 0 < x + i <= 50 and 0 < y + j <= 50:
                    convolved[x + i][y + j] = 1000
                elif x == 0 and y + j <= 50:
                    convolved[0][y + j] = 1000
                elif y == 0 and x + i <= 50:
                    # BUG FIX: was convolved[0][x + i] (transposed indices)
                    convolved[x + i][0] = 1000
                elif y == 50 and x + i <= 50:
                    convolved[x + i][50] = 1000
                elif x == 50 and y + j <= 50:
                    # BUG FIX: was convolved[y + j][50] (transposed indices)
                    convolved[50][y + j] = 1000
                j += 1
            i += 1
        return convolved

    @staticmethod
    def all_points_iterated(array):
        """Return the number of unvisited (zero) cells; truthy while any remain."""
        result = np.where(array == 0)
        return len(result[0])

    def update_costmap(self, msg: OccupancyGrid):
        """Cache the latest combined costmap as a width x height grid."""
        self.costmap_grid = numpy.asarray(msg.data).reshape([msg.info.width, msg.info.height])
|
<gh_stars>0
import os
import json
import pathlib
import logging
from sys import argv, exit, stdout
from datetime import date
initialized = False


def main():
    """Initialise module globals: config path, data path, settings, log path.

    Safe to call repeatedly; subsequent calls are no-ops.
    """
    global securePath
    global settings
    global logPath
    global configPath
    global initialized
    if initialized:
        return
    # BUG FIX: `settings = None` used to run *before* the initialized
    # guard, wiping the loaded settings on every re-call of main().
    settings = None
    # config file, stored within the package
    configPath = f'{pathlib.Path(__file__).resolve().parent}/config.json'
    # Determines where data is stored; by default, this is ~/securedata.
    # BUG FIX: getConfigItem may return None and os.path.expanduser(None)
    # raises TypeError, so only expand a configured value.
    configured = getConfigItem('path_securedata')
    securePath = (os.path.expanduser(configured) if configured
                  else f'{os.path.expanduser("~")}/securedata')
    # initialize settings file if it doesn't exist
    try:
        with open(f'{securePath}/settings.json', 'r+') as f:
            f.seek(0, os.SEEK_END)
    except OSError:
        # narrowed from a bare `except:`; only file-system errors expected
        if not os.path.exists(securePath):
            os.makedirs(securePath)
        with open(f'{securePath}/settings.json', 'x+') as f:
            print(f"\n\nWarning: settings.json not found; created a blank one in {securePath}")
            print("You can change this location by calling 'securedata config'.\n\n")
            f.write('{}')
    settings = json.load(open(f'{securePath}/settings.json'))
    logPath = getItem('path_log') or setItem(
        'path_log', f"{securePath}/log", fileName='settings.json')
    if not os.path.exists(logPath):
        os.makedirs(logPath)
    if not logPath[-1] == '/':
        logPath += '/'
    initialized = True
def getItem(*attribute):
    """
    Return a nested property from settings.json, or None if missing.

    Usage: getItem('person', 'name')
    """
    global settings
    # idiom fix: compare to None with `is`, not `==`
    if settings is None:
        return None
    _settings = settings
    for index, item in enumerate(attribute):
        if item in _settings:
            _settings = _settings[item]
        else:
            print(
                f"Warning: {item} not found in {_settings if index > 0 else f'{securePath}/settings.json'}")
            return None
    return _settings
def setItem(*attribute, value=None, fileName='settings.json'):
    """
    Set a nested property in settings.json (or another `fileName`).

    Usage: setItem('person', 'name', 'Tyler')
    The last positional argument is the value to set, unless `value=` is
    given explicitly. Returns the value set.
    """
    global settings
    secureFullPath = f"{securePath}/{fileName}"
    # BUG FIX: `if not value` swallowed legitimate falsy values (0,
    # False, "", []); only fall back to the last positional argument
    # when no explicit value was provided.
    if value is None:
        value = attribute[-1]
    _settings = settings if fileName == 'settings.json' else json.load(
        open(secureFullPath))
    # iterate through entire JSON object and replace 2nd to last attribute with value
    partition = _settings
    for index, item in enumerate(attribute[:-1]):
        if item not in partition:
            # create the missing level (or set the value at the leaf)
            partition[item] = value if index == len(attribute) - 2 else {}
            partition = partition[item]
            print(
                f"Warning: Adding new key '{item}' to {partition if index > 0 else secureFullPath}")
        else:
            if index == len(attribute) - 2:
                partition[item] = value
            else:
                partition = partition[item]
    with open(secureFullPath, 'w+') as file:
        json.dump(_settings, file, indent=4)
    return value
def getFileAsArray(item, filePath=None):
    """
    Return the file's contents as a list of lines (empty list on failure).
    """
    global logPath
    if filePath is None:
        filePath = logPath
    elif filePath == "notes":
        filePath = getItem('path_tasks_notes')
    if not filePath[-1] == '/':
        filePath += '/'
    # pull from cloud
    # NOTE(review): this pull runs for *every* path, not only "notes",
    # while writeFile pushes only for "notes" — confirm the asymmetry.
    try:
        os.system(f"rclone copy {getItem('path_cloud_notes')} {filePath}")
    except Exception as e:
        log(f"Could not pull Notes from cloud: {e}", level="error")
    try:
        content = open(filePath + item, "r").read()
        return content.split('\n')
    except Exception as e:
        log(f"getFileAsArray: {e}", level="error")
        # BUG FIX: return an empty list, not "", to match the documented
        # array return type.
        return []
def writeFile(fileName, filePath=None, content=None, append=False):
    """
    Write `content` to `filePath`/`fileName`, creating folders if needed.

    When `filePath` is "notes", the notes folder is resolved from settings
    and the result is pushed to the cloud afterwards.
    """
    global logPath
    requested_path = filePath
    if filePath is None:
        filePath = logPath
    elif filePath == "notes":
        filePath = getItem('path_tasks_notes')
    if content is None:
        content = ""
    if not os.path.exists(filePath):
        os.makedirs(filePath)
    mode = 'a+' if append else 'w+'
    with open(filePath + "/" + fileName, mode) as handle:
        handle.write(content)
    # push to cloud
    if requested_path == "notes":
        try:
            os.system(f"rclone copy {filePath} {getItem('path_cloud_notes')}")
        except Exception as e:
            log(f"Could not sync Notes to cloud: {e}", level="error")
def getConfigItem(key=None):
    """Return `key` from the package config file, or None for unknown keys.

    If the config file does not exist yet and `key` is 'path_securedata',
    bootstrap that entry from the parent of the configured log path.
    """
    global configPath
    try:
        with open(configPath, 'r+') as file:
            return json.load(file)[key]
    except FileNotFoundError:
        if key == 'path_securedata':
            # first run: derive the data path from the log path and persist it
            setConfigItem(key, str(pathlib.Path(getItem('path_log')).resolve().parent))
            return str(pathlib.Path(getItem('path_log')).resolve().parent)
    except KeyError:
        return None
def setConfigItem(key=None, value=None):
    """
    Update the internal configuration file and return the value set.

    Exits with status 1 when `value` is empty.
    """
    global configPath
    if value == "":
        print("No changes were made.")
        exit(1)
    else:
        # error correction
        if key == 'path_securedata' and value[0] != '/' and value[0] != '~':
            value = f"/{value}"
        if key == 'path_securedata' and value[-1] == '/':
            value = f"{value[:-1]}"
        # warn about potential problems
        if not os.path.exists(os.path.expanduser(value)):
            print(f"Warning: {value} is not a valid path.")
        if value[0] == '~':
            print("Warning: using tilde expansions may cause problems if using securedata for multiple users. It is recommended to use full paths.")
    try:
        with open(configPath, 'r+') as file:
            config = json.load(file)
    except FileNotFoundError:
        with open(configPath, 'x+') as f:
            print("Warning: existing config file not found; created a new one")
            f.write('{}')
        config = {}
    config[key] = value
    # CONSISTENCY FIX: write back through configPath instead of
    # re-deriving the same package path inline.
    with open(configPath, 'w+') as file:
        json.dump(config, file, indent=4)
    print(f"\n\nUpdated configuration file ({configPath}).")
    print(f"{key} is now {value}")
    return value
def getLogger(logName=None, level=logging.INFO, filePath=None):
    """
    Return a logger with the given name and level, writing both to stdout
    and to a file under a per-day directory.
    """
    today = str(date.today())
    if filePath is None:
        filePath = f"{logPath}{today}"
    if logName is None:
        logName = f"LOG_DAILY {today}"
    # create path if necessary
    if not os.path.exists(filePath):
        print(f"Creating {filePath}")
        os.makedirs(filePath)
    logger = logging.getLogger(logName)
    logger.setLevel(level)
    # drop any handlers left over from a previous call
    if logger.handlers:
        logger.handlers = []
    formatter = logging.Formatter("%(asctime)s — %(levelname)s — %(message)s")
    for handler in (logging.StreamHandler(stdout),
                    logging.FileHandler(f"{filePath}/{logName}", mode='a')):
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger
def log(message=None, logName=None, level="info", filePath=None):
    """
    Write `message` at `level` through a logger from getLogger().
    Unknown levels are reported and the message logged as an error.
    """
    if message is None:
        message = ""
    # level name -> (logging level, logger method name)
    dispatch = {
        "info": (logging.INFO, "info"),
        "debug": (logging.DEBUG, "debug"),
        "warn": (logging.WARN, "warning"),
        "warning": (logging.WARN, "warning"),
        "error": (logging.ERROR, "error"),
        "critical": (logging.CRITICAL, "critical"),
    }
    if level is None:
        level = "info"
    if level in dispatch:
        log_level, method = dispatch[level]
        logger = getLogger(logName=logName, level=log_level, filePath=filePath)
        getattr(logger, method)(message)
    else:
        logger = getLogger(logName=logName, level=logging.ERROR, filePath=filePath)
        logger.error(f"Unknown log level: {level}; using ERROR")
        logger.error(message)
# Initialize
main()
if __name__ == "__main__":
    print(f"SecureData is a library not intended to be directly run. See README.md.")
    # `securedata config` lets the user relocate the data directory
    if argv[-1] == 'config':
        setConfigItem('path_securedata', input("Enter the full path of where you want to store all data:\n"))
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ucs_vlan_find
short_description: Find VLANs on Cisco UCS Manager
description:
- Find VLANs on Cisco UCS Manager based on different criteria.
extends_documentation_fragment: ucs
options:
pattern:
description:
- Regex pattern to find within the name property of the fabricVlan class.
- This is required if C(vlanid) parameter is not supplied.
type: str
fabric:
description:
- "The fabric configuration of the VLAN. This can be one of the following:"
- "common - The VLAN applies to both fabrics and uses the same configuration parameters in both cases."
- "A — The VLAN only applies to fabric A."
- "B — The VLAN only applies to fabric B."
choices: [common, A, B]
default: common
type: str
vlanid:
description:
- The unique string identifier assigned to the VLAN.
- A VLAN ID can be between '1' and '3967', or between '4048' and '4093'.
- This is required if C(pattern) parameter is not supplied.
type: str
requirements:
- ucsmsdk
author:
- <NAME> (@dx0xm)
- CiscoUcs (@CiscoUcs)
version_added: '2.9'
'''
EXAMPLES = r'''
- name: Get all vlans in fabric A
ucs_vlan_find:
hostname: 172.16.143.150
username: admin
password: password
fabric: 'A'
pattern: '.'
- name: Confirm if vlan 15 is present
ucs_vlan_find:
hostname: 172.16.143.150
username: admin
password: password
vlanid: '15'
'''
RETURN = r'''
vlan_list:
description: basic details of vlans found
returned: on success
type: list
sample: [
{
"id": "0",
"name": "vlcloud1"
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def main():
    """Entry point: query UCS Manager for fabricVlan objects matching the
    requested fabric plus either an exact VLAN id or a name pattern, then
    report their names/ids via module.exit_json()."""
    spec = ucs_argument_spec
    spec.update(
        fabric=dict(type='str', default='common', choices=['common', 'A', 'B']),
        pattern=dict(type='str'),
        vlanid=dict(type='str')
    )
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
        required_one_of=[['pattern', 'vlanid']]
    )
    ucs = UCSModule(module)

    # Build the UCS query filter: always restrict to ethernet LAN VLANs.
    query_filters = ['(cloud,"ethlan")']
    fabric = module.params['fabric']
    if fabric != 'common':
        query_filters.append('(switch_id,"' + fabric + '")')
    vlanid = module.params['vlanid']
    if vlanid:
        # Exact id match takes precedence over the name pattern.
        query_filters.append('(id,"' + vlanid + '")')
    else:
        query_filters.append('(name,"' + module.params['pattern'] + '")')

    found = ucs.login_handle.query_classid("fabricVlan", filter_str=' and '.join(query_filters))
    if found is None:
        module.fail_json(msg="Failed to query vlan objects")

    vlan_list = [dict(name=ob.name, id=ob.id) for ob in found]
    module.exit_json(changed=False,
                     vlan_list=vlan_list)


if __name__ == '__main__':
    main()
|
<filename>testTransition.py<gh_stars>0
# modified
import unittest
from transition import Board
class TestBoardMethods(unittest.TestCase):
    """Tests for transition.Board: move validation, move listing and moving.

    FIX: several decorators below had been corrupted to ``<EMAIL>("...")``
    (repository anonymizer artifact); restored to ``@unittest.skip`` which
    matches the reason strings ("... already tested").
    """

    def setUp(self):
        # 3x3 board: X pieces on the top row, O pieces on the bottom, empty middle.
        self.threeX3List = [['X', 'X', 'X'],
                            ['.', '.', '.'],
                            ['O', 'O', 'O'],
                            ]
        self.threeX3Board = Board(self.threeX3List, 'X')

    def tearDown(self):
        pass

    @unittest.skip("False value already tested")
    def test_is_valid_false(self):
        pairs_false = [[(0, 0), (0, 0)],    # False, src = dst
                       [(0, 0), (0, 2)],    # False, two units, same row
                       [(0, 0), (-1, 0)],   # False, out of index
                       [(0, 0), (0, 1)],    # False, same row
                       [(0, 0), (1, -1)],   # False, out of index
                       [(0, 0), (1, 2)],    # False, moving two units
                       [(0, 0), (2, 0)],    # False, moving two units
                       [(0, 0), (2, 2)],    # False, moving two units
                       [(0, 0), (11, 11)]   # False, out of index
                       ]
        for pair in pairs_false:
            with self.subTest(pair=pair):
                self.assertIs(self.threeX3Board.is_valid(pair[0], pair[1]), False)

    @unittest.skip("False value already tested")
    def test_is_valid_true(self):
        pairs_true = [[(0, 0), (1, 0)],   # True, forward
                      [(0, 0), (1, 1)],   # True, diagonal left
                      [(0, 1), (1, 0)],   # True, diagonal left
                      [(0, 1), (1, 1)],   # True, forward
                      [(0, 1), (1, 2)],   # True, diagonal right
                      [(2, 0), (1, 0)],   # True, forward
                      [(2, 0), (1, 1)],   # True, diagonal right
                      [(2, 2), (1, 2)],   # True, forward
                      [(2, 1), (1, 0)]    # True, diagonal left
                      ]
        for pair in pairs_true:
            with self.subTest(pair=pair):
                self.assertTrue(self.threeX3Board.is_valid(pair[0], pair[1]))

    @unittest.skip("False value already tested")
    def test_get_moves_returns_empty(self):
        # Points where the returned move list must be empty.
        dots = [(1, 0),   # [], no player, column-0
                (1, 1),   # [], no player, middle
                (1, 2)    # [], no player, rightmost column
                ]
        for each in dots:
            with self.subTest(each=each):
                self.assertEqual(self.threeX3Board.get_moves(each), [])

    @unittest.skip("False value already tested")
    def test_get_moves_for_playerO_at_positions(self):
        dots = [(2, 0),   # playerO, column-0
                (2, 1),   # playerO, middle
                (2, 2)    # playerO, rightmost column
                ]
        self.assertEqual(self.threeX3Board.get_moves(dots[0]), [(1, 0), (1, 1)])
        self.assertEqual(self.threeX3Board.get_moves(dots[1]), [(1, 0), (1, 1), (1, 2)])
        self.assertEqual(self.threeX3Board.get_moves(dots[2]), [(1, 1), (1, 2)])

    @unittest.skip("False value already tested")
    def test_get_moves_for_playerX_at_positions(self):
        dots = [(0, 0),   # playerX, column-0
                (0, 1),   # playerX, middle
                (0, 2)    # playerX, rightmost column
                ]
        self.assertEqual(self.threeX3Board.get_moves(dots[0]), [(1, 0), (1, 1)])
        self.assertEqual(self.threeX3Board.get_moves(dots[1]), [(1, 0), (1, 1), (1, 2)])
        self.assertEqual(self.threeX3Board.get_moves(dots[2]), [(1, 1), (1, 2)])

    def test_all_moves_for_playerX(self):
        """Supposed to return a dictionary with (x,y):['L','F','R']"""
        self.assertCountEqual(self.threeX3Board.all_moves('X')[(0, 0)], ['L', 'F'])
        self.assertCountEqual(self.threeX3Board.all_moves('X')[(0, 1)], ['L', 'F', 'R'])
        self.assertCountEqual(self.threeX3Board.all_moves('X')[(0, 2)], ['F', 'R'])
        # The syntax does not seem to be working
        # self.assertRaises(TypeError, self.threeX3Board.all_moves(), 'G')

    def test_move_forward(self):
        old_new_pairs = [[(0, 0), (1, 0)],
                         [(2, 0), (1, 0)],
                         [(0, 1), (1, 1)],
                         [(2, 1), (1, 1)],
                         [(0, 2), (1, 2)],
                         [(2, 2), (1, 2)]
                         ]
        for pair in old_new_pairs:
            with self.subTest(pair=pair):
                player = self.threeX3Board.get_sym(pair[0])
                self.threeX3Board.move(pair[0], 'F')
                self.assertIs(self.threeX3Board.get_sym((pair[0])), '.')      # emptied
                self.assertIs(self.threeX3Board.get_sym((pair[1])), player)   # newly occupied

    def test_move_dright_playerO(self):
        pass

    def test_move_dleft_for_playerO(self):
        pass


if __name__ == '__main__':
    unittest.main()
|
<filename>api/gsearch.py
# -*- coding: utf-8 -*-
import os
import sys, io
from collections import namedtuple
# from selenium import webdriver
# from selenium.common.exceptions import NoSuchElementException
# from selenium.webdriver.common.keys import Keys
from pprint import pprint
from joblib import Parallel, delayed
import requests
from bs4 import BeautifulSoup, Comment
import html5lib
import re
# Lightweight record for one Google search-results row.
SearchResultRow = namedtuple(
    'SearchResultRow',
    ['title', 'url', 'display_url', 'dis']
)

# Record for a fetched article's raw HTML (not referenced in the visible code).
ArticleResultRow = namedtuple(
    'ArticleResultRow',
    ['html']
)

# os.environ['MOZ_HEADLESS'] = '1'
# Force UTF-8 stdout so Japanese search results print cleanly on any console.
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
class GoogleScrapy:
    """Scrape Google (Japan, past-week results) for a keyword and fetch the
    plain text of each hit.

    The selenium-based code paths are kept commented out; the active
    implementation uses requests + BeautifulSoup only.
    """

    def __init__(self, keyword, default_wait=1):
        # Base query URL: pws=0 disables personalization, tbs=qdr:w limits to the past week.
        self.url = 'https://www.google.co.jp/search?pws=0&tbs=qdr:w'
        self.keyword = keyword
        # self.default_wait = default_wait
        # self.driver = None
        self.searches = []   # SearchResultRow entries filled by get_search()
        self.articles = []   # article texts, parallel to self.searches, filled by start()

    def enter_keyword(self):
        # NOTE(review): dead code — depends on self.driver and Keys from the
        # commented-out selenium setup; calling it now raises AttributeError/NameError.
        self.driver.get(self.url)
        self.driver.find_element_by_id('lst-ib').send_keys(self.keyword)
        self.driver.find_element_by_id('lst-ib').send_keys(Keys.RETURN)

    # def get_search(self):
    #     all_search = self.driver.find_elements_by_class_name('rc')
    #     for data in all_search:
    #         title = data.find_element_by_tag_name('h3').text
    #         url = data.find_element_by_css_selector(
    #             'h3 > a').get_attribute('href')
    #         display_url = data.find_element_by_tag_name('cite').text
    #         try:
    #             dis = data.find_element_by_class_name('st').text
    #         except NoSuchElementException:
    #             dis = ''
    #         result = SearchResultRow(title, url, display_url, dis)
    #         self.searches.append(result)

    def get_search(self):
        """Fetch the results page and collect up to 10 result URLs into
        self.searches. On a request error, logs it and leaves searches as-is."""
        try:
            resp = requests.get(self.url + '&q=' + self.keyword, timeout=1)
        except requests.exceptions.RequestException as e:
            pprint(e)
            return
        soup = BeautifulSoup(resp.content, 'html5lib')
        # '.r a' anchors hold relative hrefs; only the URL field is real,
        # the other row fields are placeholder strings.
        el_url = soup.select('.r a')
        n_articles = min(10, len(el_url))
        for i in range(n_articles):
            url = 'http://google.co.jp' + el_url[i].get('href')
            self.searches.append(SearchResultRow('title', url, 'display_url', 'desc'))

    # @staticmethod
    # def get_article(url):
    #     pprint(url)
    #     driver = webdriver.Firefox()
    #     driver.get(url)
    #     driver.implicitly_wait(1)
    #     try:
    #         html = driver.execute_script("return document.body.innerHTML")
    #     except NoSuchElementException:
    #         html = ''
    #     except:
    #         html = ''
    #     return html

    @staticmethod
    def get_article(url):
        """Download *url* and return its visible text ('' on request error)."""
        try:
            resp = requests.get(url, timeout=1)
        except requests.exceptions.RequestException as e:
            pprint(e)
            return ""
        soup = BeautifulSoup(resp.content, 'html5lib')
        # [s.decompose() for s in soup('style')]
        # [s.decompose() for s in soup('script')]
        # Replace style/script nodes with newlines so their code doesn't
        # leak into the extracted text.
        [s.replace_with('\n') for s in soup('style')]
        [s.replace_with('\n') for s in soup('script')]
        article = soup.get_text()
        # for comment in soup(text=lambda x: isinstance(x, Comment)):
        #     comment.extract()
        # for script in soup.find_all('script', src=False):
        #     script.decompose()
        # for text in soup.find_all(text=True):
        #     if text.strip():
        #         article += text
        return article

    def start(self):
        """Run the full pipeline: search, then fetch all articles in parallel."""
        # try:
        #     self.driver = webdriver.Firefox()
        #     self.driver.implicitly_wait(self.default_wait)
        #     self.enter_keyword()
        self.get_search()
        # joblib fans the article downloads out across all cores (n_jobs=-1).
        self.articles = Parallel(n_jobs=-1)([delayed(self.get_article)(search.url) for search in self.searches])
        # finally:
        #     self.driver.quit()
|
#!/usr/bin/env python
import os.path
import re
import subprocess
import sys
from in_file import InFile
from name_utilities import enum_for_css_keyword
from name_utilities import upper_first_letter
import in_generator
import license
HEADER_TEMPLATE = """
%(license)s
#ifndef %(class_name)s_h
#define %(class_name)s_h
#include "core/css/parser/CSSParserMode.h"
#include <string.h>
namespace blink {
enum CSSValueID {
%(value_keyword_enums)s
};
const int numCSSValueKeywords = %(value_keywords_count)d;
const size_t maxCSSValueKeywordLength = %(max_value_keyword_length)d;
const char* getValueName(CSSValueID);
bool isValueAllowedInMode(unsigned short id, CSSParserMode mode);
} // namespace blink
#endif // %(class_name)s_h
"""
GPERF_TEMPLATE = """
%%{
%(license)s
#include "%(class_name)s.h"
#include "core/css/HashTools.h"
#include <string.h>
#ifdef _MSC_VER
// Disable the warnings from casting a 64-bit pointer to 32-bit long
// warning C4302: 'type cast': truncation from 'char (*)[28]' to 'long'
// warning C4311: 'type cast': pointer truncation from 'char (*)[18]' to 'long'
#pragma warning(disable : 4302 4311)
#endif
namespace blink {
static const char valueListStringPool[] = {
%(value_keyword_strings)s
};
static const unsigned short valueListStringOffsets[] = {
%(value_keyword_offsets)s
};
%%}
%%struct-type
struct Value;
%%omit-struct-type
%%language=C++
%%readonly-tables
%%compare-strncmp
%%define class-name %(class_name)sHash
%%define lookup-function-name findValueImpl
%%define hash-function-name value_hash_function
%%define slot-name nameOffset
%%define word-array-name value_word_list
%%pic
%%enum
%%%%
%(value_keyword_to_enum_map)s
%%%%
const Value* findValue(register const char* str, register unsigned int len)
{
return CSSValueKeywordsHash::findValueImpl(str, len);
}
const char* getValueName(CSSValueID id)
{
ASSERT(id > 0 && id < numCSSValueKeywords);
return valueListStringPool + valueListStringOffsets[id - 1];
}
bool isValueAllowedInMode(unsigned short id, CSSParserMode mode)
{
switch (id) {
%(ua_sheet_mode_values_keywords)s
return isUASheetBehavior(mode);
%(quirks_mode_or_ua_sheet_mode_values_keywords)s
return isUASheetBehavior(mode) || isQuirksModeBehavior(mode);
default:
return true;
}
}
} // namespace blink
"""
class CSSValueKeywordsWriter(in_generator.Writer):
    """Generates CSSValueKeywords.h and (via gperf) CSSValueKeywords.cpp
    from the CSS value keyword .in file.

    FIX: materialized the lazy ``map``/``filter`` results as lists. Under
    Python 3, generate_header() consumed the map object in the join and then
    called len() on it, which raises TypeError.
    """
    class_name = "CSSValueKeywords"
    defaults = {
        'mode': None,
    }

    def __init__(self, file_paths):
        in_generator.Writer.__init__(self, file_paths)
        self._outputs = {(self.class_name + ".h"): self.generate_header,
                         (self.class_name + ".cpp"): self.generate_implementation,
                         }
        self._value_keywords = self.in_file.name_dictionaries
        # CSSValueInvalid occupies enum value 0, so real keywords start at 1.
        first_keyword_id = 1
        for offset, keyword in enumerate(self._value_keywords):
            keyword['lower_name'] = keyword['name'].lower()
            keyword['enum_name'] = enum_for_css_keyword(keyword['name'])
            keyword['enum_value'] = first_keyword_id + offset
            if keyword['name'].startswith('-internal-'):
                assert keyword['mode'] is None, 'Can\'t specify mode for value keywords with the prefix "-internal-".'
                keyword['mode'] = 'UASheet'
            else:
                assert keyword['mode'] != 'UASheet', 'UASheet mode only value keywords should have the prefix "-internal-".'

    def _enum_declaration(self, keyword):
        # One "Name = value," line of the CSSValueID enum body.
        return " %(enum_name)s = %(enum_value)s," % keyword

    def _case_value_keyword(self, keyword):
        # One "case Name:" label for isValueAllowedInMode's switch.
        return "case %(enum_name)s:" % keyword

    def generate_header(self):
        # List, not map: the result is both joined and len()-counted below.
        enum_entries = [self._enum_declaration(keyword)
                        for keyword in [{'enum_name': 'CSSValueInvalid', 'enum_value': 0}] + self._value_keywords]
        return HEADER_TEMPLATE % {
            'license': license.license_for_generated_cpp(),
            'class_name': self.class_name,
            'value_keyword_enums': "\n".join(enum_entries),
            'value_keywords_count': len(enum_entries),
            'max_value_keyword_length': max(len(keyword['name']) for keyword in self._value_keywords),
        }

    def _value_keywords_with_mode(self, mode):
        # Return a list (not a lazy filter) so callers may iterate repeatedly.
        return [keyword for keyword in self._value_keywords if keyword['mode'] == mode]

    def generate_implementation(self):
        # Byte offsets of each keyword inside the NUL-separated string pool.
        keyword_offsets = []
        current_offset = 0
        for keyword in self._value_keywords:
            keyword_offsets.append(current_offset)
            current_offset += len(keyword["name"]) + 1
        gperf_input = GPERF_TEMPLATE % {
            'license': license.license_for_generated_cpp(),
            'class_name': self.class_name,
            'value_keyword_strings': '\n'.join('    "%(name)s\\0"' % keyword for keyword in self._value_keywords),
            'value_keyword_offsets': '\n'.join('  %d,' % offset for offset in keyword_offsets),
            'value_keyword_to_enum_map': '\n'.join('%(lower_name)s, %(enum_name)s' % keyword for keyword in self._value_keywords),
            'ua_sheet_mode_values_keywords': '\n    '.join(self._case_value_keyword(k) for k in self._value_keywords_with_mode('UASheet')),
            'quirks_mode_or_ua_sheet_mode_values_keywords': '\n    '.join(self._case_value_keyword(k) for k in self._value_keywords_with_mode('QuirksOrUASheet')),
        }
        # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
        # self.gperf_path is provided by the in_generator.Writer base class.
        gperf_args = [self.gperf_path, '--key-positions=*', '-P', '-n']
        gperf_args.extend(['-m', '50'])  # Pick best of 50 attempts.
        gperf_args.append('-D')  # Allow duplicate hashes -> More compact code.
        gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
        return gperf.communicate(gperf_input)[0]


if __name__ == "__main__":
    in_generator.Maker(CSSValueKeywordsWriter).main(sys.argv)
|
<gh_stars>1-10
#! /usr/bin/env python
import collections
import datetime
import locale
import pathlib
import shutil
from typing import Dict, Iterable, Iterator, List
import jinja2
import mistune
from pygments import highlight
from pygments.formatters import html
from pygments.lexers import get_lexer_by_name
# Russian locale so date formatting/sorting behave as the site expects.
# NOTE(review): raises locale.Error if ru_RU.UTF-8 is absent — confirm target hosts.
locale.setlocale(locale.LC_ALL, "ru_RU.UTF-8")

# One parsed post: body mapping, index visibility flag, date, and URL slug.
MetaPost = collections.namedtuple(
    "MetaPost", ["body", "listed", "date", "url"]
)
class HighlightRenderer(mistune.Renderer):
    """Mistune renderer that colorizes fenced code blocks with Pygments."""

    def block_code(self, code, lang):
        # Without a language tag, fall back to a plain escaped <pre> block.
        if not lang:
            print("no lang")
            return f"\n<pre><code>{mistune.escape(code)}</code></pre>\n"
        lexer = get_lexer_by_name(lang, stripall=True)
        formatter = html.HtmlFormatter()
        # Inline styles so the output needs no separate Pygments stylesheet.
        formatter.noclasses = True
        return highlight(code, lexer, formatter)
def translit(title: str) -> str:
    """Transliterate a Russian title into a lowercase ASCII URL slug.

    Cyrillic letters map to Latin (di)graphs, spaces become dashes, other
    alphanumerics pass through unchanged, everything else is dropped, and
    runs of dashes are collapsed and trimmed from the ends.
    """
    cyr = "абвгдеёжзийклмнопрстуфхцчшщьъыэюя "
    lat = ("a", "b", "v", "g", "d", "e", "e", "zh", "z", "i", "iy",
           "k", "l", "m", "n", "o", "p", "r", "s", "t", "u", "f", "h",
           "ts", "ch", "sh", "sch", "", "", "y", "e", "u", "ya", "-")
    mapping = dict(zip(cyr, lat))

    pieces = []
    for ch in title.strip().lower():
        if ch in mapping:
            pieces.append(mapping[ch])
        elif ch.isalnum():
            pieces.append(ch)
    slug = "".join(pieces)
    while "--" in slug:
        slug = slug.replace("--", "-")
    return slug.strip("-")
def parse(lines: Iterable[str]) -> Dict[str, str]:
    """Split a post's lines into metadata fields and content.

    A line shaped like ``<!--key value-->`` stores ``value`` under ``key``
    (when ``key`` is a valid identifier); any other line is appended to the
    ``content`` entry. Malformed comment lines are silently dropped.
    """
    open_tag, close_tag = "<!--", "-->"
    post = collections.defaultdict(str)
    for raw in lines:
        is_meta = raw.lstrip().startswith(open_tag) and raw.rstrip().endswith(close_tag)
        if not is_meta:
            post["content"] += raw
            continue
        stripped = raw.strip()
        cut = stripped.find(" ")
        key = stripped[len(open_tag):cut]
        if cut > 0 and key.isidentifier():
            post[key] = stripped[cut + 1:-len(close_tag)]
    return post
def posts_generator(
    md_files: Iterable[pathlib.Path], output_folder: pathlib.Path
) -> Iterator[MetaPost]:
    """Parse each markdown file and yield a MetaPost per post.

    Files whose names start with "_" are parsed but marked unlisted.
    Raises ValueError (messages in Russian, for the site author) on a
    bad/missing title or date.

    NOTE(review): output_folder is never used here — candidate for removal
    (would require updating the caller).
    """
    for md in md_files:
        body = None
        with md.open() as file:
            body = parse(file)
        # The URL slug is the transliterated title; an empty slug means the
        # title had no usable letters.
        post_url = translit(body["title"])
        if post_url == "":
            raise ValueError(
                """{}: неправильное или отсутствующее поле title.
                Поле title должно содержать буквы""".format(md.name)
            )
        try:
            date = datetime.datetime.strptime(
                body["date"].strip(), "%d.%m.%Y"
            )
        except ValueError as std_date_exception:
            raise ValueError(
                """{}: неправильное или отсутствующее поле date.
                Дата должна быть в формате дд.мм.ГГГГ""".format(md.name)
            ) from std_date_exception
        yield MetaPost(
            body=body, url=str(post_url),
            listed=not md.name.startswith("_"), date=date
        )
def get_md_files() -> Iterator[pathlib.Path]:
    """Ensure the ``md-files`` source folder exists and yield its *.md paths."""
    source_dir = pathlib.Path("md-files")
    source_dir.mkdir(exist_ok=True)
    return source_dir.glob("*.md")
def make_directory_clean(directory: pathlib.Path) -> None:
    """Remove *directory* with all its contents, then recreate it empty.

    Assumes the directory exists (callers create it beforehand);
    shutil.rmtree raises FileNotFoundError otherwise.
    """
    shutil.rmtree(str(directory))
    directory.mkdir()
def publish_pages(
    output_folder: pathlib.Path, markdown_generator: mistune.Markdown,
    template_engine: jinja2.Environment
) -> List[MetaPost]:
    """Render every markdown post to an HTML page and collect index entries.

    Returns the MetaPosts that should appear on the index page, with their
    rendered content cleared (the index only needs metadata).
    """
    index = []
    md_files = get_md_files()
    for meta_post in posts_generator(md_files, output_folder):
        meta_post = post2html(meta_post, markdown_generator)
        make_page(meta_post, output_folder, template_engine)
        # Drop the rendered body before indexing to keep the index light.
        meta_post.body["content"] = ""
        if meta_post.listed:
            index.append(meta_post)
    return index
def make_page(
    meta_post: MetaPost, output_folder: pathlib.Path,
    template_engine: jinja2.Environment
) -> None:
    """Render one post through theme/page.html into <output>/<slug>/index.html."""
    try:
        page = template_engine.get_template("page.html")
    except jinja2.TemplateNotFound as template_not_found_err:
        # Re-raise with a message (in Russian) naming the expected template path.
        raise jinja2.TemplateNotFound(
            "Не найден шаблон для страницы статьи: theme/page.html"
        ) from template_not_found_err
    html = page.render(post=meta_post)
    output_folder.joinpath(meta_post.url).mkdir(exist_ok=True)
    output_folder.joinpath(meta_post.url, "index.html").write_text(html)
def make_index(
    index: List[MetaPost], pages_folder: pathlib.Path,
    template_engine: jinja2.Environment
) -> None:
    """Render the post list through theme/index.html into ./index.html."""
    try:
        index_page = template_engine.get_template("index.html")
    except jinja2.TemplateNotFound as template_not_found_err:
        # Re-raise with a message (in Russian) naming the expected template path.
        raise jinja2.TemplateNotFound(
            "Не найден шаблон главной страницы: theme/index.html"
        ) from template_not_found_err
    html = index_page.render(posts=index, pages_folder=str(pages_folder))
    pathlib.Path("index.html").write_text(html)
def post2html(meta_post: MetaPost, markdown: mistune.Markdown) -> MetaPost:
    """Render every multi-line body field of *meta_post* to HTML, in place.

    Single-line fields (title, date, ...) are left as plain text.
    """
    for field_name, raw_text in meta_post.body.items():
        if "\n" in raw_text:
            meta_post.body[field_name] = markdown(raw_text)
    return meta_post
if __name__ == "__main__":
    # Wire up the markdown renderer (with Pygments highlighting) and the
    # Jinja2 engine that loads templates from ./theme.
    renderer = HighlightRenderer()
    markdown_generator = mistune.Markdown(renderer=renderer)
    template_engine = jinja2.Environment(
        loader=jinja2.FileSystemLoader("theme")
    )
    # Recreate the output folder from scratch on every build.
    pages_folder = pathlib.Path("pages")
    pages_folder.mkdir(exist_ok=True)
    make_directory_clean(pages_folder)
    index = []
    try:
        index = publish_pages(
            pages_folder, markdown_generator, template_engine
        )
    except (ValueError, jinja2.TemplateNotFound) as err:
        # Report bad posts / missing templates; the index is then built empty.
        print(err)
    # Newest posts first on the index page.
    index.sort(key=lambda post: post.date, reverse=True)
    make_index(index, pages_folder, template_engine)
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import datetime
import os
from azure.cli.core._util import get_file_json, CLIError
# Default query look-back window: 1 hour, expressed in milliseconds.
DEFAULT_QUERY_TIME_RANGE = 3600000

# ISO 8601 timestamp format with an explicit 'Z' (UTC) timezone suffix.
DATE_TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
def list_metric_definitions(client, resource_id, metric_names=None):
    '''Commands to manage metric definitions.
    :param str resource_id: The identifier of the resource
    :param str metric_names: The list of metric names
    '''
    # Restrict the server-side query to the requested names; an empty
    # filter string returns all definitions.
    odata_filter = _metric_names_filter_builder(metric_names)
    metric_definitions = client.list(resource_id, filter=odata_filter)
    # Materialize the paged result so callers get a plain list.
    return list(metric_definitions)
def _metric_names_filter_builder(metric_names=None):
'''Build up OData filter string from metric_names
'''
filters = []
if metric_names:
for metric_name in metric_names:
filters.append("name.value eq '{}'".format(metric_name))
return ' or '.join(filters)
# pylint: disable=too-many-arguments
def list_metrics(client, resource_id, time_grain,
                 start_time=None, end_time=None, metric_names=None):
    '''Lists the metric values for a resource.
    :param str resource_id: The identifier of the resource
    :param str time_grain: The time grain. Granularity of the metric data returned in ISO 8601
                           duration format, eg "PT1M"
    :param str start_time: The start time of the query. In ISO format with explicit indication of
                           timezone: 1970-01-01T00:00:00Z, 1970-01-01T00:00:00-0500. Defaults to
                           1 Hour prior to the current time.
    :param str end_time: The end time of the query. In ISO format with explicit indication of
                         timezone: 1970-01-01T00:00:00Z, 1970-01-01T00:00:00-0500. Defaults to
                         current time.
    :param str metric_names: The space separated list of metric names
    '''
    # Combine name/time-grain/time-range clauses into a single OData filter.
    odata_filter = _metrics_odata_filter_builder(time_grain, start_time, end_time, metric_names)
    metrics = client.list(resource_id, filter=odata_filter)
    # Materialize the paged result so callers get a plain list.
    return list(metrics)
def _metrics_odata_filter_builder(time_grain, start_time=None, end_time=None,
                                  metric_names=None):
    '''Build up OData filter string
    '''
    filters = []
    metrics_filter = _metric_names_filter_builder(metric_names)
    if metrics_filter:
        # Parenthesize the name clauses so their 'or's bind before the 'and's.
        filters.append('({})'.format(metrics_filter))
    if time_grain:
        filters.append("timeGrain eq duration'{}'".format(time_grain))
    # A time range is always present (defaults are filled in when omitted).
    filters.append(_validate_time_range_and_add_defaults(start_time, end_time))
    return ' and '.join(filters)
def _validate_time_range_and_add_defaults(start_time, end_time,
                                          formatter='startTime eq {} and endTime eq {}'):
    # Normalize both endpoints (end first, since the default start is computed
    # relative to end) and render them into the given OData clause template.
    end_time = _validate_end_time(end_time)
    start_time = _validate_start_time(start_time, end_time)
    time_range = formatter.format(start_time.strftime('%Y-%m-%dT%H:%M:%SZ'),
                                  end_time.strftime('%Y-%m-%dT%H:%M:%SZ'))
    return time_range
def _validate_end_time(end_time):
result_time = datetime.datetime.utcnow()
if isinstance(end_time, str):
result_time = datetime.datetime.strptime(end_time, DATE_TIME_FORMAT)
return result_time
def _validate_start_time(start_time, end_time):
    """Parse *start_time* or default to one hour before *end_time*.

    :raises ValueError: if *end_time* is not a datetime, or if the resulting
                        start time lies in the future.
    """
    if not isinstance(end_time, datetime.datetime):
        raise ValueError("Input '{}' is not valid datetime. Valid example: 2000-12-31T12:59:59Z"
                         .format(end_time))
    # FIX: DEFAULT_QUERY_TIME_RANGE is defined in milliseconds (see its
    # comment), but it was passed as seconds=, producing a 1000-hour default
    # window instead of the documented 1 hour.
    result_time = end_time - datetime.timedelta(milliseconds=DEFAULT_QUERY_TIME_RANGE)
    if isinstance(start_time, str):
        result_time = datetime.datetime.strptime(start_time, DATE_TIME_FORMAT)
    now = datetime.datetime.utcnow()
    if result_time > now:
        raise ValueError("start_time '{}' is later than Now {}.".format(start_time, now))
    return result_time
# pylint: disable=too-many-arguments
def list_activity_log(client, filters=None, correlation_id=None, resource_group=None,
                      resource_id=None, resource_provider=None, start_time=None, end_time=None,
                      caller=None, status=None, max_events=50, select=None):
    '''Provides the list of activity log.
    :param str filters: The OData filter for the list activity logs. If this argument is provided
                        OData Filter Arguments will be ignored
    :param str correlation_id: The correlation id of the query
    :param str resource_group: The resource group
    :param str resource_id: The identifier of the resource
    :param str resource_provider: The resource provider
    :param str start_time: The start time of the query. In ISO format with explicit indication of
                           timezone: 1970-01-01T00:00:00Z, 1970-01-01T00:00:00-0500. Defaults to
                           1 Hour prior to the current time.
    :param str end_time: The end time of the query. In ISO format with explicit indication of
                         timezone: 1970-01-01T00:00:00Z, 1970-01-01T00:00:00-0500. Defaults to
                         current time.
    :param str caller: The caller to look for when querying
    :param str status: The status value to query (ex: Failed)
    :param str max_events: The maximum number of records to be returned by the command
    :param str select: The list of event names
    '''
    # An explicit OData filter string wins over the individual arguments.
    if filters:
        odata_filters = filters
    else:
        # Exactly one scope selector may be supplied.
        collection = [correlation_id, resource_group, resource_id, resource_provider]
        if not _single(collection):
            raise CLIError("usage error: [--correlation-id ID | --resource-group NAME | "
                           "--resource-id ID | --resource-provider PROVIDER]")
        odata_filters = _build_activity_log_odata_filter(correlation_id, resource_group,
                                                         resource_id, resource_provider,
                                                         start_time, end_time,
                                                         caller, status)
    if max_events:
        max_events = int(max_events)
    select_filters = _activity_log_select_filter_builder(select)
    activity_log = client.list(filter=odata_filters, select=select_filters)
    # Truncate the paged result client-side to at most max_events records.
    return _limit_results(activity_log, max_events)
def _single(collection):
return len([x for x in collection if x]) == 1
# pylint: disable=too-many-arguments
def _build_activity_log_odata_filter(correlation_id=None, resource_group=None, resource_id=None,
                                     resource_provider=None, start_time=None, end_time=None,
                                     caller=None, status=None):
    '''Builds odata filter string.
    :param str correlation_id: The correlation id of the query
    :param str resource_group: The resource group
    :param str resource_id: The identifier of the resource
    :param str resource_provider: The resource provider
    :param str start_time: The start time of the query. In ISO format with explicit indication of
                           timezone: 1970-01-01T00:00:00Z, 1970-01-01T00:00:00-0500
    :param str end_time: The end time of the query. In ISO format with explicit indication of
                         timezone: 1970-01-01T00:00:00Z, 1970-01-01T00:00:00-0500.
    :param str caller: The caller to look for when querying
    :param str status: The status value to query (ex: Failed)
    '''
    # Always anchor the filter on the event time range.
    formatter = "eventTimestamp ge {} and eventTimestamp le {}"
    odata_filters = _validate_time_range_and_add_defaults(start_time, end_time,
                                                          formatter=formatter)
    # Scope selectors are mutually exclusive (enforced by the caller),
    # hence the elif chain; caller/status can be combined with any scope.
    if correlation_id:
        odata_filters = _build_odata_filter(odata_filters, 'correlation_id',
                                            correlation_id, 'correlationId')
    elif resource_group:
        odata_filters = _build_odata_filter(odata_filters, 'resource_group',
                                            resource_group, 'resourceGroupName')
    elif resource_id:
        odata_filters = _build_odata_filter(odata_filters, 'resource_id',
                                            resource_id, 'resourceId')
    elif resource_provider:
        odata_filters = _build_odata_filter(odata_filters, 'resource_provider',
                                            resource_provider, 'resourceProvider')
    if caller:
        odata_filters = _build_odata_filter(odata_filters, 'caller',
                                            caller, 'caller')
    if status:
        odata_filters = _build_odata_filter(odata_filters, 'status',
                                            status, 'status')
    return odata_filters
def _activity_log_select_filter_builder(events=None):
'''Build up select filter string from events
'''
if events:
return ' , '.join(events)
return None
def _build_odata_filter(default_filter, field_name, field_value, field_label):
    # Guard against empty values so we never emit a "field eq ''" clause;
    # field_name is only used for the error message, field_label is the
    # OData property name.
    if not field_value:
        raise CLIError('Value for {} can not be empty.'.format(field_name))
    return _add_condition(default_filter, field_label, field_value)
def _add_condition(default_filter, field_label, field_value):
if not field_value:
return default_filter
return "{} and {} eq '{}'".format(default_filter, field_label, field_value)
def _limit_results(paged, limit):
results = []
for index, item in enumerate(paged):
if index < limit:
results.append(item)
else:
break
return list(results)
def scaffold_autoscale_settings_parameters(client):  # pylint: disable=unused-argument
    '''Scaffold fully formed autoscale-settings' parameters as json template
    '''
    # Autoscale settings parameter scaffold file path: the JSON template
    # ships alongside this module.
    curr_dir = os.path.dirname(os.path.realpath(__file__))
    autoscale_settings_parameter_file_path = os.path.join(
        curr_dir, 'autoscale-parameters-template.json')
    return _load_autoscale_settings_parameters(autoscale_settings_parameter_file_path)
def _load_autoscale_settings_parameters(file_path):
    # Fail with a CLI-friendly error instead of a raw OSError when missing.
    if not os.path.exists(file_path):
        raise CLIError('File {} not found.'.format(file_path))
    return get_file_json(file_path)
# pylint: disable=unused-argument
def create_diagnostics_settings(client, target_resource_id, resource_group=None, logs=None,
metrics=None, namespace=None, rule_name=None, tags=None,
service_bus_rule_id=None, storage_account=None, workspace=None):
from azure.mgmt.monitor.models.service_diagnostic_settings_resource import \
ServiceDiagnosticSettingsResource
# https://github.com/Azure/azure-rest-api-specs/issues/1058
# https://github.com/Azure/azure-rest-api-specs/issues/1059
parameters = ServiceDiagnosticSettingsResource(location='',
name='',
storage_account_id=storage_account,
service_bus_rule_id=service_bus_rule_id,
metrics=metrics,
logs=logs,
workspace_id=workspace,
tags=tags)
return client.create_or_update(target_resource_id, parameters)
|
from django.urls import path
from django.shortcuts import redirect
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views.decorators.csrf import csrf_exempt
from wagtail.admin import urls as wagtailadmin_urls
from wagtail.core import urls as wagtail_urls
from wagtail.documents import urls as wagtaildocs_urls
from graphene_django.views import GraphQLView
from base.views import \
new_page_from_modal, \
joplin_search_views, \
publish_succeeded, \
site_search
from users.urls import users as user_urls
from snippets import urls as snippet_urls
from django.urls import reverse
import debug_toolbar
from api.views import PrivateGraphQLView, PrivateGraphiQLView
from api.decorators import jwt_token_decorator
from api.preview_schema import preview_schema
def home(request):
    """
    * Search page as our 'HomePage' *
    This "HomePage" function was how Joplin controlled our initial data flow before
    switching over to "pages/search/" as our default page after sign in. If we want
    to revert back to that or similar behavior, we could change our return statement
    back to `return redirect('wagtailadmin_explore', page.id)`, and use
    HomePage.objects.first() for the page.id.
    """
    # page = HomePage.objects.first()
    # return redirect('wagtailadmin_explore', page.id)
    # NOTE(review): relative redirect target — resolves against the current
    # URL path ('admin/'), landing on /admin/pages/search/.
    return redirect('pages/search/')
def login(request):
    # Permanent redirect from the site root to Wagtail's login view.
    return redirect(reverse('wagtailadmin_login'), permanent=True)


def reroute(request):
    # Legacy hard-coded page id 3 is remapped to the admin search page
    # (wired up as the first entry of urlpatterns).
    return redirect('/admin/pages/search/')
urlpatterns = [
    # Legacy hard-coded page id: send it to the admin search page instead.
    path('admin/pages/3/', reroute),
    url(r'^django-admin/', include('smuggler.urls')),
    url(r'^django-admin/', admin.site.urls),
    path('admin/docs/', include('django.contrib.admindocs.urls')),
    # comment out the below 'admin/' path to experiment with the default dashboard,
    # which can be customized using wagtail hooks
    path('admin/', home),
    path('', login),
    url(r'admin/pages/new_from_modal/$',
        new_page_from_modal.new_page_from_modal, name='new_page_from_modal'),
    url(r'admin/pages/search/$', joplin_search_views.search, name='search'),
    url(r'admin/users/', include(user_urls)),
    url(r'admin/snippets/', include(snippet_urls)),
    url(r'^admin/', include(wagtailadmin_urls)),
    url(r'^documents/', include(wagtaildocs_urls)),
    path('__debug__/', include(debug_toolbar.urls)),
    # GraphQL endpoints: the main API requires a JWT; graphiql and the
    # preview schema are plain CSRF-exempt views.
    url(r'^api/graphql', jwt_token_decorator(csrf_exempt(PrivateGraphQLView.as_view()))),
    url(r'^api/graphiql', csrf_exempt(PrivateGraphiQLView.as_view(graphiql=True, pretty=True))),
    url(r'^api/preview/graphql', csrf_exempt(GraphQLView.as_view(schema=preview_schema))),
    url(r'session_security/', include('session_security.urls')),
    url(r'^performance/', include('silk.urls', namespace='silk')),
    url('publish_succeeded', publish_succeeded.publish_succeeded),
    url('site_search', site_search.site_search),
    # For anything not caught by a more specific rule above, hand over to
    # Wagtail's page serving mechanism. This should be the last pattern in
    # the list:
    url(r'', include(wagtail_urls)),
]
if settings.DEBUG:
    from django.conf.urls.static import static
    from django.contrib.staticfiles.urls import staticfiles_urlpatterns

    # Serve static and media files from development server only; production
    # is expected to serve these from the web server / CDN instead.
    urlpatterns += staticfiles_urlpatterns()
    urlpatterns += static(settings.MEDIA_URL,
                          document_root=settings.MEDIA_ROOT)
|
# Generated by Django 3.0.8 on 2020-12-25 02:20
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the supervisor app.

    Auto-generated by Django; the operations below are part of the recorded
    migration history and must not be edited after being applied.
    """

    # first migration for this app
    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('studentportal', '0001_initial'),
    ]

    operations = [
        # Example: one-to-one extension of studentportal.Project holding
        # denormalized like/comment counters.
        migrations.CreateModel(
            name='Example',
            fields=[
                ('project', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='studentportal.Project')),
                ('date_created', models.DateTimeField(default=django.utils.timezone.now)),
                ('likes_count', models.IntegerField(default=0)),
                ('comments_count', models.IntegerField(default=0)),
            ],
        ),
        # Flag: simple key/value feature switches.
        migrations.CreateModel(
            name='Flag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('key', models.CharField(max_length=100)),
                ('value', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='News',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(max_length=1000)),
                ('date_created', models.DateTimeField(default=django.utils.timezone.now)),
                ('priority', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='TA',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.CharField(max_length=100)),
                ('instructor', models.BooleanField(default=False)),
            ],
        ),
        # Notification: loosely-typed event record; NGO_* fields only apply
        # to some noti_type values, hence blank=True throughout.
        migrations.CreateModel(
            name='Notification',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('noti_type', models.IntegerField(blank=True, null=True)),
                ('NGO_name', models.CharField(blank=True, max_length=200)),
                ('NGO_link', models.URLField(blank=True)),
                ('NGO_details', models.TextField(blank=True)),
                ('NGO_sugg_by', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
                ('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='studentportal.Project')),
            ],
        ),
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('liked_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='liked_projects', to=settings.AUTH_USER_MODEL)),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='supervisor.Example')),
            ],
        ),
        migrations.CreateModel(
            name='Diff',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('diff_type', models.IntegerField()),
                ('details', models.TextField(max_length=1000, null=True)),
                ('when', models.DateTimeField(default=django.utils.timezone.now)),
                ('person', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='diff', to=settings.AUTH_USER_MODEL)),
                ('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='diff', to='studentportal.Project')),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=200)),
                ('commentor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to=settings.AUTH_USER_MODEL)),
                ('project', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='supervisor.Example')),
            ],
        ),
    ]
|
<reponame>Robertboy18/Numerical-Algorithms-Implementation<gh_stars>0
# original author : Professor <NAME>
class Autodiff_Node(object):
    """Base class for nodes of a reverse-mode automatic differentiation DAG.

    Subclasses override `function` (forward rule) and
    `backpropagation_function` (backward rule); this base class provides the
    DAG traversal: forward evaluation (`eval`), forward evaluation with
    cached outputs (`_eval_and_save_output`), and gradient assembly
    (`_get_gradient` / `compute_gradient`).
    """

    def __init__(self, parents):
        """Parameters:
        ---------------
        `parents` a list of `Autodiff_Node` objects corresponding to the graph
        parents."""
        self._set_parents(parents)
        # cached forward output, filled by `_eval_and_save_output`
        self._output_data = None

    def _set_parents(self, parents):
        self.parents = parents
        return None

    def set_output_data(self, y):
        """Cache the forward output `y` on this node."""
        self._output_data = y
        return None

    def get_output_data(self):
        """Return the cached forward output (None until evaluated)."""
        return self._output_data

    @staticmethod
    def function(x):
        """Given input `x` return output `y`.

        Template method; concrete node types must override it."""
        # BUGFIX: this previously did `return NotImplementedError`, which
        # silently handed the exception *class* back to callers instead of
        # signalling the missing override.
        raise NotImplementedError

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        """
        Parameters:
        --------------------
        `x` is the input variable(s): a list of tensors one for each input from
        a graph parent.
        `y` is the output variable(s): a list of tensors one for each ouput to
        a graph child.
        `output_gradient` is the gradient (list of partial derivatives) of a
        scalar function with respect to one or more output variables.

        Returns:
        --------------------
        `input_gradient` is the gradient (list of partial derivatives) of a
        scalar function with respect to one or more input variables."""
        # BUGFIX: raise instead of returning the exception class (see above).
        raise NotImplementedError

    def eval(self):
        """Evaluate the output of the node, moving from necessary inputs
        through the DAG in the forward direction."""
        # recursively call eval for each node until input variables are reached
        x = [node.eval() for node in self.parents]
        return self.function(x)

    def _eval_and_save_output(self):
        """Forward pass that also caches every node's output (stateful).

        The cached outputs are required by `_get_gradient`; mutating
        `self._output_data` is a deliberate trade-off to avoid recomputation
        during the backward pass.
        """
        x = [node._eval_and_save_output() for node in self.parents]
        y = self.function(x)
        self.set_output_data(y)
        return y

    def _get_gradient(self, output_gradient):
        """Yield gradients w.r.t. the DAG's input variables (generator).

        Assumes `_eval_and_save_output()` has already been called so parent
        outputs are cached.
        """
        x = [node.get_output_data() for node in self.parents]
        y = self.get_output_data()
        input_gradient = self.backpropagation_function(x, y, output_gradient)
        # recurse into each parent, pairing it with its partial derivative
        for node, sub_gradient in zip(self.parents, input_gradient):
            for inner_gradient in node._get_gradient(sub_gradient):
                yield inner_gradient

    def compute_gradient(self):
        """Assumes the node has scalar output"""
        # the stateful forward pass must precede the gradient calculation
        self._eval_and_save_output()
        # the seed is always `1.0` because partial_L/partial_L = 1
        return [g for g in self._get_gradient(1.)]
class Add(Autodiff_Node):
    """Node that adds its two parent nodes.

    Inherits all traversal machinery from `Autodiff_Node`; only the forward
    and backward rules are specialized here.
    """

    def __init__(self, a, b):
        """Create an addition node with graph parents `a` and `b`."""
        super().__init__([a, b])

    @staticmethod
    def function(x):
        """Forward rule: y = x[0] + x[1]."""
        left, right = x
        return left + right

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        """Backward rule: d(a+b)/da = d(a+b)/db = 1."""
        return [output_gradient * 1, output_gradient * 1]
class Multiply(Autodiff_Node):
    """Node that multiplies its two parent nodes."""

    def __init__(self, a, b):
        """Create a multiplication node with graph parents `a` and `b`."""
        super().__init__([a, b])

    @staticmethod
    def function(x):
        """Forward rule: y = x[0] * x[1]."""
        left, right = x
        return left * right

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        """Backward rule: d(ab)/da = b and d(ab)/db = a."""
        left, right = x
        return [output_gradient * right, output_gradient * left]
class Tanh(Autodiff_Node):
    """Apply the `tanh` function to an input node."""

    def __init__(self, x):
        """Create a tanh node with the single graph parent `x`."""
        super().__init__([x])

    @staticmethod
    def function(x):
        """Forward rule: y = tanh(x[0])."""
        # BUGFIX: numpy is never imported at module level in this file, so
        # `np.tanh` raised NameError when called; import it locally.
        import numpy as np
        return np.tanh(x[0])

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        """Backward rule: d/dx tanh(x) = sech^2(x) = 1 / cosh^2(x)."""
        import numpy as np  # see note in `function`
        dydx = 1. / np.cosh(x[0]) ** 2
        return [output_gradient * dydx]
class Input_Variable(Autodiff_Node):
    """Input Variables have a specific fixed value. Use these to hold parameters
    and variables. Gradient of a node with a scalar output will be a list of
    partial derivatives with respect to these Input Variables.

    Parameters:
    ---------------
    `value` the numerical value of the variable (scalar in this example)."""

    def __init__(self, value):
        self.value = value
        # leaf node: no graph parents
        super().__init__([])

    def function(self, x):
        """Forward rule: ignore `x` and return the stored value.

        BUGFIX: this was a @staticmethod whose body referenced `self`, which
        would raise NameError if ever invoked through the base-class
        `eval`/`_eval_and_save_output` path. It is now an instance method so
        the base-class call `self.function(x)` works.
        """
        return self.value

    @staticmethod
    def backpropagation_function(x, y, output_gradient):
        """Identity backward rule: pass the incoming gradient straight through."""
        return output_gradient

    def eval(self):
        # override: a leaf simply returns its fixed value
        return self.value

    def _eval_and_save_output(self):
        # override: cache and return the fixed value
        self.set_output_data(self.value)
        return self.value

    def _get_gradient(self, output_gradient):
        # override: a leaf yields the gradient accumulated along the path
        yield output_gradient
<reponame>Croydon-Brixton/qthought<filename>qthought/agents.py
# Basic library for implementing Agent systems in ProjectQ
# Date: 8 Dec 2018
# Author: <NAME>
# Contact: <EMAIL>
from math import ceil
from numpy import log2
from warnings import warn
from projectq.meta import Control
from projectq.ops import *
from .utils.arithmeticgates import Add
from .utils.general import int_to_bit_array, readout, state_i_to_1
# ----------------------------------------------------------------------------------------------------------------------
class InferenceTable:
    """
    Container for an agent's inference table: a validated mapping from
    observed values of an input variable to the predicted values of an
    output variable.
    """

    def __init__(self, input_var: str, input_time: int, output_var: str, output_time: int, tbl: dict):
        """
        Creates an InferenceTable object

        :param input_var: str, Name of the considered input variable/subsystem that is observed
        :param input_time: int, Time at which the observation of the input variable happens in the protocol
        :param output_var: str, Name of the considered output variable/subsytem that is observed
        :param output_time: int, Time at which the observation of the output variable happens in the protocol
        :param tbl: dict, The dictionary containing for each possible value of the input_var (key) a list of
                    possible outcomes of the output_var (value)
        """
        self.input = input_var
        self.input_time = input_time
        self.output = output_var
        self.output_time = output_time
        self.tbl = tbl
        # validate the provided table immediately so bad tables fail fast
        self.parse()

    def __getitem__(self, index: str):
        if index == 'input':
            return self.input, self.input_time
        if index == 'output':
            return self.output, self.output_time
        if index == 'table':
            return self.tbl
        assert False, 'inference_table object has no attribute %s. Allowed values: input, output, table' % index

    def __str__(self) -> str:
        """Render the table as an aligned two-column listing."""
        header = 'In:({0}:t{1})'.format(self.input, self.input_time).ljust(22) \
                 + '|' + ' Out: ({0}:t{1})'.format(self.output, self.output_time)
        rows = [' \t {0}'.format(key).ljust(14) + '| \t ' + str(val)
                for key, val in self.tbl.items()]
        return '\n'.join([header, '-' * (len(header) + 7)] + rows).expandtabs(10)

    def __len__(self) -> int:
        """Returns the length of the dict belonging to the inference table"""
        return len(self.tbl)

    def __repr__(self) -> str:
        """Same rendering as str(), so the table is readable on the command line."""
        return self.__str__()

    def parse(self):
        """Validate that every table entry has the shape {int: [int, ...]}."""
        for key, value in self.tbl.items():
            assert isinstance(key, int), "Invalid inference table. Use keys in the format (int) 0,1,2, ... " \
                                         "and make sure the number of provided keys matches the dimension of " \
                                         "the memory system."
            assert isinstance(value, list), "Invalid inference table value. Can only predict integer values " \
                                            "corresponding to comp. basis states of pred. system"
            assert isinstance(value[0], int), "Please provide states as int corresponding to your respective" \
                                              " bitstring."
# ----------------------------------------------------------------------------------------------------------------------
class Agent:
    """
    Class to implement agents in projectq. An Agent consists of
    (1) A memory qureg (to store the memory as a state)
    (2) A prediction qureg (to store the prediction as a state)
    (3) An inference system (used to make an inference from the memory to the prediction system
        with the help of an inference table and the inference mechanism described in XXX.
    """

    def __init__(self, eng, n_memory: int, n_pred: int, inference_table=None, no_prediction_state=0):
        """
        :param eng: ProjectQ.engine, engine to which the qubits of Agent are allocated
        :param n_memory: int, number of memory qubits of the Agent
        :param n_pred: int, number of prediction qubits of the Agent
        :param inference_table: precomputed inference table
        :type inference_table: InferenceTable
        :param no_prediction_state: int, basis state used to encode "no prediction"
        """
        assert n_memory >= n_pred, 'Cannot make more different predictions than observed memory states'
        # one n_pred-qubit prediction slot for each of the 2**n_memory memory states
        self.n_inference = 2 ** n_memory * n_pred
        n_inference = self.n_inference
        self.memory_ = eng.allocate_qureg(n_memory)
        self.pred_ = eng.allocate_qureg(n_pred)
        self.inference_sys_ = eng.allocate_qureg(n_inference)
        # lifecycle flags: whether prep_inference()/make_inference() have run
        self.inference_made_ = False
        self.inf_sys_prepared_ = False
        self.eng_ = eng
        self.n_qubits = n_memory + n_pred + n_inference
        # Initialize empty inference table with zeros and generate
        # inference_dict for easier access of inference system:
        self.inference_table_ = {}
        self.inference_dict_ = {}
        for i in range(2 ** n_memory):
            self.inference_table_[i] = no_prediction_state
            # inference_dict_[i] returns the qubits needed for
            # prediction belonging to memory state i
            self.inference_dict_[i] = self.inference_sys_[i * n_pred:(i + 1) * n_pred]
        if inference_table is not None:
            self.set_inference_table(inference_table)

    @classmethod
    def from_dim(cls, eng, memory_dim: int, pred_dim: int, inference_table=None):
        '''
        Used to initialize an Agent via giving the dimensions of the memory and prediction system.

        :param eng: ProjectQ.engine, engine to which the qubits of Agent are allocated
        :param memory_dim: int, dimension of memory system of Agent
        :param pred_dim: int, dimension of predictions of Agent (d-1 predictions + '?' possible)
        :param inference_table: dict, precomputed inference table
        '''
        # qubit counts are the ceil of log2 of the requested dimensions
        n_memory = ceil(log2(memory_dim))
        n_pred = ceil(log2(pred_dim))
        return cls(eng, n_memory, n_pred, inference_table)

    def __len__(self):
        """Returns the number of qubits of the quantum system"""
        return self.n_qubits

    def __getitem__(self, index: int):
        """ Method to access the Agent qubits (memory, then prediction, then inference system).
        :int index: position in the concatenated qubit list
        :return: the qubit at that position
        """
        full_sys = self.memory_ + self.pred_ + self.inference_sys_
        return full_sys[index]

    def memory(self):
        """Getter for the memory register of the agent"""
        return self.memory_

    def prediction(self):
        """Getter for the prediction register of the agent"""
        return self.pred_

    def inference_sys(self):
        """Getter for the inference system register of the agent"""
        return self.inference_sys_

    def all(self, with_inf_sys=True):
        """Getter for all registers that make up the agent combined."""
        if with_inf_sys:
            return self.memory() + self.prediction() + self.inference_sys()
        else:
            return self.memory() + self.prediction()

    def set_inference_table(self, inference_table, no_prediction_state: int = 0):
        """ Initializes the agents inference table with the given inference table.

        :inference_table object inference_table: The inference system of the Agent
        :int no_prediction_state: The state corresponding to the agents statement 'I do not know'
        :return:
        """
        assert len(inference_table) <= 2**len(self.memory_), \
            'Your inference table is too long and cannot be stored in the requested no. of qubits. ' \
            'Make your inference table smaller or raise the number of memory qubits n_memory of the Agent. '
        inference_table.parse()
        for key, predictions in inference_table.tbl.items():
            # ambiguous (multi-valued) entries collapse to "no prediction"
            if len(predictions) > 1:
                self.inference_table_[key] = no_prediction_state
            else:
                assert predictions[0] < 2 ** len(self.pred_), 'Inference value is higher than the provided ' \
                                                              'number of prediction qubits can store.'
                self.inference_table_[key] = predictions[0]

    def get_inference_table(self):
        """Getter for the agents inference table."""
        return self.inference_table_

    def prep_inference(self):
        """Loads the agents inference table into the inference system."""
        for key, qureg in self.inference_dict_.items():
            pred_value = self.inference_table_[key]
            n_pred = len(qureg)
            # flip the qubits matching the binary encoding of the prediction
            to_flip = int_to_bit_array(pred_value, n_pred)[::-1]
            for ix, flip in enumerate(to_flip):
                if flip:
                    X | qureg[ix]
        self.inf_sys_prepared_ = True

    def make_inference(self, reverse=False):
        """Calls the inference operation, i.e.
        calls the circuit that copies the prediction state belonging to the state i of the memory into the
        prediction register.

        :param reverse: bool, if True apply the inverse operation (uncompute the prediction)
        """
        if not self.inf_sys_prepared_:
            warn('make_inference called without setting an inference_table')
        for key, Ti in self.inference_dict_.items():
            # Step 1: Transform memory state corresponding to input i into 'All(1)' state
            i = int(key)
            state_i_to_1(i, self.memory_)
            with Control(self.eng_, self.memory_):
                # Step 2: Controlled on the memory state being in 'All(1)' state
                # and Ti being true, copy the i-th prediction in the prediction register
                if reverse:
                    get_inverse(Add) | (Ti, self.pred_)
                else:
                    Add | (Ti, self.pred_)
            # Step 3: Transform memory state back from All(1) to i
            # (state_i_to_1 is its own inverse here)
            state_i_to_1(i, self.memory_)
        self.inference_made_ = True

    def readout(self):
        """Reads out the memory and prediction registers and returns the results"""
        # TODO: Update as this produces issues
        obs = readout(self.memory_)
        pred = readout(self.pred_)
        return obs, pred
import base64
import hashlib
import json
import os
import re
import smtplib
import sys
import urllib
from django.core.context_processors import csrf
from django.core.validators import validate_email
from django.db.utils import IntegrityError
from django.http import *
from django.shortcuts import render_to_response
from django.utils.http import urlquote_plus
from django.views.decorators.csrf import csrf_exempt
from multiprocessing import Pool
from browser.utils import *
from core.db.manager import DataHubManager
from inventory.models import *
# absolute path of the directory containing this module
p = os.path.abspath(os.path.dirname(__file__))
'''
@author: <NAME>
@date: Feb 12, 2012
'''
# session keys under which the authenticated user's identity is stored
kEmail = "SESSION_EMAIL"
kUsername = "SESSION_USERNAME"
# for async calls
pool = Pool(processes=1)
'''
LOGIN/REGISTER/RESET
'''
def is_valid_username (username):
    """Return True if `username` is a string of more than 3 word characters.

    Equivalent to the original prefix-match-then-compare idiom, but anchors
    the pattern (`\\Z`) and catches only the TypeError raised by non-string
    input (e.g. None) instead of a bare `except` that hid every failure.
    """
    try:
        return len(username) > 3 and re.match(r'\w+\Z', username) is not None
    except TypeError:
        # non-sized / non-string input behaves like an invalid name
        return False
def login_required (f):
    """Decorator: redirect to the login page (preserving the originally
    requested URL as `redirect_url`) unless the session holds a user email.

    Uses functools.wraps instead of manually copying __doc__/__name__ so
    __module__, __qualname__ and __wrapped__ are preserved as well.
    """
    import functools

    @functools.wraps(f)
    def wrap(request, *args, **kwargs):
        if kEmail not in request.session.keys():
            redirect_url = urlquote_plus(request.get_full_path())
            return HttpResponseRedirect("/account/login?redirect_url=%s" % (redirect_url))
        return f(request, *args, **kwargs)
    return wrap
def login_form (request, redirect_url='/', errors=[]):
    """Render the login page with any accumulated error messages."""
    context = {'redirect_url': redirect_url, 'errors': errors, 'values': request.REQUEST}
    context.update(csrf(request))
    return render_to_response('login.html', context)
def register_form (request, redirect_url='/', errors=[]):
    """Render the registration page with any accumulated error messages."""
    context = {'redirect_url': redirect_url, 'errors': errors, 'values': request.REQUEST}
    context.update(csrf(request))
    return render_to_response('register.html', context)
def login (request):
    """Render the login form (GET) or authenticate a user (POST).

    On success the session is reset and populated with the user's email and
    username, and the browser is redirected to `redirect_url` with an
    `auth_user` query parameter appended.

    NOTE(review): the `<PASSWORD>` tokens below are redaction placeholders
    from the published source (not valid Python); the original expressions
    (presumably `login_password`) must be restored before this runs.
    """
    redirect_url = '/'
    if('redirect_url' in request.GET.keys()):
        redirect_url = urllib.unquote_plus(request.GET['redirect_url'])
    if not redirect_url or redirect_url == '':
        redirect_url = '/'
    if request.method == "POST":
        errors = []
        login_email = ''
        if('redirect_url' in request.POST.keys()):
            redirect_url = urllib.unquote_plus(request.POST['redirect_url'])
        email = None
        try:
            login_id = request.POST["login_id"].lower()
            # legacy scheme: unsalted sha1 of the raw password (Python 2 str)
            login_password = hashlib.sha1(request.POST["login_password"]).hexdigest()
            # find the user email in the username, if it's there.
            try:
                validate_email(login_id.lower().strip())
                email = login_id.lower().strip()
            except:
                pass
            user = None
            if email:
                user = User.objects.get(email=login_id, password=<PASSWORD>)
            else:
                user = User.objects.get(username=login_id, password=<PASSWORD>)
            clear_session(request)
            request.session[kEmail] = user.email
            request.session[kUsername] = user.username
            redirect_url = redirect_url + urllib.unquote_plus('?auth_user=%s' %(user.username))
            return HttpResponseRedirect(redirect_url)
        except User.DoesNotExist:
            # distinguish "wrong password" from "no such account"
            try:
                if email:
                    User.objects.get(email=login_id)
                else:
                    User.objects.get(username=login_id)
                errors.append(
                    'Wrong password. Please try again.<br /><br />'
                    '<a class="blue bold" href="/account/forgot">Click Here</a> '
                    'to reset your password.')
            except User.DoesNotExist:
                errors.append(
                    'Could not find any account associated with login_id: '
                    '%s.<br /><br /><a class="blue bold" '
                    'href="/account/register?redirect_url=%s">Click Here</a> '
                    'to create an account.' %(login_id,
                    urllib.quote_plus(redirect_url)))
            return login_form(
                request, redirect_url = urllib.quote_plus(redirect_url),
                errors = errors)
        except:
            errors.append('Login failed.')
            return login_form(
                request, redirect_url = urllib.quote_plus(redirect_url),
                errors = errors)
    else:
        # GET: already logged in -> bounce straight back; otherwise show form
        try:
            if request.session[kUsername]:
                redirect_url = redirect_url + urllib.unquote_plus('?auth_user=%s' %(request.session[kUsername]))
                return HttpResponseRedirect(redirect_url)
            else:
                return login_form(request, urllib.quote_plus(redirect_url))
        except:
            return login_form(request, urllib.quote_plus(redirect_url))
def register (request):
    """Render the registration form (GET) or create a new account (POST).

    Validates email/username/password, provisions the database user through
    DataHubManager, saves the Django User, logs the new user in, and emails
    a verification link asynchronously.

    NOTE(review): the `<PASSWORD>` tokens below are redaction placeholders
    from the published source (not valid Python); restore the original
    hashed-password expressions before running.
    """
    redirect_url = '/'
    if('redirect_url' in request.GET.keys()):
        redirect_url = urllib.unquote_plus(request.GET['redirect_url'])
    if request.method == "POST":
        errors = []
        email = ''
        try:
            error = False
            if('redirect_url' in request.POST.keys()):
                redirect_url = urllib.unquote_plus(request.POST['redirect_url'])
            username = request.POST["username"].lower()
            email = request.POST["email"].lower()
            password = request.POST["password"]
            try:
                validate_email(email.strip())
            except:
                errors.append("Invalid Email.")
                error = True
            if(not is_valid_username(username)):
                errors.append("Invalid Username.")
                error = True
            if(password == ""):
                errors.append("Empty Password.")
                error = True
            try:
                user = User.objects.get(username=username)
                errors.append("Username already taken.")
                error = True
            except User.DoesNotExist:
                pass
            if not error:
                # legacy scheme: unsalted sha1 of the raw password
                hashed_password = hashlib.sha1(password).hexdigest()
                try:
                    DataHubManager.create_user(username=username, password=<PASSWORD>_password)
                except Exception, e:
                    # best-effort: the DB user may already exist
                    print e
                    pass
                try:
                    DataHubManager.change_password(username=username, password=<PASSWORD>)
                except Exception, e:
                    errors.append(str(e))
                    error = True
            if(error):
                return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
            user = User(username=username, email=email, password=<PASSWORD>)
            user.save()
            clear_session(request)
            request.session[kEmail] = user.email
            request.session[kUsername] = user.username
            encrypted_email = encrypt_text(user.email)
            subject = "Welcome to DataHub"
            msg_body = '''
Dear %s,
Thanks for registering to DataHub.
Please click the link below to start using DataHub:
%s://%s/account/verify/%s
''' % (
                user.email,
                'https' if request.is_secure() else 'http',
                request.get_host(),
                encrypted_email)
            # send the verification mail asynchronously (don't block the request)
            pool.apply_async(send_email, [user.email, subject, msg_body])
            redirect_url = redirect_url + urllib.unquote_plus('?auth_user=%s' %(user.username))
            return HttpResponseRedirect(redirect_url)
        except IntegrityError:
            errors.append(
                'Account with the email address <a href="mailto:%s">%s</a> already exists.<br /> <br />Please <a class="blue bold" href="/account/login?login_email=%s">Sign In</a>.'
                % (email, email, urllib.quote_plus(email)))
            return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
        except Exception, e:
            errors.append("Error %s." %(str(e)))
            return register_form(request, redirect_url = urllib.quote_plus(redirect_url), errors = errors)
    else:
        return register_form(request, redirect_url = urllib.quote_plus(redirect_url))
def clear_session (request):
    """Remove all data from the user's session.

    Django's `session.flush()` deletes all session data (and rotates the
    session key), so the explicit `del request.session[kEmail]` /
    `del request.session[kUsername]` statements that followed it in the
    original were unreachable dead code and have been removed.
    """
    request.session.flush()
def logout (request):
    """Clear the session and render a logout confirmation page."""
    clear_session(request)
    context = {
        'msg_title': 'Thank you for using DataHub!',
        'msg_body': 'Your have been logged out.<br /><br /><a href="/account/login">Click Here</a> to sign in again.'
    }
    context.update(csrf(request))
    return render_to_response('confirmation.html', context)
def forgot (request):
    """Render the forgot-password form (GET) or email a reset link (POST).

    The reset link embeds the user's encrypted email address; the mail is
    sent asynchronously through the module-level worker pool.
    """
    if request.method == "POST":
        errors = []
        try:
            user_email = request.POST["email"].lower()
            user = User.objects.get(email=user_email)
            encrypted_email = encrypt_text(user_email)
            subject = "DataHub Password Reset"
            msg_body = '''
Dear %s,
Please click the link below to reset your DataHub password:
%s://%s/account/reset/%s
''' % (
                user.email,
                'https' if request.is_secure() else 'http',
                request.get_host(),
                encrypted_email)
            # don't block the HTTP response on SMTP delivery
            pool.apply_async(send_email, [user_email, subject, msg_body])
            c = {
                'msg_title': 'DataHub Reset Password',
                'msg_body': 'A link to reset your password has been sent to your email address.'
            }
            c.update(csrf(request))
            return render_to_response('confirmation.html', c)
        except User.DoesNotExist:
            errors.append(
                "Invalid Email Address.")
        except Exception, e:
            errors.append(
                'Error: %s.'
                'Please try again or send an email to '
                '<a href="mailto:<EMAIL>"><EMAIL></a>.' %(str(e)))
        c = {'errors': errors, 'values': request.POST}
        c.update(csrf(request))
        return render_to_response('forgot.html', c)
    else:
        c = {'values': request.REQUEST}
        c.update(csrf(request))
        return render_to_response('forgot.html', c)
def verify (request, encrypted_email):
    """Verify an email address from the encrypted token in the URL, log the
    user in on success, and render a confirmation page either way."""
    errors = []
    context = {'msg_title': 'DataHub Account Verification'}
    try:
        user_email = decrypt_text(encrypted_email)
        user = User.objects.get(email=user_email)
        context['msg_body'] = ('Thanks for verifying your email address!'
                               '<br /> <br /><a href="/">Click Here</a> to start using DataHub.')
        clear_session(request)
        request.session[kEmail] = user.email
        request.session[kUsername] = user.username
    except:
        errors.append(
            'Wrong verify code in the URL. '
            'Please try again or send an email to '
            '<a href="mailto:<EMAIL>"><EMAIL></a>')
    context['errors'] = errors
    context.update(csrf(request))
    return render_to_response('confirmation.html', context)
def reset (request, encrypted_email):
    """Render the password-reset form (GET, validating the encrypted token)
    or apply a new password (POST).

    NOTE(review): the `<PASSWORD>` tokens below are redaction placeholders
    from the published source (not valid Python); restore the original
    expressions before running.
    """
    errors = []
    error = False
    if request.method == "POST":
        try:
            user_email = request.POST["user_email"].lower()
            password = request.POST["new_password"]
            password2 = request.POST["<PASSWORD>"]
            if password == "":
                errors.append("Empty Password.")
                error = True
            if password2 != password:
                errors.append("Password and Confirm Password don't match.")
                error = True
            if not error:
                # legacy scheme: unsalted sha1 of the raw password
                hashed_password = hashlib.sha1(password).hexdigest()
                user = User.objects.get(email=user_email)
                try:
                    DataHubManager.create_user(username=user.username, password=<PASSWORD>)
                except Exception, e:
                    # best-effort: the DB user may already exist
                    pass
                try:
                    DataHubManager.change_password(username=user.username, password=<PASSWORD>)
                except Exception, e:
                    errors.append(str(e))
                    error = True
            if error:
                c = {
                    'user_email': user_email,
                    'encrypted_email': encrypted_email,
                    'errors': errors
                }
                c.update(csrf(request))
                return render_to_response('reset.html', c)
            else:
                hashed_password = hashlib.sha1(password).hexdigest()
                user = User.objects.get(email=user_email)
                user.password = <PASSWORD>
                user.save()
                c = {
                    'msg_title': 'DataHub Reset Password',
                    'msg_body': 'Your password has been changed successfully.<br /> <br />'
                                '<a href="/account/login" class="blue bold">Click Here</a>'
                                ' to sign in.'
                }
                c.update(csrf(request))
                return render_to_response('confirmation.html', c)
        except:
            errors.append(
                'Some unknown error happened. '
                'Please try again or send an email to '
                '<a href="mailto:<EMAIL>"><EMAIL></a>')
            c = {'errors': errors}
            c.update(csrf(request))
            return render_to_response('reset.html', c)
    else:
        # GET: validate the token before showing the reset form
        try:
            user_email = decrypt_text(encrypted_email)
            User.objects.get(email=user_email)
            c = {
                'user_email': user_email,
                'encrypted_email': encrypted_email
            }
            c.update(csrf(request))
            return render_to_response('reset.html', c)
        except:
            errors.append(
                'Wrong reset code in the URL. '
                'Please try again or send an email to '
                '<a href="mailto:<EMAIL>"><EMAIL></a>')
            c = {'msg_title': 'DataHub Reset Password', 'errors': errors}
            c.update(csrf(request))
            return render_to_response('confirmation.html', c)
def get_login(request):
    """Return the username stored in the session, or None if not logged in.

    Replaces the original bare `except` with the two exceptions that can
    actually occur here: KeyError (no username in the session) and
    AttributeError (request without a session).
    """
    try:
        return request.session[kUsername]
    except (AttributeError, KeyError):
        return None
@login_required
def jdbc_password(request):
    """Return the logged-in user's stored password value for JDBC clients.

    NOTE(review): this sends the stored password field (a sha1 hash per the
    login/register flows above) to the authenticated user over HTTP —
    confirm this exposure is intentional.
    """
    login = request.session[kUsername]
    user = User.objects.get(username=login)
    return HttpResponse(user.password)
|
"""Combine multiple structural variation callers into single output file.
Takes a simple union approach for reporting the final set of calls, reporting
the evidence from each input.
"""
import fileinput
import os
import shutil
import toolz as tz
import vcf
from bcbio import utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import shared
from bcbio.structural import validate
from bcbio.variation import bedutils
# ## Conversions to simplified BED files
MAX_SVSIZE = 1e6  # 1Mb maximum size from callers to avoid huge calls collapsing all structural variants
def _vcf_to_bed(in_file, caller, out_file):
    """Write passing, genotyped records of a caller's VCF as simplified BED lines.

    Non-VCF inputs (or a missing input) produce no output file content.
    """
    is_vcf = in_file and in_file.endswith((".vcf", "vcf.gz"))
    if not is_vcf:
        return
    with utils.open_gzipsafe(in_file) as in_handle:
        with open(out_file, "w") as out_handle:
            for rec in vcf.Reader(in_handle, in_file):
                if rec.FILTER:
                    continue
                sample = rec.samples[0]
                # skip records without a called genotype or with a sample-level filter
                if not sample.gt_type or (hasattr(sample.data, "FT") and sample.data.FT):
                    continue
                start = rec.start - 1
                end = int(rec.INFO.get("END", rec.start))
                if end - start < MAX_SVSIZE:
                    name = "%s_%s" % (_get_svtype(rec), caller)
                    out_handle.write("\t".join([rec.CHROM, str(start), str(end), name]) + "\n")
def _get_svtype(rec):
try:
return rec.INFO["SVTYPE"]
except KeyError:
return "-".join(str(x).replace("<", "").replace(">", "") for x in rec.ALT)
def _cnvbed_to_bed(in_file, caller, out_file):
    """Convert cn_mops CNV based bed files into flattened BED
    """
    import pybedtools
    with open(out_file, "w") as out_handle:
        for feat in pybedtools.BedTool(in_file):
            fields = [feat.chrom, str(feat.start), str(feat.end),
                      "cnv%s_%s" % (feat.score, caller)]
            out_handle.write("\t".join(fields) + "\n")
def _copy_file(in_file, caller, out_file):
shutil.copy(in_file, out_file)
# dispatch table: caller name -> converter from its native output to BED
CALLER_TO_BED = {"lumpy": _vcf_to_bed,
                 "delly": _vcf_to_bed,
                 "cn_mops": _cnvbed_to_bed,
                 "wham": _copy_file}
def _create_bed(call, base_file, data):
    """Create a simplified BED file from caller specific input.

    Returns the path to the BED file when one exists afterwards, otherwise
    None (e.g. unknown caller or empty conversion).
    """
    caller = call["variantcaller"]
    out_file = "%s-%s.bed" % (utils.splitext_plus(base_file)[0], caller)
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            convert_fn = CALLER_TO_BED.get(caller)
            if convert_fn:
                convert_fn(call["vrn_file"], caller, tx_out_file)
    if utils.file_exists(out_file):
        return out_file
# ## Top level
def summarize(calls, data):
    """Summarize results from multiple callers into a single flattened BED file.

    :param calls: list of dicts with `variantcaller` and `vrn_file` keys
    :param data: sample data dictionary (work dirs, names, config)
    :returns: `calls` with an `ensemble` entry appended when a combined BED
              file could be produced
    """
    import pybedtools
    sample = tz.get_in(["rgnames", "sample"], data)
    work_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "structural",
                                               sample, "ensemble"))
    out_file = os.path.join(work_dir, "%s-ensemble.bed" % sample)
    with shared.bedtools_tmpdir(data):
        # BUGFIX: materialize into a list -- on Python 3 `filter` returns a
        # lazy iterator and the len() check below would raise TypeError
        input_beds = [b for b in (_create_bed(c, out_file, data) for c in calls)
                      if b is not None]
    if len(input_beds) > 0:
        size_beds = []
        # produce one merged BED per event-size bin, then combine them
        for e_start, e_end in validate.EVENT_SIZES:
            base, ext = os.path.splitext(out_file)
            size_out_file = "%s-%s_%s%s" % (base, e_start, e_end, ext)
            if not utils.file_exists(size_out_file):
                with file_transaction(data, size_out_file) as tx_out_file:
                    with shared.bedtools_tmpdir(data):
                        all_file = "%s-all.bed" % utils.splitext_plus(tx_out_file)[0]
                        with open(all_file, "w") as out_handle:
                            for line in fileinput.input(input_beds):
                                chrom, start, end = line.split()[:3]
                                size = int(end) - int(start)
                                # keep only events falling in this size bin
                                if size >= e_start and size < e_end:
                                    out_handle.write(line)
                        pybedtools.BedTool(all_file).sort(stream=True)\
                            .merge(c=4, o="distinct", delim=",").saveas(tx_out_file)
            size_beds.append(size_out_file)
        out_file = bedutils.combine(size_beds, out_file, data["config"])
    if utils.file_exists(out_file):
        bedprep_dir = utils.safe_makedir(os.path.join(os.path.dirname(out_file), "bedprep"))
        calls.append({"variantcaller": "ensemble",
                      "vrn_file": bedutils.clean_file(out_file, data, bedprep_dir=bedprep_dir)})
    return calls
|
#!/usr/bin/env python2.7
# pylint: disable=bad-indentation, no-member, invalid-name, line-too-long
import os
import shutil
import random
import argparse
import multiprocessing
import cv2
import lmdb
import caffe
import numpy as np
from jfda.config import cfg
from jfda.utils import load_wider, load_celeba
from jfda.utils import get_logger, crop_face
from jfda.detector import JfdaDetector
import pyximport
pyximport.install(setup_args={'include_dirs': np.get_include()})
from bbox import bbox_overlaps
logger = get_logger()
# lmdb map-size constants in bytes: G8 is 8 GiB, the others are multiples of it.
G8 = 8*1024*1024*1024
G16 = 2*G8
G24 = 3*G8
G32 = 4*G8
def fill_queues(data, qs):
  """Distribute the items of data round-robin over the queues in qs."""
  queue_n = len(qs)
  for idx, item in enumerate(data):
    qs[idx % queue_n].put(item)
def remove_if_exists(db):
  """Delete the database directory db if present, logging the removal."""
  if not os.path.exists(db):
    return
  logger.info('remove %s'%db)
  shutil.rmtree(db)
def get_detector():
  """Build a JfdaDetector from the configured proposal nets, or return None.

  Returns None when no proposal nets are configured for cfg.NET_TYPE or
  detection is disabled via cfg.USE_DETECT; otherwise selects caffe GPU or
  CPU mode based on cfg.GPU_ID before constructing the detector.
  """
  nets = cfg.PROPOSAL_NETS[cfg.NET_TYPE]
  if nets is None or not cfg.USE_DETECT:
    detector = None
  else:
    if cfg.GPU_ID >= 0:
      caffe.set_mode_gpu()
      caffe.set_device(cfg.GPU_ID)
    else:
      caffe.set_mode_cpu()
    detector = JfdaDetector(nets)
  return detector
# =========== region proposal =============================
def sliding_windows(x, y, width, height, kw, kh, sw, sh):
  '''Enumerate sliding window locations over a region as (x1, y1, x2, y2) rows.

  x, y: top-left corner of the region
  width, height: region extent; windows whose top-left offset reaches
    width-kw (resp. height-kh) are excluded by the half-open ranges
  kw, kh: window width and height
  sw, sh: horizontal and vertical stride
  Returns a float32 array of shape (num_windows, 4).
  '''
  grid_x, grid_y = np.meshgrid(np.arange(0, width - kw, sw),
                               np.arange(0, height - kh, sh))
  top_left = np.vstack([grid_x.ravel(), grid_y.ravel()]).transpose()
  sizes = np.tile(np.array([kw, kh]), (len(top_left), 1))
  boxes = np.hstack([top_left, sizes])
  # shift offsets into absolute coordinates, then turn sizes into corners
  boxes[:, 0] += x
  boxes[:, 1] += y
  boxes[:, 2] += boxes[:, 0]
  boxes[:, 3] += boxes[:, 1]
  return boxes.astype(np.float32)
def proposal(img, gt_bboxes, detector=None):
  '''Propose negative, positive and part-face training samples for one image.

  img: BGR image array. gt_bboxes: ground-truth face boxes, one
  (x1, y1, x2, y2) per row. detector: when given (rNet/oNet data
  generation), candidate boxes come from the previous network's detections;
  when None (pNet), candidates are random sliding windows around each
  ground-truth face, plus global random crops for extra negatives.
  Return
    (negatives, positives, part)
    negatives: [(data, bbox)]
    positives: [(data, bbox, bbox_target)]
    part: [(data, bbox, bbox_target)]
  where data is the cropped face image and bbox_target is the offset from
  the window to the ground truth, normalized by the window size.
  '''
  # ======================= proposal for rnet and onet ==============
  if detector is not None:
    assert isinstance(detector, JfdaDetector)
    bboxes = detector.detect(img, **cfg.DETECT_PARAMS)
    # # maybe sort it by score in descending order
    # bboxes = bboxes[bboxes[:, 4].argsort()[::-1]]
    # keep bbox info, drop score, offset and landmark
    bboxes = bboxes[:, :4]
    # classify every detection by its best overlap with any ground truth
    ovs = bbox_overlaps(bboxes, gt_bboxes)
    ovs_max = ovs.max(axis=1)
    ovs_idx = ovs.argmax(axis=1)
    pos_idx = np.where(ovs_max > cfg.FACE_OVERLAP)[0]
    neg_idx = np.where(ovs_max < cfg.NONFACE_OVERLAP)[0]
    part_idx = np.where(np.logical_and(ovs_max > cfg.PARTFACE_OVERLAP, ovs_max <= cfg.FACE_OVERLAP))[0]
    # pos
    positives = []
    for idx in pos_idx:
      bbox = bboxes[idx].reshape(4)
      gt_bbox = gt_bboxes[ovs_idx[idx]]
      data = crop_face(img, bbox)
      if data is None:
        continue
      # cv2.imshow('pos', data)
      # cv2.waitKey()
      # normalize the regression target by the window width
      # (assumes roughly square windows -- TODO confirm)
      k = bbox[2] - bbox[0]
      bbox_target = (gt_bbox - bbox) / k
      positives.append((data, bbox, bbox_target))
    # part
    part = []
    for idx in part_idx:
      bbox = bboxes[idx].reshape(4)
      gt_bbox = gt_bboxes[ovs_idx[idx]]
      data = crop_face(img, bbox)
      if data is None:
        continue
      # cv2.imshow('part', data)
      # cv2.waitKey()
      k = bbox[2] - bbox[0]
      bbox_target = (gt_bbox - bbox) / k
      part.append((data, bbox, bbox_target))
    # neg
    negatives = []
    np.random.shuffle(neg_idx)
    for idx in neg_idx[:cfg.NEG_DETECT_PER_IMAGE]:
      bbox = bboxes[idx].reshape(4)
      data = crop_face(img, bbox)
      if data is None:
        continue
      # cv2.imshow('neg', data)
      # cv2.waitKey()
      negatives.append((data, bbox))
    return negatives, positives, part
  # ======================= proposal for pnet =======================
  height, width = img.shape[:-1]
  negatives, positives, part = [], [], []
  # ===== proposal positives =====
  # sample windows of several scales around each ground-truth face and keep
  # those overlapping it strongly enough
  for gt_bbox in gt_bboxes:
    x, y = gt_bbox[:2]
    w, h = gt_bbox[2]-gt_bbox[0], gt_bbox[3]-gt_bbox[1]
    this_positives = []
    for scale in cfg.POS_PROPOSAL_SCALES:
      k = max(w, h) * scale
      stride = cfg.POS_PROPOSAL_STRIDE
      s = k * stride
      # random jitter of the search region around the face
      offset_x = (0.5 + np.random.rand()) * k / 2.
      offset_y = (0.5 + np.random.rand()) * k / 2.
      candidates = sliding_windows(x-offset_x, y-offset_y, w+2*offset_x, h+2*offset_y, k, k, s, s)
      ovs = bbox_overlaps(candidates, gt_bbox.reshape((1, 4)))
      ovs = ovs.reshape((1, len(candidates)))[0]
      pos_bboxes = candidates[ovs > cfg.FACE_OVERLAP, :]
      if len(pos_bboxes) > 0:
        np.random.shuffle(pos_bboxes)
        for bbox in pos_bboxes[:cfg.POS_PER_FACE]:
          data = crop_face(img, bbox)
          if data is None:
            continue
          # cv2.imshow('positive', data)
          # cv2.waitKey()
          bbox_target = (gt_bbox - bbox) / k
          this_positives.append((data, bbox, bbox_target))
    # cap the per-face contribution after pooling over all scales
    random.shuffle(this_positives)
    positives.extend(this_positives[:cfg.POS_PER_FACE])
  # ===== proposal part faces =====
  for gt_bbox in gt_bboxes:
    x, y = gt_bbox[:2]
    w, h = gt_bbox[2]-gt_bbox[0], gt_bbox[3]-gt_bbox[1]
    this_part = []
    for scale in cfg.PART_PROPOSAL_SCALES:
      k = max(w, h) * scale
      stride = cfg.PART_PROPOSAL_STRIDE
      s = k * stride
      offset_x = (0.5 + np.random.rand()) * k / 2.
      offset_y = (0.5 + np.random.rand()) * k / 2.
      candidates = sliding_windows(x-offset_x, y-offset_y, w+2*offset_x, h+2*offset_y, k, k, s, s)
      ovs = bbox_overlaps(candidates, gt_bbox.reshape((1, 4)))
      ovs = ovs.reshape((1, len(candidates)))[0]
      # part faces overlap moderately: (PARTFACE_OVERLAP, FACE_OVERLAP]
      part_bboxes = candidates[np.logical_and(ovs > cfg.PARTFACE_OVERLAP, ovs <= cfg.FACE_OVERLAP), :]
      if len(part_bboxes) > 0:
        np.random.shuffle(part_bboxes)
        for bbox in part_bboxes[:cfg.PART_PER_FACE]:
          data = crop_face(img, bbox)
          if data is None:
            continue
          # cv2.imshow('part', data)
          # cv2.waitKey()
          bbox_target = (gt_bbox - bbox) / k
          this_part.append((data, bbox, bbox_target))
    random.shuffle(this_part)
    # NOTE(review): cap uses cfg.POS_PER_FACE, not cfg.PART_PER_FACE -- confirm intended
    part.extend(this_part[:cfg.POS_PER_FACE])
  # ===== proposal negatives =====
  for gt_bbox in gt_bboxes:
    x, y = gt_bbox[:2]
    w, h = gt_bbox[2]-gt_bbox[0], gt_bbox[3]-gt_bbox[1]
    this_negatives = []
    for scale in cfg.NEG_PROPOSAL_SCALES:
      k = max(w, h) * scale
      stride = cfg.NEG_PROPOSAL_STRIDE
      s = k * stride
      offset_x = (0.5 + np.random.rand()) * k / 2.
      offset_y = (0.5 + np.random.rand()) * k / 2.
      candidates = sliding_windows(x-offset_x, y-offset_y, w+2*offset_x, h+2*offset_y, k, k, s, s)
      # negatives must barely overlap ANY ground-truth face, not just this one
      ovs = bbox_overlaps(candidates, gt_bboxes)
      neg_bboxes = candidates[ovs.max(axis=1) < cfg.NONFACE_OVERLAP, :]
      if len(neg_bboxes) > 0:
        np.random.shuffle(neg_bboxes)
        for bbox in neg_bboxes[:cfg.NEG_PER_FACE]:
          data = crop_face(img, bbox)
          if data is None:
            continue
          # cv2.imshow('negative', data)
          # cv2.waitKey()
          this_negatives.append((data, bbox))
    random.shuffle(this_negatives)
    negatives.extend(this_negatives[:cfg.NEG_PER_FACE])
  # negatives from global image random crop
  max_num_from_fr = int(cfg.NEG_PER_IMAGE * cfg.NEG_FROM_FR_RATIO)
  if len(negatives) > max_num_from_fr:
    random.shuffle(negatives)
    negatives = negatives[:max_num_from_fr]
  bbox_neg = []
  range_x, range_y = width - cfg.NEG_MIN_SIZE, height - cfg.NEG_MIN_SIZE
  for i in xrange(cfg.NEG_PROPOSAL_RATIO * cfg.NEG_PER_IMAGE):
    x1, y1 = np.random.randint(range_x), np.random.randint(range_y)
    w = h = np.random.randint(low=cfg.NEG_MIN_SIZE, high=min(width-x1, height-y1))
    x2, y2 = x1 + w, y1 + h
    bbox_neg.append([x1, y1, x2, y2])
    # NOTE(review): leftover debug output (Python 2 print statement)
    if x2 > width or y2 > height:
      print 'hhhh'
  bbox_neg = np.asarray(bbox_neg, dtype=gt_bboxes.dtype)
  ovs = bbox_overlaps(bbox_neg, gt_bboxes)
  bbox_neg = bbox_neg[ovs.max(axis=1) < cfg.NONFACE_OVERLAP]
  np.random.shuffle(bbox_neg)
  if not cfg.NEG_FORCE_BALANCE:
    remain = cfg.NEG_PER_IMAGE - len(negatives)
  else:
    # balance ratio from face region and global crop
    remain = len(negatives) * (1. - cfg.NEG_FROM_FR_RATIO) / cfg.NEG_FROM_FR_RATIO
    remain = int(remain)
  bbox_neg = bbox_neg[:remain]
  # for bbox in bbox_neg:
  #   x1, y1, x2, y2 = bbox
  #   x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
  #   cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
  # cv2.imshow('neg', img)
  # cv2.waitKey()
  for bbox in bbox_neg:
    # NOTE(review): unlike the loops above, this one does not skip a None
    # result from crop_face() -- confirm these crops can never fail
    data = crop_face(img, bbox)
    negatives.append((data, bbox))
  return negatives, positives, part
# =========== WIDER ================
def gen_wider():
  """Generate face classification training data (pos/neg/part) from WIDER.

  For both the train and the val split, fans the images out to cfg.WORKER_N
  reader processes that run proposal() and one writer process that stores
  the serialized crops into three lmdb databases.
  """
  logger.info('loading WIDER')
  train_data, val_data = load_wider()
  logger.info('total images, train: %d, val: %d', len(train_data), len(val_data))
  train_faces = reduce(lambda acc, x: acc + len(x[1]), train_data, 0)
  val_faces = reduce(lambda acc, x: acc + len(x[1]), val_data, 0)
  logger.info('total faces, train: %d, val: %d', train_faces, val_faces)
  def gen(data, db_names):
    # remove stale databases, pre-fill the input queues, then run readers
    # in parallel with a single writer draining q_out
    for db_name in db_names: remove_if_exists(db_name)
    logger.info('fill queues')
    q_in = [multiprocessing.Queue() for i in range(cfg.WORKER_N)]
    q_out = multiprocessing.Queue(1024)
    fill_queues(data, q_in)
    readers = [multiprocessing.Process(target=wider_reader_func, args=(q_in[i], q_out)) \
               for i in range(cfg.WORKER_N)]
    for p in readers:
      p.start()
    writer = multiprocessing.Process(target=wider_writer_func, args=(q_out, db_names))
    writer.start()
    for p in readers:
      p.join()
    # signal the writer to flush buffers and stop
    q_out.put(('finish', []))
    writer.join()
  logger.info('writing train data, %d images', len(train_data))
  db_names = ['data/%snet_positive_train'%cfg.NET_TYPE,
              'data/%snet_negative_train'%cfg.NET_TYPE,
              'data/%snet_part_train'%cfg.NET_TYPE]
  gen(train_data, db_names)
  logger.info('writing val data, %d images', len(val_data))
  db_names = ['data/%snet_positive_val'%cfg.NET_TYPE,
              'data/%snet_negative_val'%cfg.NET_TYPE,
              'data/%snet_part_val'%cfg.NET_TYPE]
  gen(val_data, db_names)
def wider_reader_func(q_in, q_out):
  """Reader worker: consume (img_path, bboxes) items from q_in, run
  proposal() and push serialized training examples onto q_out.

  Emitted messages are ('negative', [data]), ('positive', [data, target])
  and ('part', [data, target]) where data is the resized crop as raw uint8
  bytes and target is the float32 regression target as bytes.

  NOTE(review): the `while not q_in.empty()` termination assumes the queue
  was fully pre-filled before workers started (fill_queues in gen_wider);
  it would be racy for a queue still being fed.
  """
  input_size = cfg.NET_INPUT_SIZE[cfg.NET_TYPE]
  detector = get_detector()
  counter = 0
  while not q_in.empty():
    item = q_in.get()
    counter += 1
    if counter % 1000 == 0:
      logger.info('%s reads %d', multiprocessing.current_process().name, counter)
    img_path, bboxes = item
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    if img is None:
      logger.warning('read %s failed', img_path)
      continue
    negatives, positives, part = proposal(img, bboxes, detector)
    for data, _ in negatives:
      data = cv2.resize(data, (input_size, input_size))
      data = data.tostring() # string for lmdb, uint8
      q_out.put(('negative', [data]))
    for data, _, bbox_target in positives:
      data = cv2.resize(data, (input_size, input_size))
      data = data.tostring() # string for lmdb, uint8
      bbox_target = bbox_target.astype(np.float32).tostring() # float32
      q_out.put(('positive', [data, bbox_target]))
    for data, _, bbox_target in part:
      data = cv2.resize(data, (input_size, input_size))
      data = data.tostring() # string for lmdb, uint8
      bbox_target = bbox_target.astype(np.float32).tostring() # float32
      q_out.put(('part', [data, bbox_target]))
def wider_writer_func(q_out, db_names):
  """Writer process: drain q_out and store shuffled examples in three lmdbs.

  db_names: [positive_db, negative_db, part_db]. Examples are buffered in
  chunks of cfg.SHUFFLE_SIZE, shuffled, then written under '%08d_data' keys
  (raw uint8 image bytes); positives and part faces additionally store the
  float32 regression target under '%08d_bbox'. A final 'size' key records
  the example count per db. Terminates on a ('finish', ...) message.

  Fix: the bbox key expression was a corrupted placeholder (`<KEY>`), a
  syntax error; restored to the '%08d_bbox' pattern matching the '%08d_data'
  key above and '%08d_landmark' in celeba_writer_func.
  """
  db_pos = lmdb.open(db_names[0], map_size=G16)
  db_neg = lmdb.open(db_names[1], map_size=G16)
  db_part = lmdb.open(db_names[2], map_size=G16)
  txn_pos = db_pos.begin(write=True)
  txn_neg = db_neg.begin(write=True)
  txn_part = db_part.begin(write=True)
  idx_pos, idx_neg, idx_part = 0, 0, 0
  q_pos, q_neg, q_part = [], [], []
  def fill(txn, items, idx, has_bbox=True):
    # shuffle each buffered chunk so the db is locally randomized
    random.shuffle(items)
    for item in items:
      data_key = '%08d_data'%idx
      txn.put(data_key, item[0])
      if has_bbox:
        bbox_key = '%08d_bbox'%idx
        txn.put(bbox_key, item[1])
      idx += 1
    return idx
  counter = 0
  pos_counter, neg_counter, part_counter = 0, 0, 0
  while True:
    stat, item = q_out.get()
    counter += 1
    if counter % 10000 == 0:
      logger.info('writes %d positives, %d negatives, %d part', pos_counter, neg_counter, part_counter)
    if stat == 'positive':
      pos_counter += 1
      q_pos.append(item)
      if len(q_pos) >= cfg.SHUFFLE_SIZE:
        idx_pos = fill(txn_pos, q_pos, idx_pos, True)
        q_pos = []
    elif stat == 'negative':
      neg_counter += 1
      q_neg.append(item)
      if len(q_neg) >= cfg.SHUFFLE_SIZE:
        idx_neg = fill(txn_neg, q_neg, idx_neg, False)
        q_neg = []
    elif stat == 'part':
      part_counter += 1
      q_part.append(item)
      if len(q_part) >= cfg.SHUFFLE_SIZE:
        idx_part = fill(txn_part, q_part, idx_part, True)
        q_part = []
    else:
      # stat == 'finish': flush remaining buffers and record final sizes
      idx_pos = fill(txn_pos, q_pos, idx_pos, True)
      txn_pos.put('size', str(idx_pos))
      idx_neg = fill(txn_neg, q_neg, idx_neg, False)
      txn_neg.put('size', str(idx_neg))
      idx_part = fill(txn_part, q_part, idx_part, True)
      txn_part.put('size', str(idx_part))
      break
  txn_pos.commit()
  txn_neg.commit()
  txn_part.commit()
  db_pos.close()
  db_neg.close()
  db_part.close()
  logger.info('Finish')
# =========== CelebA ===============
def gen_celeba():
  """Generate face landmark training data from CelebA.

  Same fan-out scheme as gen_wider: cfg.WORKER_N reader processes produce
  landmark-annotated crops and one writer process stores them in a single
  lmdb per split.
  """
  logger.info('loading CelebA')
  train_data, val_data = load_celeba()
  logger.info('total images, train: %d, val: %d', len(train_data), len(val_data))
  def gen(data, db_name):
    remove_if_exists(db_name)
    logger.info('fill queues')
    q_in = [multiprocessing.Queue() for i in range(cfg.WORKER_N)]
    q_out = multiprocessing.Queue(1024)
    fill_queues(data, q_in)
    readers = [multiprocessing.Process(target=celeba_reader_func, args=(q_in[i], q_out)) \
               for i in range(cfg.WORKER_N)]
    for p in readers:
      p.start()
    writer = multiprocessing.Process(target=celeba_writer_func, args=(q_out, db_name))
    writer.start()
    for p in readers:
      p.join()
    # signal the writer to finalize the database
    q_out.put(('finish', []))
    writer.join()
  logger.info('writing train data, %d images', len(train_data))
  gen(train_data, 'data/%snet_landmark_train'%cfg.NET_TYPE)
  logger.info('writing val data, %d images', len(val_data))
  gen(val_data, 'data/%snet_landmark_val'%cfg.NET_TYPE)
def celeba_reader_func(q_in, q_out):
  """Reader worker: turn (img_path, bbox, landmark) CelebA items into
  landmark regression examples pushed onto q_out as ('data', [img, lm]).

  Positive crops around the annotated face come from proposal(); landmark
  coordinates are normalized to [0, 1] relative to each crop.
  """
  # NOTE(review): stub -- always accepts; the "landmark points inside bbox"
  # check mentioned below is never actually enforced
  def vertify_bbox(bbox, landmark):
    return True
  input_size = cfg.NET_INPUT_SIZE[cfg.NET_TYPE]
  detector = get_detector()
  counter = 0
  while not q_in.empty():
    item = q_in.get()
    counter += 1
    if counter%1000 == 0:
      logger.info('%s reads %d', multiprocessing.current_process().name, counter)
    img_path, bbox, landmark = item
    img = cv2.imread(img_path, cv2.IMREAD_COLOR)
    if img is None:
      logger.warning('read %s failed', img_path)
      continue
    bbox = np.asarray(bbox, dtype=np.float32).reshape((1, -1))
    # only the positive proposals are used for landmark training
    _1, bboxes, _2 = proposal(img, bbox, detector)
    np.random.shuffle(bboxes)
    for data, bbox, _ in bboxes[:cfg.LANDMARK_PER_FACE]:
      # make sure landmark points are in bbox
      landmark1 = landmark.reshape((-1, 2)).copy()
      if not vertify_bbox(bbox, landmark1):
        continue
      # # debug
      # img1 = img.copy()
      # x1, y1, x2, y2 = int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3])
      # cv2.rectangle(img1, (x1, y1), (x2, y2), (0, 0, 255), 2)
      # for x, y in landmark1:
      #   x, y = int(x), int(y)
      #   cv2.circle(img1, (x, y), 2, (0, 255, 0), -1)
      # cv2.imshow('landmark', img1)
      # cv2.waitKey(0)
      # normalize landmark
      w, h = bbox[2]-bbox[0], bbox[3]-bbox[1]
      landmark1[:, 0] = (landmark1[:, 0] - bbox[0]) / w
      landmark1[:, 1] = (landmark1[:, 1] - bbox[1]) / h
      landmark1 = landmark1.reshape(-1)
      # format data
      data = cv2.resize(data, (input_size, input_size))
      data = data.tostring() # string for lmdb, uint8
      landmark1 = landmark1.astype(np.float32).tostring() # float32
      q_out.put(('data', [data, landmark1]))
def celeba_writer_func(q_out, db_name):
  """Writer process: store (data, landmark) pairs from q_out into one lmdb.

  Keys are '%08d_data' (raw uint8 image bytes) and '%08d_landmark' (float32
  bytes); a final 'size' entry records the total count. Terminates on a
  ('finish', ...) message.
  """
  map_size = G16
  db = lmdb.open(db_name, map_size=map_size)
  counter = 0
  with db.begin(write=True) as txn:
    while True:
      stat, item = q_out.get()
      if stat == 'finish':
        txn.put('size', str(counter))
        break
      data, landmark = item
      data_key = '%08d_data'%counter
      landmark_key = '%08d_landmark'%counter
      txn.put(data_key, data)
      txn.put(landmark_key, landmark)
      counter += 1
      if counter%1000 == 0:
        logger.info('writes %d landmark faces', counter)
  db.close()
  logger.info('Finish')
def test():
  """Smoke test: run proposal() on one random WIDER image and dump the crops.

  Writes positive/negative/part crops under tmp/{pos,neg,part}/ for visual
  inspection, plus the source image as tmp/test.jpg.
  """
  os.system('rm -rf tmp/pos/*')
  os.system('rm -rf tmp/neg/*')
  os.system('rm -rf tmp/part/*')
  logger.info('Load WIDER')
  train_data, val_data = load_wider()
  img_path, bboxes = train_data[np.random.choice(len(train_data))]
  bboxes = np.asarray(bboxes)
  img = cv2.imread(img_path, cv2.IMREAD_COLOR)
  detector = JfdaDetector(cfg.PROPOSAL_NETS['r'])
  negatives, positives, part = proposal(img, bboxes, detector)
  logger.info('%d gt_bboxes', len(bboxes))
  logger.info('%d negatives, %d positives, %d part', len(negatives), len(positives), len(part))
  # Fix: proposal() returns 3-tuples (data, bbox, bbox_target) for positives
  # and part, and 2-tuples (data, bbox) for negatives; the previous unpacking
  # assumed 2-/1-element items (ValueError / tuple passed to imwrite).
  for i, (data, _, _) in enumerate(positives):
    cv2.imwrite('tmp/pos/%03d.jpg'%i, data)
  for i, (data, _) in enumerate(negatives):
    cv2.imwrite('tmp/neg/%03d.jpg'%i, data)
  for i, (data, _, _) in enumerate(part):
    cv2.imwrite('tmp/part/%03d.jpg'%i, data)
  cv2.imwrite('tmp/test.jpg', img)
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument('--net', type=str, default='p', help='net type')
  # Fix: the --celeba / --wider help strings were swapped. gen_wider()
  # generates face classification data (positive/negative/part dbs) and
  # gen_celeba() generates landmark data.
  parser.add_argument('--celeba', action='store_true', help='generate landmark data')
  parser.add_argument('--wider', action='store_true', help='generate face data')
  parser.add_argument('--gpu', type=int, default=0, help='gpu device')
  parser.add_argument('--detect', action='store_true', help='use previous network detection')
  parser.add_argument('--worker', type=int, default=8, help='workers to process the data')
  parser.add_argument('--test', action='store_true', help='just simple test')
  args = parser.parse_args()
  # push CLI choices into the shared config before any work starts
  cfg.GPU_ID = args.gpu
  cfg.NET_TYPE = args.net
  cfg.USE_DETECT = args.detect
  cfg.WORKER_N = args.worker
  if args.test:
    test()
  if args.wider:
    gen_wider()
  if args.celeba:
    gen_celeba()
|
<reponame>luizgfalqueto/algNum1
import math
def read_a(A, n):  # Reads the entries of the n x n matrix A from stdin
    for i in range(n):
        for j in range(n):
            A[i][j] = float(input("Digite o valor de A[{}][{}]:".format(i + 1, j + 1)))
    return A
def read_b(b, n):  # Reads the n entries of the vector b from stdin
    for i in range(n):
        b[i] = float(input("Digite o valor de b[{}]:".format(i + 1)))
    return b
def cria_matriz(n):  # Creates an n x n matrix filled with zeros
    return [[0] * n for _ in range(n)]
def cria_vetor(n):  # Creates a zero vector of length n
    return [0] * n
def copia_vetor(v1, v2, n):  # Copies the first n entries of v2 into v1 (in place) and returns v1
    for posicao, valor in enumerate(v2[:n]):
        v1[posicao] = valor
    return v1
def imprime_matriz(n, matriz):
    """Print the n x n matrix, one row of '[ value ]' cells per line."""
    for linha in range(n):
        texto = ''.join(f'[ {matriz[linha][c]} ]' for c in range(n))
        print(texto)
def imprime_vetor(n, vetor):
    """Print n vector entries (3 decimal places each), then a blank line."""
    for posicao in range(n):
        print('[ %.3f ]' % vetor[posicao])
    print()
def matriz_coef(A, b, n):
    """Build the iteration matrix C for the fixed-point form X = C*X + g.

    Off-diagonal entries hold -A[i][j] / A[i][i]; the diagonal stores the
    independent term g[i] = b[i] / A[i][i], which gaussJacobi adds back in.
    """
    C = []
    for i in range(n):
        linha = []
        for j in range(n):
            if i == j:
                linha.append(b[j] / A[i][j])
            else:
                linha.append(-1 * A[i][j] / A[i][i])
        C.append(linha)
    return C
def gaussJacobi(C, X, n):
    """Perform one update sweep of the iterative solver, mutating X in place.

    Each entry becomes X[i] = sum over k != i of C[i][k] * X[k], plus the
    independent term stored on the diagonal C[i][i]. Entries updated earlier
    in the same sweep are reused by later ones. Returns the mutated X.
    """
    for i in range(n):
        acumulado = sum(C[i][k] * X[k] for k in range(n) if k != i)
        X[i] = acumulado + C[i][i]
    return X
def erro(X, aux, n):
    """Relative stopping criterion between two solution approximations.

    Returns | max|aux| - max|X| | / max|aux|, or 1.0 when max|aux| is zero
    (first iteration, before any update).
    """
    maior_atual = -10000
    maior_anterior = -10000
    for i in range(n):
        maior_atual = max(maior_atual, math.fabs(X[i]))
        maior_anterior = max(maior_anterior, math.fabs(aux[i]))
    if maior_anterior == 0:
        return 1.0
    return math.fabs(maior_anterior - maior_atual) / maior_anterior
def convergencia(A, n):
    """Sassenfeld-style convergence screen: returns the largest beta; a
    result >= 1 means convergence of the iteration cannot be guaranteed.

    NOTE(review): this implementation looks suspect -- the first loop sums
    row 0 without math.fabs, row 0's beta is then recomputed (differently)
    by the second loop, and the inner loop `range(1, i-1)` skips columns 0
    and i-1. Verify against the Sassenfeld criterion before trusting it.
    """
    betas = []
    soma=0
    # beta for row 0: off-diagonal sum divided by the pivot (no fabs here)
    for i in range(1, n):
        soma += A[0][i]
    betas.append(soma/A[0][0])
    for i in range(n):
        soma = 0
        # entries left of the diagonal weighted by previously computed betas
        for j in range(1, i-1):
            soma += math.fabs(A[i][j]) * betas[j]
        # entries right of the diagonal enter unweighted
        for k in range(i+1, n):
            soma += math.fabs(A[i][k])
        betas.append(soma/A[i][i])
    # pick the largest beta across all rows
    maior = -10000
    for i in range(len(betas)):
        if betas[i] > maior:
            maior = betas[i]
    return maior
def main():
    """Interactively read a linear system A x = b and solve it iteratively,
    printing the approximate solution."""
    stop = 0.005  # relative-error tolerance used as the stopping test
    d = 1.0  # current relative error; starts above `stop` to enter the loop
    n = int(input('Informe a ordem da Matriz: '))
    A = cria_matriz(n)
    b = cria_vetor(n)
    # re-read A until the convergence screen passes (largest beta < 1)
    while True:
        A = read_a(A, n)
        if convergencia(A, n) >= 1:
            print('Não se pode ter certeza sobre a convergencia da matriz A!')
            print('Favor, tentar outra vez!')
        if not convergencia(A, n) >= 1:
            break
    b = read_b(b, n)
    ite = int(input('Informe o numero de iterações: '))
    C = matriz_coef(A, b, n)
    X = [0 for i in range(n)]
    aux = [0 for i in range(n)]
    # iterate until the iteration budget runs out or the error drops below stop
    while ((ite > 0) and (d > stop)):
        ite -= 1
        copia_vetor(aux, X, n)
        X = gaussJacobi(C, X, n)
        d = erro(X, aux, n)
        copia_vetor(X, aux, n)
    print("Solução aproximada: ")
    imprime_vetor(n, X)  # print the approximate solution
# Script entry point.
if __name__ == '__main__':
    main()
|
"""
Test to negative scenarios for a scaling policy.
"""
from test_repo.autoscale.fixtures import AutoscaleFixture
from autoscale.status_codes import HttpStatusCodes
import sys
class ScalingPolicyNegative(AutoscaleFixture):
    """
    Verify negative scenarios for a scaling policy.

    All tests run against a shared minimal scaling group created in
    setUpClass; each expects the API to reject the bad request (400/404)
    and to return no entity.
    """
    @classmethod
    def setUpClass(cls):
        """
        Create a scaling group.
        """
        super(ScalingPolicyNegative, cls).setUpClass()
        cls.negative_num = -0.1
        create_resp = cls.autoscale_behaviors.create_scaling_group_min()
        cls.group = create_resp.entity
        # register the group so the fixture deletes it after the run
        cls.resources.add(cls.group.id,
                          cls.autoscale_client.delete_scaling_group)
    @classmethod
    def tearDownClass(cls):
        """
        Delete the scaling group.
        """
        super(ScalingPolicyNegative, cls).tearDownClass()
    def test_scaling_policy_nonexistant(self):
        """
        Negative Test: A newly created scaling group does not contain a scaling policy,
        by default
        """
        create_resp = self.autoscale_behaviors.create_scaling_group_min()
        group = create_resp.entity
        self.resources.add(group.id,
                           self.autoscale_client.delete_scaling_group)
        list_policy_resp = self.autoscale_client.list_policies(group.id)
        list_policy = list_policy_resp.entity
        self.assertEquals(list_policy_resp.status_code, 200,
                          msg='List scaling policies failed with {0}'
                          .format(list_policy_resp.status_code))
        self.validate_headers(list_policy_resp.headers)
        self.assertEquals(list_policy, [],
                          msg='Some scaling policies exist on the scaling group')
    def test_scaling_policy_name_blank(self):
        """
        Negative Test: Scaling policy should not get created with an empty name.
        """
        expected_status_code = HttpStatusCodes.BAD_REQUEST
        error_create_resp = self.autoscale_client.create_policy(group_id=self.group.id,
                                                                name='',
                                                                cooldown=self.sp_cooldown,
                                                                change=self.sp_change,
                                                                policy_type=self.sp_policy_type)
        create_error = error_create_resp.entity
        self.assertEquals(error_create_resp.status_code, expected_status_code,
                          msg='Create scaling policy succeeded with invalid request: {0}'
                          .format(error_create_resp.status_code))
        self.assertTrue(create_error is None,
                        msg='Create scaling policy with invalid request returned: {0}'
                        .format(create_error))
    def test_scaling_policy_name_whitespace(self):
        """
        Negative Test: Scaling policy should not get created with name as whitespace.
        """
        expected_status_code = HttpStatusCodes.BAD_REQUEST
        error_create_resp = self.autoscale_client.create_policy(group_id=self.group.id,
                                                                name='  ',
                                                                cooldown=self.sp_cooldown,
                                                                change=self.sp_change,
                                                                policy_type=self.sp_policy_type)
        create_error = error_create_resp.entity
        self.assertEquals(error_create_resp.status_code, expected_status_code,
                          msg='Create scaling policy succeeded with invalid request: {0}'
                          .format(error_create_resp.status_code))
        self.assertTrue(create_error is None,
                        msg='Create scaling policy with invalid request returned: {0}'
                        .format(create_error))
    def test_scaling_policy_cooldown_lessthan_zero(self):
        """
        Negative Test: Scaling policy should not get created with
        cooldown less than zero.
        """
        expected_status_code = HttpStatusCodes.BAD_REQUEST
        error_create_resp = self.autoscale_client.create_policy(group_id=self.group.id,
                                                                name=self.sp_name,
                                                                cooldown='-00.01',
                                                                change=self.sp_change,
                                                                policy_type=self.sp_policy_type)
        create_error = error_create_resp.entity
        self.assertEquals(error_create_resp.status_code, expected_status_code,
                          msg='Create scaling policy succeeded with invalid request: {0}'
                          .format(error_create_resp.status_code))
        self.assertTrue(create_error is None,
                        msg='Create scaling policy with invalid request returned: {0}'
                        .format(create_error))
    def test_scaling_policy_change_lessthan_zero(self):
        """
        Negative Test: Scaling policy should not get created with change less than zero
        """
        # NOTE(review): despite the name/docstring, change='0.001' is positive
        # (non-integer) and policy_type is omitted -- the 400 likely comes from
        # the non-integer change or the missing type. Confirm the intent.
        expected_status_code = HttpStatusCodes.BAD_REQUEST
        error_create_resp = self.autoscale_client.create_policy(group_id=self.group.id,
                                                                name=self.sp_name,
                                                                cooldown=self.sp_cooldown,
                                                                change='0.001')
        create_error = error_create_resp.entity
        self.assertEquals(error_create_resp.status_code, expected_status_code,
                          msg='Create scaling policy succeeded with invalid request: {0}'
                          .format(error_create_resp.status_code))
        self.assertTrue(create_error is None,
                        msg='Create scaling policy with invalid request returned: {0}'
                        .format(create_error))
    def test_get_invalid_policy_id(self):
        """
        Negative Test: Get policy with invalid policy id should fail with
        resource not found 404
        """
        policy = 13344
        expected_status_code = HttpStatusCodes.NOT_FOUND
        error_create_resp = self.autoscale_client.get_policy_details(group_id=self.group.id,
                                                                     policy_id=policy)
        create_error = error_create_resp.entity
        self.assertEquals(error_create_resp.status_code, expected_status_code,
                          msg='Create policies succeeded with invalid request: {0}'
                          .format(error_create_resp.status_code))
        self.assertTrue(create_error is None,
                        msg='Create policies with invalid request returned: {0}'
                        .format(create_error))
    def test_update_invalid_policy_id(self):
        """
        Negative Test: Update policy with invalid policy id should fail with
        resource not found 404
        """
        policy = 13344
        expected_status_code = HttpStatusCodes.NOT_FOUND
        error_create_resp = self.autoscale_client.update_policy(group_id=self.group.id,
                                                                policy_id=policy,
                                                                name=self.sp_name,
                                                                cooldown=self.sp_cooldown,
                                                                change=self.sp_change,
                                                                policy_type=self.sp_policy_type)
        create_error = error_create_resp.entity
        self.assertEquals(error_create_resp.status_code, expected_status_code,
                          msg='Create policies succeeded with invalid request: {0}'
                          .format(error_create_resp.status_code))
        self.assertTrue(create_error is None,
                        msg='Create policies with invalid request returned: {0}'
                        .format(create_error))
    def test_get_policy_after_deletion(self):
        """
        Negative Test: Get policy when policy is deleted should fail with
        resource not found 404
        """
        policy = self.autoscale_behaviors.create_policy_min(self.group.id)
        del_resp = self.autoscale_client.delete_scaling_policy(group_id=self.group.id,
                                                               policy_id=policy['id'])
        self.assertEquals(del_resp.status_code, 204, msg='Delete policy failed')
        expected_status_code = HttpStatusCodes.NOT_FOUND
        error_create_resp = self.autoscale_client.get_policy_details(group_id=self.group.id,
                                                                     policy_id=policy['id'])
        create_error = error_create_resp.entity
        self.assertEquals(error_create_resp.status_code, expected_status_code,
                          msg='Create policies succeeded with invalid request: {0}'
                          .format(error_create_resp.status_code))
        self.assertTrue(create_error is None,
                        msg='Create policies with invalid request returned: {0}'
                        .format(create_error))
    def test_update_policy_after_deletion(self):
        """
        Negative Test: Update policy when policy is deleted should fail with
        resource not found 404
        """
        policy = self.autoscale_behaviors.create_policy_min(self.group.id)
        del_resp = self.autoscale_client.delete_scaling_policy(group_id=self.group.id,
                                                               policy_id=policy['id'])
        self.assertEquals(del_resp.status_code, 204, msg='Delete policy failed')
        expected_status_code = HttpStatusCodes.NOT_FOUND
        error_create_resp = self.autoscale_client.update_policy(group_id=self.group.id,
                                                                policy_id=policy['id'],
                                                                name=self.sp_name,
                                                                cooldown=self.sp_cooldown,
                                                                change=self.sp_change,
                                                                policy_type=self.sp_policy_type)
        create_error = error_create_resp.entity
        self.assertEquals(error_create_resp.status_code, expected_status_code,
                          msg='Create policies succeeded with invalid request: {0},'
                          'policy/groupid: {1} / {2}'
                          .format(error_create_resp.status_code, self.group.id, policy['id']))
        self.assertTrue(create_error is None,
                        msg='Create policies with invalid request returned: {0}'
                        .format(create_error))
    def test_scaling_policy_maxint_change(self):
        """
        Negative Test: Test scaling policy when change is maxint does not fail with 400
        """
        # sys.maxint exists on Python 2 only
        change = sys.maxint
        create_resp = self.autoscale_client.create_policy(
            group_id=self.group.id,
            name=self.sp_name,
            cooldown=self.gc_cooldown,
            change=change,
            policy_type=self.sp_policy_type)
        policy = create_resp.entity
        self.assertEquals(create_resp.status_code, 201,
                          msg='Create scaling policy failed with maxint as change: {0}'
                          .format(create_resp.status_code))
        self.assertTrue(policy is not None,
                        msg='Create scaling policy failed: {0}'
                        .format(policy))
    def test_scaling_policy_max_cooldown(self):
        """
        Negative Test: Create scaling policy with cooldown over max fails with response code 400
        """
        create_resp = self.autoscale_client.create_policy(
            group_id=self.group.id,
            name=self.sp_name,
            cooldown=self.max_cooldown + 1,
            change=self.sp_change,
            policy_type=self.sp_policy_type)
        self.assertEquals(create_resp.status_code, 400,
                          msg='Created scaling policy with cooldown over 24 hrs with response code: {0}'
                          .format(create_resp.status_code))
    def test_scaling_policy_invalid_type(self):
        """
        Negative Test: Create scaling policy with invalid type will result in response code 400
        """
        create_resp = self.autoscale_client.create_policy(
            group_id=self.group.id,
            name=self.sp_name,
            cooldown=self.sp_cooldown,
            change=self.sp_change,
            policy_type='myowntype')
        self.assertEquals(create_resp.status_code, 400,
                          msg='Created scaling policy with invalid type with response code: {0}'
                          .format(create_resp.status_code))
|
<reponame>MKlauck/qcomp2020
from benchmark import Benchmark
from invocation import Invocation
from execution import Execution
from utility import *
from shutil import copyfile
import sys, importlib
import tmptool
loaded = False
def assert_loaded():
	"""Copy tool.py into the import path as tmptool and reload it, once.

	Fix: the module-level `loaded` flag was read but never set, so the copy
	and reload ran on every call; the flag is now updated after the first
	successful load.
	"""
	global loaded
	if not loaded:
		copyfile("tool.py", os.path.join(sys.path[0], "tmptool.py"))
		importlib.reload(sys.modules["tmptool"])
		loaded = True
def get_name():
	"""Name of the tool as listed on http://qcomp.org/competition/2020/."""
	return "modes"
def is_benchmark_supported(benchmark : Benchmark):
	"""returns True if the provided benchmark is supported by the tool and if the given benchmark should appear on the generated benchmark list"""
	short = benchmark.get_model_short_name()
	prop = benchmark.get_property_name()
	prop_type = benchmark.get_short_property_type()
	# Models modes cannot handle:
	#   bluetooth, herman        -- multiple initial states
	#   oscillators              -- model file too large, cannot be parsed
	#   repudiation_malicious    -- open clock constraints
	unsupported = ("bluetooth", "herman", "oscillators", "repudiation_malicious")
	return short not in unsupported
def add_invocations(invocations, track_id, default_cmd):
	"""Append an Invocation with default settings for the given track."""
	default_setting = Invocation()
	default_setting.identifier = "default"
	default_setting.track_id = track_id
	default_setting.add_command(default_cmd)
	invocations.append(default_setting)
# Run with python3 qcomp2020_generate_invocations.py
def get_invocations(benchmark : Benchmark):
    """
    Returns a list of invocations that invoke the tool for the given benchmark.
    It can be assumed that the current directory is the directory from which execute_invocations.py is executed.
    For QCOMP 2020, this should return a list of invocations for all tracks in which the tool can take part. For each track an invocation with default settings has to be provided and in addition, an optimized setting (e.g., the fastest engine and/or solution technique for this benchmark) can be specified. Only information about the model type, the property type and the state space size are allowed to be used to tweak the parameters.
    If this benchmark is not supported, an empty list has to be returned.
    """
    if not is_benchmark_supported(benchmark):
        return []
    # (Removed unused lookups of model name, property name/type, parameters
    # and state count — only the values below feed into the command lines.)
    invocations = []
    benchmark_settings = "--props " + benchmark.get_property_name()
    if benchmark.get_open_parameter_def_string() != "":
        benchmark_settings += " -E " + benchmark.get_open_parameter_def_string()
    # Common command prefix shared by all tracks; writes the result to out.txt.
    default_base = "modes/modest modes --unsafe --max-run-length 0 " + benchmark.get_janifilename() + " " + benchmark_settings + " -O out.txt Minimal"
    #
    # Track "probably-epsilon-correct"
    #
    precision = "5e-2"
    # $PRECISION is substituted per track below.
    default_cmd = default_base + " --width $PRECISION --relative-width"
    if benchmark.is_dtmc() or benchmark.is_ctmc():
        add_invocations(invocations, "probably-epsilon-correct", default_cmd.replace("$PRECISION", precision))
    #
    # Track "often-epsilon-correct" (same command template, tighter precision)
    #
    precision = "1e-3"
    if benchmark.is_dtmc() or benchmark.is_ctmc():
        add_invocations(invocations, "often-epsilon-correct", default_cmd.replace("$PRECISION", precision))
    #
    # Track "often-epsilon-correct-10-min"
    #
    if benchmark.is_dtmc() or benchmark.is_ctmc():
        default_cmd = default_base + " -N 2147483647"
    else:
        default_cmd = default_base + " --width 2e-2 --relative-width --lss Interruptible 1000000 -L 1000"
        if benchmark.is_pta():
            default_cmd += " --digital-clocks"
    add_invocations(invocations, "often-epsilon-correct-10-min", default_cmd)
    #
    # Done
    #
    return invocations
def get_result(benchmark : Benchmark, execution : Execution):
    """
    Returns the result of the given execution on the given benchmark.
    This method is called after executing the commands of the associated invocation.
    The value is read from out.txt, the file the invocation told modes to
    write; it is the text between the first '": ' marker and the end of
    that line. Returns None if the file or the marker is missing.
    """
    if not os.path.exists("out.txt"):
        return None
    with open("out.txt", "r") as out_file:
        log = out_file.read()
    marker = "\": "
    start = log.find(marker)
    if start < 0:
        return None
    start += len(marker)
    end = log.find("\n", start)
    return log[start:end]
|
"""
Created on Sun Jul 08 05:03:01 2018
@Project Title: Learning and Summarizing Graphical Models using Eigen Analysis of Graph Laplacian: An Application in Analysis of Multiple Chronic Conditions
@Project: EAGL (Simplification Based Graph Summarization)
@author: <NAME>
"""
# Import Libraries
import networkx as nx
import numpy as np
from scipy import sparse as sp
from scipy import io as sc
import matplotlib.pyplot as plt
import warnings
import os
# Defining the functions
## Normalizing a MATRIX
# This will be used if we want to normalize the attained graph laplacian
# Input : Laplacian Matrix
# Output : Normalized Laplacian Matrix
def NormalizeMatrix(Matrix):
    # Row-normalize: divide every row of the Laplacian by its row sum.
    # BUG FIX: for a plain 2-D ndarray, Matrix.sum(axis=1) is 1-D and
    # `Matrix / row_sums` broadcasts it across COLUMNS, normalizing the
    # wrong axis. Reshaping the sums to a column vector gives correct row
    # normalization for both ndarrays and np.matrix inputs (np.matrix is
    # what nx.directed_laplacian_matrix returns, for which the old code
    # happened to work).
    row_sums = np.asarray(Matrix.sum(axis=1)).reshape(-1, 1)
    return Matrix / row_sums
## Function to Extract the Tree from the DAG
# Extract the Depth-first tree or Breadth-first tree.
# Input:
# DAG : Input DAG
# Tree_Option : 'dfs' or 'bfs'
# StartingNode : Starting point of tree traversing
# Output:
# tree_matrix : Extracted Tree from DAG
# !! A future improvement could be: instead of a StartingNode input, we could leverage the input/output degree
def TreeExtraction(DAG, Tree_Option, StartingNode):
    # Extract a depth-first ('dfs') or breadth-first (any other value) tree
    # from the DAG, rooted at StartingNode.
    # Returns the tree's adjacency matrix (as a NumPy matrix) and the tree graph.
    sparse_dag = sp.csr_matrix(DAG)        # compressed sparse row form
    digraph = nx.DiGraph(sparse_dag)       # build the directed graph
    if Tree_Option == 'dfs':
        tree = nx.dfs_tree(digraph, StartingNode)
    else:
        tree = nx.bfs_tree(digraph, StartingNode)
    tree_matrix = nx.to_numpy_matrix(tree)  # adjacency matrix of the tree
    return tree_matrix, tree
## Function for Eigenvalue Entry
# Calculates the Top_k eigen value.
# Input:
# DAG : Input DAG
# DAG_Size : Size of the DAG (Future Improvement: Extract directly from DAG)
# Top_k_Eigenvalue_Number : Which Eigen value to extract (1st or 2nd)
# norm : Either to normalize the Laplacian or not.
# Output:
# EigenValue : Eigen value of the input DAG
# Top_k_Eigenvector : Eigen vector for the eigen value
# Top_k_Eigenvalue_Index : Index of the Top_k eigen vector
# Laplacian : The laplacian matrix for the provided
def eigenDAG(DAG,DAG_Size,Top_k_Eigenvalue_Number,norm = False):
    # Eigen-analysis of the directed graph Laplacian of DAG.
    # Inputs : DAG (adjacency matrix), DAG_Size (number of nodes),
    #          Top_k_Eigenvalue_Number (1 or 2: which top eigenvalue to pick),
    #          norm (row-normalize the Laplacian first if True).
    # Returns: the selected eigenvalue, its eigenvector(s), the index list of
    #          the two largest eigenvalues, and the Laplacian itself.
    # Matrix to Sparse
    DAG = sp.csr_matrix(DAG)
    # Create Graph
    G=nx.DiGraph(DAG) # Create The Directed Graph
    # Directed Laplacian (PageRank-style with teleport factor alpha=0.95)
    Laplacian=nx.directed_laplacian_matrix(G, nodelist=None, weight='weight', walk_type=None, alpha=0.95)
    # Normalize the matrix
    if norm:
        Laplacian=NormalizeMatrix(Laplacian)
    # Eigen decomposition of Laplacian
    eigenvalues,eigenvectors = np.linalg.eig(Laplacian)
    # Sorting the eigenvalues IN PLACE (ascending); from here on, positions in
    # `eigenvalues` no longer correspond to the columns of `eigenvectors`.
    np.matrix.sort(eigenvalues)
    # Top K eigenvalues = last Top_k_Eigenvalue_Number entries of the sorted array
    Top_k_Eigenvalue=eigenvalues[(DAG_Size-Top_k_Eigenvalue_Number):DAG_Size]
    ## If the test is for 2nd Eigen Value then this line will choose the 2nd one otherwise 1st one
    Top_k_Eigenvalue=Top_k_Eigenvalue[0]
    # Indices of the two largest entries of the (already sorted) array.
    # NOTE(review): because the array was sorted in place above, these are
    # positions in the sorted order, not in the original eigenvector order —
    # the eigenvector pairing below may be off; confirm against the MATLAB origin.
    Top_k_Eigenvalue_Index = sorted(range(len(eigenvalues)), key=lambda i: eigenvalues[i])[-2:]
    # List of top eigenvectors (the np.zeros assignment is dead: it is
    # immediately overwritten on the next line)
    Top_k_Eigenvector=np.zeros
    Top_k_Eigenvector=eigenvectors[:,Top_k_Eigenvalue_Index[0]]
    # Stack one extra eigenvector column when the 2nd eigenvalue is requested
    for i in range(Top_k_Eigenvalue_Number-1):
        Top_k_Eigenvector=np.column_stack((Top_k_Eigenvector,eigenvectors[:,Top_k_Eigenvalue_Index[i+1]]))
    return Top_k_Eigenvalue,Top_k_Eigenvector,Top_k_Eigenvalue_Index,Laplacian
## Store Eigen Values for all the Test Case
# Calculate Eigen Values for Edge Deletion
# Input:
# DAG : Input DAG
# DAG_Size : Size of the DAG (Future Improvement: Extract directly from DAG)
# Top_k_Eigenvalue_Number : Which Eigen value to extract (1st or 2nd)
# Output:
# EigenChange : list of eigen changes due to edge deletion
# DAG_Track : Tracking Matrix which will keep track of eigen changes due to edge deletion
# OriginalEigen : Original Eigen value of the provided DAG
def eigenStore(DAG,DAG_Size,Top_k_Eigenvalue_Number):
    # For every existing edge, measure how much deleting that single edge
    # changes the selected eigenvalue (as a percentage of the original).
    # Returns: EigenChange (one slot per edge), DAG_Track (change stored at the
    # edge's matrix position), and OriginalEigen (eigenvalue of the intact DAG).
    # Baseline eigen decomposition of the intact DAG
    OriginalEigen,Original_Top_k_Eigenvector,Original_Top_k_Eigenvalue_Index,Original_Laplacian=eigenDAG(DAG,DAG_Size,Top_k_Eigenvalue_Number)
    # One result slot per nonzero (existing) edge
    EigenStoreSize=np.count_nonzero(DAG)#np.sum(DAG).astype(int)
    # Define Initials
    # Tracking the DAG Edges
    DAG_Track=np.zeros((DAG_Size,DAG_Size))
    # Tracking the Eigen Changes: a single row in either supported case.
    # NOTE(review): any other Top_k_Eigenvalue_Number leaves EigenChange
    # undefined and the loop below raises NameError — confirm callers only
    # ever pass 1 or 2.
    if Top_k_Eigenvalue_Number==1:
        EigenChange=np.zeros((Top_k_Eigenvalue_Number , EigenStoreSize)) # 1st Eigen
    elif Top_k_Eigenvalue_Number==2:
        EigenChange=np.zeros((Top_k_Eigenvalue_Number-1, EigenStoreSize)) # 2nd Eigen
    # Save the pristine DAG so each iteration can restore the deleted edge
    sc.savemat('Dummy_DAG.mat', {'DAG':DAG})
    count=0;
    for i in range(DAG_Size):
        for j in range(DAG_Size):
            # Reload the pristine DAG (undoes the previous edge deletion)
            DAG=sc.loadmat('Dummy_DAG.mat')
            DAG=DAG['DAG']
            if DAG[i,j]>0:
                # Delete edge (i, j) and recompute the eigenvalue
                DAG[i,j]=0
                Top_k_Eigenvalue,Top_k_Eigenvector,Top_k_Eigenvalue_Index,Laplacian=eigenDAG(DAG,DAG_Size,Top_k_Eigenvalue_Number)
                # Percentage change relative to the original eigenvalue
                EigenChange[:,count]=np.absolute(OriginalEigen-Top_k_Eigenvalue)/OriginalEigen*100 #*10000
                DAG_Track[i,j]=EigenChange[:,count]
                count=count+1
                # print (count)
    return EigenChange,DAG_Track,OriginalEigen
## Updated DAG from DAG Track
# Based on the tracked changes, this will create the new DAG for next iteration. This is based on Algorithm 1 of the paper.
# Input:
# DAG : Input DAG
# DAG_Size : DAG Size
# DAG_Track : Tracking Matrix which will keep track of eigen changes due to edge deletion
# EigenChange : Change in eigen value
# OriginalEigen : Eigen Value of Original DAG
# CompressionPercent : Desired compression Percentage
# Output:
# DAG_Updated : Updated DAG with removed Edge
def NewDAG_EigenBased(DAG_Size,DAG_Track,EigenChange,OriginalEigen,CompressionPercent,DAG):
    # Algorithm 1: keep only the edges whose deletion changed the eigenvalue
    # by more than the user-selected cut-off fraction of the original value.
    # (EigenChange is accepted for interface compatibility but not used here.)
    threshold = OriginalEigen * CompressionPercent
    DAG_Updated = np.zeros((DAG_Size, DAG_Size))
    for row in range(DAG_Size):
        for col in range(DAG_Size):
            if DAG_Track[row, col] > threshold:
                DAG_Updated[row, col] = DAG[row, col]
    return DAG_Updated
## Updated DAG from DAG Track
# Based on the tracked changes, this will create the new DAG for next iteration. This is based on Algorithm 2 of the paper.
# Input:
# DAG : Input DAG
# DAG_Size : DAG Size
# DAG_Track : Tracking Matrix which will keep track of eigen changes due to edge deletion
# EigenChange : Change in eigen value
# OriginalEigen : Eigen Value of Original DAG
# Output:
# DAG_Updated : Updated DAG with removed Edge
def NewDAG_IterationBased(DAG_Size,DAG_Track,EigenChange,OriginalEigen,DAG):
    # Algorithm 2: drop only the least influential edge(s) — those whose
    # recorded eigen change equals the minimum change observed this round.
    # (OriginalEigen is accepted for interface compatibility but not used here.)
    smallest_change = np.min(EigenChange)
    DAG_Updated = np.zeros((DAG_Size, DAG_Size))
    for row in range(DAG_Size):
        for col in range(DAG_Size):
            if DAG_Track[row, col] > smallest_change:
                DAG_Updated[row, col] = DAG[row, col]
    return DAG_Updated
## Any Edge on the Tree won't be deleted
# This makes sure, No edge on the tree gets deleted (Future Improvement: Include this condition on Edge deletion test, function: eigenStore
# Input:
# tree_matrix : Extracted Tree from the original DAG
# Updated_DAG : The updated DAG with/without the tree DAG
# DAG_Size : Size of the DAG
# DAG : DAG
# Output:
# Updated_Tree_DAG : Updated DAG with tree DAG
def TreeConnecting(tree_matrix,Updated_DAG,DAG_Size,DAG):
    # Restore every tree edge that the compression step removed, so the
    # spanning tree of the original DAG stays connected.
    # NOTE: Updated_Tree_DAG aliases Updated_DAG — restoration is in place.
    Updated_Tree_DAG = Updated_DAG
    for i in range(DAG_Size):
        for j in range (DAG_Size):
            # BUG FIX: tree adjacency entries (nx.to_numpy_matrix of an
            # unweighted dfs/bfs tree) are exactly 1.0, so the original test
            # `> 1` never fired and no tree edge was ever restored. `> 0`
            # matches every tree edge.
            if (tree_matrix[i,j]>0):
                if (Updated_Tree_DAG[i,j]==0):
                    Updated_Tree_DAG[i,j]=DAG[i,j]
    return Updated_Tree_DAG
#Plotting the Graph from Adjacency matrix
def plot_Graph(DAG, pos):
    # Draw the directed graph described by adjacency matrix DAG,
    # placing nodes at the given positions and labelling them.
    digraph = nx.DiGraph(DAG)
    nx.draw(digraph, pos=pos)
    nx.draw_networkx_labels(digraph, pos=pos)
    return
# Plotting the reduction of Edges at each iteration
def plot_Edge_Reduction(NumberofEdges, LabelName, mark, Color):
    # Plot the remaining edge count per iteration onto the current figure.
    # The marker/color keyword arguments override the 'gx-' format string.
    plt.plot(NumberofEdges.T, 'gx-', label=LabelName, marker=mark, color=Color)
    plt.grid(True)
    plt.legend(loc=1)
    plt.title('Graph Compression for Different DAG\'s')
    plt.ylabel('Number of Edges')
    plt.xlabel('Iteration')
########################################################################################################################
## Combining Everything ##
########################################################################################################################
# Algorithm 2:
# Input:
# DAG : Adjacency Matrix of the Graphs
# Method :'False' = Single edge reduction (Default)
# 'True' = Multiple edge reduction
def GraphCompression(DAG,Method='False'):
    # Interactive driver: repeatedly delete low-influence edges guided by the
    # Laplacian eigenvalue (see eigenStore / NewDAG_*), optionally restoring a
    # spanning tree at the end so the graph stays connected.
    # Method is a STRING flag: 'True' = multiple-edge (cut-off based) deletion,
    # anything else = single-edge (minimum-change) deletion per iteration.
    # NOTE(review): if IterationNumber is entered as 0, EigenValue (and a
    # sensible NumberofEdges) are never produced and the return raises
    # NameError — confirm intended usage always has >= 1 iteration.
    # DAG Size
    DAG_Size=DAG.shape[1] # Retrieve the DAG size from the adjacency matrix
    #User Inputs
    #warnings.warn('Extract The DFS/BFS Tree (if more than 1 source Node, Use the Dummy Node added tree)!')
    Tree_Connect = input("Enter if tree connection to be maintained (True or False): ") # string flag
    if Tree_Connect=='True':
        Tree_Option = input("Enter Tree Extraction method (dfs or bfs): ") # 'dfs' or 'bfs'
        StartingNode = int(input("Enter Traversing Start Node(0 to "+str(DAG_Size-1)+"):"))# Starting Node
        # Extract Tree Matrix (only defined when tree connection is requested)
        tree_matrix,tree=TreeExtraction(DAG,Tree_Option,StartingNode)
    IterationNumber = int(input("Enter Number of Iterations: ")) # User input: Number of Iteration
    Top_k_Eigenvalue_Number = int(input("Enter for which Eigenvalue (1st or 2nd) perform the calculation: ")) # Eigenvalue (1st or 2nd)
    NumberofEdges=np.zeros((1,IterationNumber)) # Edge Reduction Count
    if Method == 'True':
        CompressionPercent = float(input("Enter Cut-off Value: ")) # User Input: Compression Percent
    for i in range(IterationNumber):
        # Record edges remaining before this round of deletion
        NumberofEdges[:,i]=np.count_nonzero(DAG)
        # Per-edge eigenvalue sensitivity for the current DAG
        EigenValue,DAG_Track,OriginalEigen=eigenStore(DAG,DAG_Size,Top_k_Eigenvalue_Number)
        # GS Method: cut-off based (Algorithm 1) vs minimum-change (Algorithm 2)
        if Method == 'True':
            DAG=NewDAG_EigenBased(DAG_Size,DAG_Track,EigenValue,OriginalEigen,CompressionPercent,DAG)
        else:
            DAG=NewDAG_IterationBased(DAG_Size,DAG_Track,EigenValue,OriginalEigen,DAG)
    # Do we consider the tree case or not? Restore tree edges once, after all
    # iterations (reconstructed indentation — confirm against the original file).
    if Tree_Connect=='True':
        DAG2=TreeConnecting(tree_matrix,DAG,DAG_Size,DAG)
    else:
        DAG2=DAG
    return DAG2,EigenValue,NumberofEdges
<reponame>yanzhaochang/PSATools-Python<filename>src/data_imexporter/parse_psse_pf.py
import sys
sys.path.append('..')
import apis
from apis import apis_system
def init_powerflow_data(file):
    '''
    Initialize the power flow data, parse the data of each component from the file and import it into memory.
    Args:
        file, str, power flow raw file.
    Rets: None
    '''
    # data  : every line of the raw file (newline stripped)
    # index : line numbers of the '0 ' section terminators (after the header)
    # a     : running line counter
    data, index, a = [], [], 0
    with open(file) as f:
        while True:
            text = f.readline().rstrip('\n')
            # Stops at EOF — and also at the first fully blank line;
            # assumes the raw file has no blank lines inside the data sections.
            if not text:
                break
            # A line starting with "0 " ends a section; skip the 3-line header (a > 2)
            if text[0] == '0' and text[1] == ' ' and a > 2:
                index.append(a)
            data.append(text)
            a = a + 1
    # Dispatch each section to its parser. Section order is assumed to be the
    # standard PSSE raw layout: bus, load, fixed shunt, generator, branch,
    # transformer, (one skipped section), HVDC — note index[5]..index[6] is
    # deliberately not parsed. TODO confirm which section is skipped.
    parse_rate(data[0])
    parse_bus(data[3:index[0]])
    parse_load(data[index[0] + 1 : index[1]])
    parse_shunt(data[index[1] + 1 : index[2]])
    parse_generator(data[index[2] + 1 : index[3]])
    parse_line(data[index[3] + 1 : index[4]])
    parse_transformer(data[index[4] + 1 : index[5]])
    parse_hvdc(data[index[6] + 1 : index[7]])
    check_network()
    renumber_bus_node()
    return
def check_network():
    '''
    Check the network structure, fan and photovoltaic bus type. If it is PV, change it to PQ node
    Args: None
    Rets: None
    '''
    # Wind and PV buses behave as PQ nodes: downgrade any IDE == 2 (PV)
    # bus attached to such a unit to IDE == 1 (PQ).
    for device_type in ('WT GENERATOR', 'PV UNIT'):
        for device in apis.get_all_devices(device_type):
            if apis.get_device_data(device, 'BUS', 'IDE') == 2:
                apis.set_device_data(device, 'BUS', 'IDE', 1)
    return
def renumber_bus_node():
    '''
    Renumber bus node.
    Args: None
    Rets:None
    '''
    # Group buses by type code and order them PQ (1), then PV (2), then
    # swing (3); buses with any other IDE are dropped, as before.
    groups = {1: [], 2: [], 3: []}
    for bus in apis.get_all_devices('BUS'):
        ide = apis.get_device_data(bus, 'BUS', 'IDE')
        if ide in groups:
            groups[ide].append(bus)
    ordering = groups[1] + groups[2] + groups[3]
    apis_system.set_system_base_data('BusSqNum', ordering)
    return
def parse_rate(data):
    '''
    Parse system base reference capacity and reference frequency
    Args:
        data, str, the case identification line of the raw file.
    Rets: None
    '''
    fields = data.split(',')
    apis_system.set_system_base_data('SBASE', float(fields[1]))
    # The frequency field may carry a trailing "/..." comment; keep the part
    # before the slash, exactly as the original parser did.
    base_frequency = fields[5].split('/')[0]
    apis_system.set_system_base_data('BASFRQ', float(base_frequency))
    return
def parse_bus(data):
    '''
    Parse data of bus steady state model and add to the database.
    Args:
        data, list, bus data.
    Rets: None
    '''
    # Column layout: 0 = bus number, 2 = base kV, 3 = type code, 7/8 = V, angle.
    for record in data:
        fields = record.split(',')
        bus_number = int(fields[0])
        apis.add_device(bus_number, 'BUS')
        for name, col, cast in (('BASKV', 2, float), ('IDE', 3, int),
                                ('VM', 7, float), ('VA', 8, float)):
            apis.set_device_data(bus_number, 'BUS', name, cast(fields[col]))
    return
def parse_load(data):
    '''
    Parse data of load steady state model and add to the database, only including constant power load.
    Args:
        data, list, load data.
    Rets: None
    '''
    for record in data:
        fields = record.split(',')
        bus_number = int(fields[0])
        apis.add_device(bus_number, 'LOAD')
        # Columns 5 and 6 hold the constant active/reactive power demand.
        apis.set_device_data(bus_number, 'LOAD', 'PL', float(fields[5]))
        apis.set_device_data(bus_number, 'LOAD', 'QL', float(fields[6]))
    return
def parse_shunt(data):
    '''
    Parse data of shunt steady state model and add to the database.
    Args:
        data, list, shunt data.
    Rets: None
    '''
    for record in data:
        fields = record.split(',')
        bus_number = int(fields[0])
        apis.add_device(bus_number, 'SHUNT')
        # Column 4 holds the shunt susceptance.
        apis.set_device_data(bus_number, 'SHUNT', 'BL', float(fields[4]))
    return
def parse_generator(data):
    '''
    Parse data of generator steady state model and add to the database.
    The machine wind code (field 26) selects the device category:
    3 -> 'WT GENERATOR', 2 -> 'PV UNIT', 0 -> 'GENERATOR'; any other
    code is skipped, matching the original behaviour.
    Args:
        data, list, generator data.
    Rets: None
    '''
    # Category by machine code; None (unknown code) means "skip the record".
    device_type_by_code = {3: 'WT GENERATOR', 2: 'PV UNIT', 0: 'GENERATOR'}
    # Field name -> raw-record column; identical for all three categories,
    # which is why the original three near-identical branches collapse here.
    fields = (('PG', 2), ('QG', 3), ('QT', 4), ('QB', 5), ('VS', 6),
              ('MBASE', 8), ('ZR', 9), ('ZX', 10), ('PT', 16), ('PB', 17))
    for item in data:
        temp = item.split(',')
        device_type = device_type_by_code.get(int(temp[26]))
        if device_type is None:
            continue
        device_index = int(temp[0])
        apis.add_device(device_index, device_type)
        for name, col in fields:
            apis.set_device_data(device_index, device_type, name, float(temp[col]))
    return
def parse_line(data):
    '''
    Parse data of line steady state model and add to the database.
    Args:
        data, list, line data.
    Rets: None
    '''
    for item in data:
        temp = item.split(',')
        # Key: (from bus, to bus, circuit id).
        # SECURITY NOTE(review): eval() is used only to turn the quoted
        # circuit-id field (e.g. "'1 '") into its literal value, but it will
        # execute arbitrary expressions from the raw file. Prefer
        # ast.literal_eval or stripping quotes — left unchanged here because
        # eval('1') yields int 1 while a plain strip would yield the string '1'.
        device_index = (int(temp[0]), int(temp[1]), eval(temp[2]))
        apis.add_device(device_index, 'LINE')
        # Series impedance and total line charging
        apis.set_device_data(device_index, 'LINE', 'R', float(temp[3]))
        apis.set_device_data(device_index, 'LINE', 'X', float(temp[4]))
        apis.set_device_data(device_index, 'LINE', 'B', float(temp[5]))
        # End shunt admittances at the from/to side
        apis.set_device_data(device_index, 'LINE', 'BI', float(temp[10]))
        apis.set_device_data(device_index, 'LINE', 'BJ', float(temp[12]))
    return
def parse_transformer(data):
    '''
    Parse data of transformer steady state model and add to the database.
    Each transformer record spans several physical lines: 4 lines for a
    two-winding unit, 5 for a three-winding unit (detected by a nonzero
    third bus number in the first line). The shared iterator lets the loop
    consume the extra lines of each record.
    Args:
        data, list, transformer data.
    Rets: None
    '''
    data = iter(data)
    for item in data:
        # Collect the record's lines: header + 3 mandatory continuation lines
        rows = [item]
        rows.append(next(data))
        rows.append(next(data))
        rows.append(next(data))
        # Field 2 of the header is the third winding's bus; nonzero means a
        # three-winding transformer with one extra winding line.
        if int(item.split(',')[2]) != 0:
            rows.append(next(data))
        if len(rows) == 4: # Two winding transformer
            temp = rows[0].split(',')
            # Key: (from bus, to bus, third bus == 0)
            device_index = (int(temp[0]), int(temp[1]), int(temp[2]))
            apis.add_device(device_index, 'TRANSFORMER')
            # Magnetizing admittance
            apis.set_device_data(device_index, 'TRANSFORMER', 'MAG1', float(temp[7]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'MAG2', float(temp[8]))
            # Impedance line: winding 1-2 R, X and base MVA
            temp = rows[1].split(',')
            apis.set_device_data(device_index, 'TRANSFORMER', 'R1_2', float(temp[0]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'X1_2', float(temp[1]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'SBASE1_2', float(temp[2]))
            # Winding 1 line: off-nominal turns ratio and nominal voltage
            temp = rows[2].split(',')
            apis.set_device_data(device_index, 'TRANSFORMER', 'WINDV1', float(temp[0]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'NOMV1', float(temp[1]))
            # Winding 2 line
            temp = rows[3].split(',')
            apis.set_device_data(device_index, 'TRANSFORMER', 'WINDV2', float(temp[0]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'NOMV2', float(temp[1]))
        else: # Three winding transformer
            temp = rows[0].split(',')
            device_index = (int(temp[0]), int(temp[1]), int(temp[2]))
            apis.add_device(device_index, 'TRANSFORMER')
            apis.set_device_data(device_index, 'TRANSFORMER', 'MAG1', float(temp[7]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'MAG2', float(temp[8]))
            # Impedance line: all three winding pairs (1-2, 2-3, 3-1)
            temp = rows[1].split(',')
            apis.set_device_data(device_index, 'TRANSFORMER', 'R1_2', float(temp[0]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'X1_2', float(temp[1]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'SBASE1_2', float(temp[2]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'R2_3', float(temp[3]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'X2_3', float(temp[4]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'SBASE2_3', float(temp[5]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'R3_1', float(temp[6]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'X3_1', float(temp[7]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'SBASE3_1', float(temp[8]))
            # Per-winding lines: ratio and nominal voltage for windings 1..3
            temp = rows[2].split(',')
            apis.set_device_data(device_index, 'TRANSFORMER', 'WINDV1', float(temp[0]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'NOMV1', float(temp[1]))
            temp = rows[3].split(',')
            apis.set_device_data(device_index, 'TRANSFORMER', 'WINDV2', float(temp[0]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'NOMV2', float(temp[1]))
            temp = rows[4].split(',')
            apis.set_device_data(device_index, 'TRANSFORMER', 'WINDV3', float(temp[0]))
            apis.set_device_data(device_index, 'TRANSFORMER', 'NOMV3', float(temp[1]))
    return
def parse_hvdc(data):
    '''
    Parse data of hvdc steady state model and add to the database.
    Each HVDC record occupies exactly three consecutive lines:
    line 1 = DC line data, line 2 = rectifier data, line 3 = inverter data.
    Args:
        data, list, hvdc data.
    Rets: None
    '''
    if len(data) == 0:
        return
    k = 0  # index of the current record's first line
    while True:
        row1 = data[k].split(',')    # DC line parameters
        row2 = data[k+1].split(',')  # rectifier-side parameters
        row3 = data[k+2].split(',')  # inverter-side parameters
        # Key: (rectifier bus, inverter bus)
        device_index = (int(row2[0]), int(row3[0]))
        apis.add_device(device_index, 'HVDC')
        # DC line: resistance, power/current setpoint, scheduled DC voltage
        apis.set_device_data(device_index, 'HVDC', 'RDC', float(row1[2]))
        apis.set_device_data(device_index, 'HVDC', 'SETVL', float(row1[3]))
        apis.set_device_data(device_index, 'HVDC', 'VSCHD', float(row1[4]))
        # Rectifier side (suffix R)
        apis.set_device_data(device_index, 'HVDC', 'NBR', int(row2[1]))
        apis.set_device_data(device_index, 'HVDC', 'ANMXR', float(row2[2]))
        apis.set_device_data(device_index, 'HVDC', 'ANMNR', float(row2[3]))
        apis.set_device_data(device_index, 'HVDC', 'RCR', float(row2[4]))
        apis.set_device_data(device_index, 'HVDC', 'XCR', float(row2[5]))
        apis.set_device_data(device_index, 'HVDC', 'EBASR', float(row2[6]))
        apis.set_device_data(device_index, 'HVDC', 'TRR', float(row2[7]))
        apis.set_device_data(device_index, 'HVDC', 'TAPR', float(row2[8]))
        apis.set_device_data(device_index, 'HVDC', 'TMXR', float(row2[9]))
        apis.set_device_data(device_index, 'HVDC', 'TMNR', float(row2[10]))
        apis.set_device_data(device_index, 'HVDC', 'STPR', float(row2[11]))
        apis.set_device_data(device_index, 'HVDC', 'XCAPR', float(row2[16]))
        # Inverter side (suffix I), mirroring the rectifier fields
        apis.set_device_data(device_index, 'HVDC', 'NBI', int(row3[1]))
        apis.set_device_data(device_index, 'HVDC', 'ANMXI', float(row3[2]))
        apis.set_device_data(device_index, 'HVDC', 'ANMNI', float(row3[3]))
        apis.set_device_data(device_index, 'HVDC', 'RCI', float(row3[4]))
        apis.set_device_data(device_index, 'HVDC', 'XCI', float(row3[5]))
        apis.set_device_data(device_index, 'HVDC', 'EBASI', float(row3[6]))
        apis.set_device_data(device_index, 'HVDC', 'TRI', float(row3[7]))
        apis.set_device_data(device_index, 'HVDC', 'TAPI', float(row3[8]))
        apis.set_device_data(device_index, 'HVDC', 'TMXI', float(row3[9]))
        apis.set_device_data(device_index, 'HVDC', 'TMNI', float(row3[10]))
        apis.set_device_data(device_index, 'HVDC', 'STPI', float(row3[11]))
        apis.set_device_data(device_index, 'HVDC', 'XCAPI', float(row3[16]))
        # Advance to the next 3-line record
        k = k + 3
        if k >= len(data):
            break
    return
# -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'grading2.ui'
##
## Created by: Qt User Interface Compiler version 5.15.0
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import (QCoreApplication, QDate, QDateTime, QMetaObject,
QObject, QPoint, QRect, QSize, QTime, QUrl, Qt)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont,
QFontDatabase, QIcon, QKeySequence, QLinearGradient, QPalette, QPainter,
QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
class Ui_Grading2(object):
def setupUi(self, Grading2):
    """Build the grading window's widget tree (pyside2-uic generated code).

    Lays out a 7-row grid: a header row of question-range labels (1~5 ...
    26~30), two 2x3 banks of QLineEdits (student answers, then correct
    answers), and a bottom button bar (back / retry / next). Do not edit by
    hand beyond comments — regenerating from grading2.ui will overwrite it.
    """
    if not Grading2.objectName():
        Grading2.setObjectName(u"Grading2")
    Grading2.resize(358, 218)
    self.centralwidget = QWidget(Grading2)
    self.centralwidget.setObjectName(u"centralwidget")
    self.glMain = QGridLayout(self.centralwidget)
    self.glMain.setObjectName(u"glMain")
    # ---- row-header label for the subject column (spans rows 0-1) ----
    self.lbSubject = QLabel(self.centralwidget)
    self.lbSubject.setObjectName(u"lbSubject")
    # Fixed/Fixed policy reused for all line edits and buttons below
    sizePolicy = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
    sizePolicy.setHorizontalStretch(0)
    sizePolicy.setVerticalStretch(0)
    sizePolicy.setHeightForWidth(self.lbSubject.sizePolicy().hasHeightForWidth())
    self.lbSubject.setSizePolicy(sizePolicy)
    self.glMain.addWidget(self.lbSubject, 0, 0, 2, 1)
    # ---- question-range labels, rows 0-1, columns 1-3 ----
    self.lb11 = QLabel(self.centralwidget)
    self.lb11.setObjectName(u"lb11")
    # Ignored/Fixed policy shared by the six range labels
    sizePolicy1 = QSizePolicy(QSizePolicy.Ignored, QSizePolicy.Fixed)
    sizePolicy1.setHorizontalStretch(0)
    sizePolicy1.setVerticalStretch(0)
    sizePolicy1.setHeightForWidth(self.lb11.sizePolicy().hasHeightForWidth())
    self.lb11.setSizePolicy(sizePolicy1)
    self.lb11.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lb11, 0, 1, 1, 1)
    self.lb12 = QLabel(self.centralwidget)
    self.lb12.setObjectName(u"lb12")
    sizePolicy1.setHeightForWidth(self.lb12.sizePolicy().hasHeightForWidth())
    self.lb12.setSizePolicy(sizePolicy1)
    self.lb12.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lb12, 0, 2, 1, 1)
    self.lb13 = QLabel(self.centralwidget)
    self.lb13.setObjectName(u"lb13")
    sizePolicy1.setHeightForWidth(self.lb13.sizePolicy().hasHeightForWidth())
    self.lb13.setSizePolicy(sizePolicy1)
    self.lb13.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lb13, 0, 3, 1, 1)
    self.lb21 = QLabel(self.centralwidget)
    self.lb21.setObjectName(u"lb21")
    sizePolicy1.setHeightForWidth(self.lb21.sizePolicy().hasHeightForWidth())
    self.lb21.setSizePolicy(sizePolicy1)
    self.lb21.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lb21, 1, 1, 1, 1)
    self.lb22 = QLabel(self.centralwidget)
    self.lb22.setObjectName(u"lb22")
    sizePolicy1.setHeightForWidth(self.lb22.sizePolicy().hasHeightForWidth())
    self.lb22.setSizePolicy(sizePolicy1)
    self.lb22.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lb22, 1, 2, 1, 1)
    self.lb23 = QLabel(self.centralwidget)
    self.lb23.setObjectName(u"lb23")
    sizePolicy1.setHeightForWidth(self.lb23.sizePolicy().hasHeightForWidth())
    self.lb23.setSizePolicy(sizePolicy1)
    self.lb23.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lb23, 1, 3, 1, 1)
    # ---- "answers" bank: label + 2x3 QLineEdits in grid rows 2-3 ----
    self.lbAns = QLabel(self.centralwidget)
    self.lbAns.setObjectName(u"lbAns")
    sizePolicy.setHeightForWidth(self.lbAns.sizePolicy().hasHeightForWidth())
    self.lbAns.setSizePolicy(sizePolicy)
    self.glMain.addWidget(self.lbAns, 2, 0, 2, 1)
    self.lnAns11 = QLineEdit(self.centralwidget)
    self.lnAns11.setObjectName(u"lnAns11")
    sizePolicy.setHeightForWidth(self.lnAns11.sizePolicy().hasHeightForWidth())
    self.lnAns11.setSizePolicy(sizePolicy)
    self.lnAns11.setMaximumSize(QSize(100, 24))
    self.lnAns11.setStyleSheet(u"padding:16px")
    self.lnAns11.setMaxLength(9)
    self.lnAns11.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnAns11, 2, 1, 1, 1)
    self.lnAns12 = QLineEdit(self.centralwidget)
    self.lnAns12.setObjectName(u"lnAns12")
    sizePolicy.setHeightForWidth(self.lnAns12.sizePolicy().hasHeightForWidth())
    self.lnAns12.setSizePolicy(sizePolicy)
    self.lnAns12.setMaximumSize(QSize(100, 24))
    self.lnAns12.setStyleSheet(u"padding:16px")
    self.lnAns12.setMaxLength(9)
    self.lnAns12.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnAns12, 2, 2, 1, 1)
    self.lnAns13 = QLineEdit(self.centralwidget)
    self.lnAns13.setObjectName(u"lnAns13")
    sizePolicy.setHeightForWidth(self.lnAns13.sizePolicy().hasHeightForWidth())
    self.lnAns13.setSizePolicy(sizePolicy)
    self.lnAns13.setMaximumSize(QSize(100, 24))
    self.lnAns13.setStyleSheet(u"padding:16px")
    self.lnAns13.setMaxLength(9)
    self.lnAns13.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnAns13, 2, 3, 1, 1)
    self.lnAns21 = QLineEdit(self.centralwidget)
    self.lnAns21.setObjectName(u"lnAns21")
    sizePolicy.setHeightForWidth(self.lnAns21.sizePolicy().hasHeightForWidth())
    self.lnAns21.setSizePolicy(sizePolicy)
    self.lnAns21.setMaximumSize(QSize(100, 24))
    self.lnAns21.setStyleSheet(u"padding:16px")
    self.lnAns21.setMaxLength(9)
    self.lnAns21.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnAns21, 3, 1, 1, 1)
    self.lnAns22 = QLineEdit(self.centralwidget)
    self.lnAns22.setObjectName(u"lnAns22")
    sizePolicy.setHeightForWidth(self.lnAns22.sizePolicy().hasHeightForWidth())
    self.lnAns22.setSizePolicy(sizePolicy)
    self.lnAns22.setMaximumSize(QSize(100, 24))
    self.lnAns22.setStyleSheet(u"padding:16px")
    self.lnAns22.setMaxLength(9)
    self.lnAns22.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnAns22, 3, 2, 1, 1)
    self.lnAns23 = QLineEdit(self.centralwidget)
    self.lnAns23.setObjectName(u"lnAns23")
    sizePolicy.setHeightForWidth(self.lnAns23.sizePolicy().hasHeightForWidth())
    self.lnAns23.setSizePolicy(sizePolicy)
    self.lnAns23.setMaximumSize(QSize(100, 24))
    self.lnAns23.setStyleSheet(u"padding:16px")
    self.lnAns23.setMaxLength(9)
    self.lnAns23.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnAns23, 3, 3, 1, 1)
    # ---- "correct answers" bank: label + 2x3 QLineEdits in grid rows 4-5 ----
    self.lbCor = QLabel(self.centralwidget)
    self.lbCor.setObjectName(u"lbCor")
    sizePolicy.setHeightForWidth(self.lbCor.sizePolicy().hasHeightForWidth())
    self.lbCor.setSizePolicy(sizePolicy)
    self.glMain.addWidget(self.lbCor, 4, 0, 2, 1)
    self.lnCor11 = QLineEdit(self.centralwidget)
    self.lnCor11.setObjectName(u"lnCor11")
    sizePolicy.setHeightForWidth(self.lnCor11.sizePolicy().hasHeightForWidth())
    self.lnCor11.setSizePolicy(sizePolicy)
    self.lnCor11.setMaximumSize(QSize(100, 24))
    self.lnCor11.setStyleSheet(u"padding:16px")
    self.lnCor11.setMaxLength(9)
    self.lnCor11.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnCor11, 4, 1, 1, 1)
    self.lnCor12 = QLineEdit(self.centralwidget)
    self.lnCor12.setObjectName(u"lnCor12")
    sizePolicy.setHeightForWidth(self.lnCor12.sizePolicy().hasHeightForWidth())
    self.lnCor12.setSizePolicy(sizePolicy)
    self.lnCor12.setMaximumSize(QSize(100, 24))
    self.lnCor12.setStyleSheet(u"padding:16px")
    self.lnCor12.setMaxLength(9)
    self.lnCor12.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnCor12, 4, 2, 1, 1)
    self.lnCor13 = QLineEdit(self.centralwidget)
    self.lnCor13.setObjectName(u"lnCor13")
    sizePolicy.setHeightForWidth(self.lnCor13.sizePolicy().hasHeightForWidth())
    self.lnCor13.setSizePolicy(sizePolicy)
    self.lnCor13.setMaximumSize(QSize(100, 24))
    self.lnCor13.setStyleSheet(u"padding:16px")
    self.lnCor13.setMaxLength(9)
    self.lnCor13.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnCor13, 4, 3, 1, 1)
    self.lnCor21 = QLineEdit(self.centralwidget)
    self.lnCor21.setObjectName(u"lnCor21")
    sizePolicy.setHeightForWidth(self.lnCor21.sizePolicy().hasHeightForWidth())
    self.lnCor21.setSizePolicy(sizePolicy)
    self.lnCor21.setMaximumSize(QSize(100, 24))
    self.lnCor21.setStyleSheet(u"padding:16px")
    self.lnCor21.setMaxLength(9)
    self.lnCor21.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnCor21, 5, 1, 1, 1)
    self.lnCor22 = QLineEdit(self.centralwidget)
    self.lnCor22.setObjectName(u"lnCor22")
    sizePolicy.setHeightForWidth(self.lnCor22.sizePolicy().hasHeightForWidth())
    self.lnCor22.setSizePolicy(sizePolicy)
    self.lnCor22.setMaximumSize(QSize(100, 24))
    self.lnCor22.setStyleSheet(u"padding:16px")
    self.lnCor22.setMaxLength(9)
    self.lnCor22.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnCor22, 5, 2, 1, 1)
    self.lnCor23 = QLineEdit(self.centralwidget)
    self.lnCor23.setObjectName(u"lnCor23")
    sizePolicy.setHeightForWidth(self.lnCor23.sizePolicy().hasHeightForWidth())
    self.lnCor23.setSizePolicy(sizePolicy)
    self.lnCor23.setMaximumSize(QSize(100, 24))
    self.lnCor23.setStyleSheet(u"padding:16px")
    self.lnCor23.setMaxLength(9)
    self.lnCor23.setAlignment(Qt.AlignCenter)
    self.glMain.addWidget(self.lnCor23, 5, 3, 1, 1)
    # ---- bottom button bar: back / spacer / retry / spacer / next ----
    self.widBot = QWidget(self.centralwidget)
    self.widBot.setObjectName(u"widBot")
    self.hlBot = QHBoxLayout(self.widBot)
    self.hlBot.setObjectName(u"hlBot")
    self.hlBot.setContentsMargins(0, 0, 0, 0)
    self.btnBack = QPushButton(self.widBot)
    self.btnBack.setObjectName(u"btnBack")
    sizePolicy.setHeightForWidth(self.btnBack.sizePolicy().hasHeightForWidth())
    self.btnBack.setSizePolicy(sizePolicy)
    self.hlBot.addWidget(self.btnBack)
    self.sp1 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
    self.hlBot.addItem(self.sp1)
    self.btnRe = QPushButton(self.widBot)
    self.btnRe.setObjectName(u"btnRe")
    sizePolicy.setHeightForWidth(self.btnRe.sizePolicy().hasHeightForWidth())
    self.btnRe.setSizePolicy(sizePolicy)
    self.hlBot.addWidget(self.btnRe)
    self.sp2 = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
    self.hlBot.addItem(self.sp2)
    self.btnNext = QPushButton(self.widBot)
    self.btnNext.setObjectName(u"btnNext")
    sizePolicy.setHeightForWidth(self.btnNext.sizePolicy().hasHeightForWidth())
    self.btnNext.setSizePolicy(sizePolicy)
    self.hlBot.addWidget(self.btnNext)
    self.glMain.addWidget(self.widBot, 6, 0, 1, 4)
    Grading2.setCentralWidget(self.centralwidget)
    # Apply translated texts / input masks, then auto-connect slots by name
    self.retranslateUi(Grading2)
    QMetaObject.connectSlotsByName(Grading2)
# setupUi
    def retranslateUi(self, Grading2):
        """Install the translated display strings on every widget of Grading2.

        Auto-generated by pyside2-uic from the .ui file; the escaped Unicode
        literals are Korean labels. Regenerate from the .ui source instead of
        editing this method by hand.
        """
        Grading2.setWindowTitle(QCoreApplication.translate("Grading2", u"\uac00\ucc44\uc810\uc785\ub825", None))
        # Header labels: subject column plus question-number range columns.
        self.lbSubject.setText(QCoreApplication.translate("Grading2", u"\uacfc\n\ubaa9", None))
        self.lb11.setText(QCoreApplication.translate("Grading2", u"1~5", None))
        self.lb12.setText(QCoreApplication.translate("Grading2", u"6~10", None))
        self.lb13.setText(QCoreApplication.translate("Grading2", u"11~15", None))
        self.lb21.setText(QCoreApplication.translate("Grading2", u"16~20", None))
        self.lb22.setText(QCoreApplication.translate("Grading2", u"21~25", None))
        self.lb23.setText(QCoreApplication.translate("Grading2", u"26~30", None))
        self.lbAns.setText(QCoreApplication.translate("Grading2", u"\uc751\n\ub2f5", None))
        # Answer-row input masks. In Qt masks "D" requires one digit (1-9),
        # so each field takes five space-separated single-digit answers.
        self.lnAns11.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        self.lnAns12.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        self.lnAns13.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        self.lnAns21.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        self.lnAns22.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        self.lnAns23.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        self.lbCor.setText(QCoreApplication.translate("Grading2", u"\uc815\n\ub2f5", None))
        # Correct-answer row uses the same five-digit mask as the answer row.
        self.lnCor11.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        self.lnCor12.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        self.lnCor13.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        self.lnCor21.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        self.lnCor22.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        self.lnCor23.setInputMask(QCoreApplication.translate("Grading2", u"D D D D D", None))
        # Bottom navigation buttons (previous / retry / next).
        self.btnBack.setText(QCoreApplication.translate("Grading2", u"\uc774\uc804", None))
        self.btnRe.setText(QCoreApplication.translate("Grading2", u"\ub2e4\uc2dc", None))
        self.btnNext.setText(QCoreApplication.translate("Grading2", u"\ub2e4\uc74c", None))
    # retranslateUi
|
from scipy import integrate
from django.db import connection, IntegrityError, transaction
import json
import requests as api_requests
from itertools import chain
from bevim.models import Experiment, Job, Sensor, Acceleration, Amplitude, Frequency, Speed, ExperimentFrequency
from bevim_project.settings import REST_BASE_URL
from django.db.models.signals import post_save
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.translation import ugettext as _
# Util methods - Controller
class ExperimentUtils:
    """Controller-side helpers for persisting experiment data and building
    chart payloads (c3.js-style column arrays serialized to JSON)."""

    @staticmethod
    def save_frequency(frequencies, experiment):
        """Persist a {timestamp: frequency} mapping for `experiment` in one transaction."""
        with transaction.atomic():
            for timestamp, frequency in frequencies.items():
                ExperimentFrequency.objects.create(
                    experiment=experiment,
                    frequency=frequency,
                    timestamp=timestamp
                )

    @staticmethod
    def save_data(experiment_data, data_class):
        """Bulk-save sensor rows as instances of `data_class` (e.g. Acceleration).

        Each row is (sensor_name, x, y, z, timestamp, job_pk). Returns the
        experiment of the last job seen, or None when `experiment_data` is empty.
        NOTE(review): despite its name, `first_job` holds the *last* job
        iterated; presumably all rows belong to one experiment -- confirm.
        """
        first_job = None
        with transaction.atomic():
            for data in experiment_data:
                sensor = Sensor.objects.get(name=data[0])
                job = Job.objects.get(pk=data[5])
                first_job = job
                data_class.objects.create(sensor=sensor, x_value=data[1], y_value=data[2],
                                          z_value=data[3], timestamp=data[4], job=job)
        if first_job is not None:
            return first_job.experiment
        return None

    @staticmethod
    def free_equipment(experiment_id):
        """Mark the experiment inactive so the vibration equipment is released."""
        experiment = Experiment.objects.get(pk=experiment_id)
        experiment.active = False
        experiment.save()

    @staticmethod
    def get_experiment_result(experiment_id, sensor_id):
        """Collect chart-ready acceleration/amplitude/speed series for one sensor.

        Returns a dict with one JSON chart payload per physical quantity plus
        the initial timestamp of each job (used to mark job boundaries).
        """
        jobs = Job.objects.filter(experiment=experiment_id)
        accelerations = []
        amplitudes = []
        speeds = []
        jobs_initial_timestamp = []
        for job in jobs:
            job_accelerations = job.acceleration_set.all()
            if job_accelerations:
                job_amplitudes = job.amplitude_set.all()
                job_speeds = job.speed_set.all()
                sensor_accelerations = job_accelerations.filter(sensor_id=sensor_id)
                sensor_amplitudes = job_amplitudes.filter(sensor_id=sensor_id)
                sensor_speeds = job_speeds.filter(sensor_id=sensor_id)
                if sensor_accelerations:
                    # Remember where this job's data starts on the time axis.
                    jobs_initial_timestamp.append(str(sensor_accelerations[0].timestamp))
                accelerations = list(chain(accelerations, sensor_accelerations))
                amplitudes = list(chain(amplitudes, sensor_amplitudes))
                speeds = list(chain(speeds, sensor_speeds))
        accelerations_chart_data = ExperimentUtils.get_chart_data(accelerations, _('Acceleration x Time'), '#b30000')
        amplitudes_chart_data = ExperimentUtils.get_chart_data(amplitudes, _('Amplitude x Time'), '#ff8000')
        speeds_chart_data = ExperimentUtils.get_chart_data(speeds, _('Speed x Time'), '#8000ff')
        return {
            'accelerations_chart_data': accelerations_chart_data,
            'amplitudes_chart_data': amplitudes_chart_data,
            'speeds_chart_data': speeds_chart_data,
            'jobs_initial_timestamp': jobs_initial_timestamp
        }

    @staticmethod
    def get_chart_data(result_data, chart_description, color):
        """Build a single-series chart payload (JSON string) from model rows.

        Only the z axis of the sensor is plotted.
        """
        timestamps = ['x']
        data_values = [chart_description]
        if result_data:
            for data in result_data:
                timestamps.append(data.timestamp)
                data_values.append(data.z_value)  # Get sensor data from axis z
        columns = [timestamps, data_values]
        colors = {chart_description: color}
        return ExperimentUtils.get_dict_chart(columns, colors)

    @staticmethod
    def get_dict_chart(columns, colors):
        """JSON-encode a c3.js chart dict; an empty/False `columns` yields '{}'."""
        if columns:
            chart_data = {
                'x': 'x',
                'columns': columns,
                'colors': colors
            }
        else:
            chart_data = {}
        return json.dumps(chart_data, cls=DjangoJSONEncoder)

    @staticmethod
    def process_data(data_array):
        """Cumulatively integrate raw (x, y, z) samples over their timestamps.

        Rows are (sensor, x, y, z, timestamp, ...). Returns new rows whose
        columns 1..3 hold the running trapezoid integral; the caller's rows
        are left untouched.
        """
        timestamps = [data[4] for data in data_array]
        x_values = [int(data[1]) for data in data_array]
        y_values = [int(data[2]) for data in data_array]
        z_values = [int(data[3]) for data in data_array]
        # SciPy >= 1.14 removed `cumtrapz`; prefer the new name when present.
        cumtrapz = getattr(integrate, 'cumulative_trapezoid', None) or integrate.cumtrapz
        data_processed_x_axis = cumtrapz(x_values, timestamps, initial=0)
        data_processed_y_axis = cumtrapz(y_values, timestamps, initial=0)
        data_processed_z_axis = cumtrapz(z_values, timestamps, initial=0)
        return ExperimentUtils.format_integral_array(
            data_processed_x_axis, data_processed_y_axis, data_processed_z_axis, data_array)

    @staticmethod
    def format_integral_array(data_processed_x_axis, data_processed_y_axis, data_processed_z_axis, data_array):
        """Return copies of `data_array` rows with columns 1..3 replaced by the integrals.

        BUGFIX: the previous implementation copied only the outer list
        (`data_array[:]`), so the caller's inner row lists were mutated in
        place; each row is now copied before being rewritten.
        """
        data_processed = []
        for row, x, y, z in zip(data_array, data_processed_x_axis,
                                data_processed_y_axis, data_processed_z_axis):
            new_row = list(row)
            new_row[1] = x
            new_row[2] = y
            new_row[3] = z
            data_processed.append(new_row)
        return data_processed

    @staticmethod
    def get_frequency_charts(experiment_id):
        """Chart payload comparing measured vs. chosen frequency over time."""
        experiment = Experiment.objects.get(pk=experiment_id)
        real_data = ExperimentUtils.get_frequency_real_data(experiment)
        ideal_data = ExperimentUtils.get_frequency_ideal_data(experiment, real_data['timestamps'])
        columns = [real_data['timestamps'], real_data['frequency_real_values'], ideal_data['frequency_ideal_values']]
        colors = {real_data['chart_description']: "#0080ff", ideal_data['chart_description']: "#ff8000"}
        return ExperimentUtils.get_dict_chart(columns, colors)

    @staticmethod
    def get_frequency_ideal_data(experiment, real_timestamps):
        """Ideal (chosen) frequency series aligned to the measured timestamps.

        Each job contributes its chosen frequency once for every real
        timestamp that falls inside the job's time window.
        """
        jobs = experiment.job_set.all()
        chart_description = _('Ideal Frequency x Time')
        frequency_values = [chart_description]
        previous_timestamp = 0
        current_timestamp = 0
        if jobs:
            for job in jobs:
                previous_timestamp = current_timestamp
                # job_time presumably in seconds, timestamps in ms -- TODO confirm.
                current_timestamp += (job.job_time * 1000)
                timestamps_to_add = ExperimentUtils.get_timestamps_to_add(
                    real_timestamps[1:],  # skip the 'x' column header
                    previous_timestamp, current_timestamp)
                for _timestamp in timestamps_to_add:
                    frequency_values.append(str(job.choose_frequency))
        return {
            'chart_description': chart_description,
            'frequency_ideal_values': frequency_values
        }

    @staticmethod
    def get_timestamps_to_add(real_timestamps, previous_timestamp, current_timestamp):
        """Return the timestamps (as ints) inside (previous, current].

        The very first window (previous == 0) is inclusive at 0 so a sample
        taken exactly at t == 0 is not dropped.
        """
        timestamps_to_add = []
        for timestamp in real_timestamps:
            timestamp = int(timestamp)
            if previous_timestamp != 0:
                previous_timestamp_criteria = timestamp > previous_timestamp
            else:
                previous_timestamp_criteria = timestamp >= previous_timestamp
            if previous_timestamp_criteria and timestamp <= current_timestamp:
                timestamps_to_add.append(timestamp)
        return timestamps_to_add

    @staticmethod
    def get_frequency_real_data(experiment):
        """Measured frequency series for `experiment`, ordered by timestamp."""
        frequencies = experiment.experimentfrequency_set.all().order_by('timestamp')
        timestamps = ['x']
        chart_description = _('Real Frequency x Time')
        frequency_values = [chart_description]
        if frequencies:
            for frequency in frequencies:
                timestamps.append(str(frequency.timestamp))
                frequency_values.append(str(frequency.frequency))
        return {
            'timestamps': timestamps,
            'frequency_real_values': frequency_values,
            'chart_description': chart_description
        }
class RestUtils:
    """Thin HTTP helpers for talking to the raspberry REST server at REST_BASE_URL."""

    TIMEOUT = 10  # In seconds

    @classmethod
    def _send_json(cls, method, url, data, headers):
        """Shared POST/PUT implementation: JSON-encode the body, default the
        content-type header, and apply the class timeout."""
        if headers is None:
            headers = {'content-type': 'application/json'}
        return method(REST_BASE_URL + url, data=json.dumps(data),
                      headers=headers, timeout=cls.TIMEOUT)

    @classmethod
    def post_to_rasp_server(cls, url, data=None, headers=None):
        """POST `data` as JSON to REST_BASE_URL + url; returns the response."""
        return cls._send_json(api_requests.post, url, data, headers)

    @classmethod
    def put_to_rasp_server(cls, url, data=None, headers=None):
        """PUT `data` as JSON to REST_BASE_URL + url; returns the response."""
        return cls._send_json(api_requests.put, url, data, headers)

    @classmethod
    def get_from_rasp_server(cls, url, params=None):
        """GET REST_BASE_URL + url with optional query `params`; returns the response."""
        url_to_rest = REST_BASE_URL + url
        return api_requests.get(url_to_rest, params=params, timeout=cls.TIMEOUT)
|
<filename>board.py<gh_stars>0
from utils import Utils
from typing import List
class Cell:
    """One cell of the puzzle grid: a position, a value, and remaining guesses."""

    # Sentinel meaning "no number filled in yet".
    EMPTY = 0

    def __init__(self, pos: tuple, blocked: bool, size: int, value: int):
        self.pos = pos
        self.is_blocked = blocked
        self.value = value
        # Candidate values still considered possible for this cell (1..size).
        self.guesses = [i for i in range(1, size + 1)]
        # Free-text justification associated with each candidate.
        self.reasons = {i: "" for i in self.guesses}

    def remove_guess(self, value: int) -> bool:
        """Tries removing value from the `self.guesses`. Returns `True` if successful.

        Raises:
            AttributeError: if the cell is blocked.
            Warning: if the removal leaves the cell with no guesses at all
                (the removal is still performed before raising).
        """
        if self.is_blocked:
            raise AttributeError(f"Trying to remove guess of blocked cell at position {self.pos}.")
        if value not in self.guesses:
            return False
        self.guesses.remove(value)
        if len(self.guesses) == 0:
            # BUGFIX: message used to interpolate the whole cell repr ({self})
            # where every sibling message reports just the position.
            raise Warning(f"Deleted last guess {value} from cell at position {self.pos}.")
        return True

    def remove_guess_set(self, value_set: set) -> bool:
        """Tries removing all values in `value_set` from self.guesses. Returns `True` if any were removed."""
        # Materialize the list first: `any` over a generator would short-circuit
        # and skip the remaining removals.
        return any([self.remove_guess(value) for value in value_set])

    def unique_guess_left(self) -> bool:
        """Returns whether there is only one possible guess left."""
        return len(self.guesses) == 1

    def try_filling_unique_guess(self) -> bool:
        """Fills the cell if only one possible value is left. Returns `True` if successful.

        Raises:
            Exception: if the cell is blocked.
        """
        if self.is_blocked:
            # BUGFIX: removed a stray '"' that was embedded in the message.
            raise Exception(f"Trying to write to blocked cell at position {self.pos}.")
        if self.unique_guess_left():
            self.value = self.guesses[0]
            return True
        return False

    def __repr__(self):
        return "Pos: " + repr(self.pos) + " Value: " + str(self.value)

    @property
    def is_empty(self) -> bool:
        """True while the cell still holds `Cell.EMPTY`."""
        return self.value == Cell.EMPTY
class Board:
    """A size x size grid of `Cell`s addressed by (x, y) tuples."""
    def __init__(self, size: int):
        """Create an all-empty, unblocked board with side length `size`."""
        self.size = size
        # Every coordinate on the board (x = column, y = row).
        self.all_pos = [(x, y) for x in range(size) for y in range(size)]
        self.grid = {pos: Cell(pos, blocked=False, size=size, value=Cell.EMPTY) for pos in self.all_pos}
    def load(self, filename: str):
        """Loads a game state from file."""
        number_board = {}
        blocked_board = {}
        with open(filename) as f:
            # The file holds two blocks of `size` lines each -- first the
            # blocked-cell mask, then the numbers -- separated by one blank line.
            for decoder_function, board in zip([Utils.decode_blocked_board_line, Utils.decode_number_board_line],
                                               [blocked_board, number_board]):
                for y in range(self.size):
                    line = next(f).strip()
                    row = decoder_function(line)
                    if len(line) != self.size:
                        raise Exception(
                            f"Corrupted text file. Length of line is {len(line)} and should be {self.size}.")
                    # Re-key the decoded row by absolute board position.
                    row = {(x, y): row[x] for x in range(self.size)}
                    board.update(row)
                try:  # The empty line is only after the first block
                    empty_line = next(f).strip()
                    if empty_line != "":
                        raise Exception(f'Corrupted text file. Expected empty line, but got "{empty_line}".')
                except StopIteration:
                    pass
        # Copy the parsed state into the live cells.
        for pos in self.all_pos:
            self.grid[pos].is_blocked = blocked_board[pos]
            self.grid[pos].value = number_board[pos]
    def get_cell(self, pos: tuple) -> Cell:
        """Returns the cell at position `pos`. Raises IndexError when off-board."""
        x, y = pos
        if x < 0 or x >= self.size or y < 0 or y >= self.size:
            raise IndexError(f"Position ({x},{y}) is not on the board.")
        return self.grid[pos]
    def get_guesses(self, pos: tuple) -> list:
        """
        Returns the guesses of a cell.
        :param pos: tuple
        :return: list
        """
        return self.get_cell(pos).guesses
    def is_blocked(self, pos: tuple) -> bool:
        """
        Returns whether the cell is blocked.
        :type pos: tuple
        """
        return self.get_cell(pos).is_blocked
    def is_empty(self, pos: tuple) -> bool:
        """
        Checks if cell is empty.
        :param pos: tuple
        :return: bool
        """
        return self.get_cell(pos).is_empty
    def set_cell_value(self, pos: tuple, value: int):
        """
        Sets cell to number.
        :param pos:
        :param value:
        """
        self.get_cell(pos).value = value
    def remove_guess(self, pos: tuple, value: int):
        """
        Remove `value` from the list of guesses in `cell`.
        :param pos:
        :param value:
        """
        self.get_cell(pos).remove_guess(value)
    def unique_guess_left(self, pos):
        """Returns whether the cell at `pos` has exactly one guess left."""
        return self.get_cell(pos).unique_guess_left()
    @property
    def all_cells(self) -> List[Cell]:
        """All cells of the board as a flat list."""
        return list(self.grid.values())
    def save(self, filename: str):
        """Persist the board to `filename`. Not implemented yet (no-op)."""
        pass
|
from PyUnityVibes.UnityFigure import UnityFigure
import time, math
import numpy as np
def xdot(x, u):
    """Time derivative of the state x = (px, py, theta, v) under command u.

    Positions evolve along the heading at speed v; u drives heading and speed.
    """
    theta = x[2, 0]
    v = x[3, 0]
    return np.array([[v * math.cos(theta)],
                     [v * math.sin(theta)],
                     [u[0, 0]],
                     [u[1, 0]]])
def control(x, w, dw):
    """Feedback-linearizing controller: command driving (px, py) toward target w.

    Solves A @ u = (w - y) + 2 (dw - dy) where y is the current position and
    dy its derivative; A is the input-to-acceleration Jacobian.
    """
    theta = x[2, 0]
    v = x[3, 0]
    sin_t = math.sin(theta)
    cos_t = math.cos(theta)
    A = np.array([[-v * sin_t, cos_t],
                  [v * cos_t, sin_t]])
    y = np.array([[x[0, 0]], [x[1, 0]]])
    dy = np.array([[v * cos_t], [v * sin_t]])
    err = (w - y) + 2 * (dw - dy)
    return np.linalg.inv(A) @ err
def followSupervisor(alpha):
    """Target point and derivative on the ellipse, delayed by `alpha` seconds.

    Relies on the module-level globals `t`, `Lx`, `Ly` set in the main loop:
    the follower tracks the same ellipse as the leader, `alpha` behind in time.
    """
    tau = 0.1 * (t - alpha)
    w = np.array([[Lx * math.sin(tau)], [Ly * math.cos(tau)]])
    dw = np.array([[0.1 * Lx * math.cos(tau)], [-0.1 * Ly * math.sin(tau)]])
    return w, dw
if __name__ == "__main__":
# Initialization of the figure
# Parameters:
# figType: the dimension of the figure (see UnityFigure.FIGURE_*)
# scene: the scene to be loaded (see UnityFigure.SCENE_*)
figure = UnityFigure(UnityFigure.FIGURE_3D, UnityFigure.SCENE_EMPTY)
time.sleep(1)
# Initialization variables
dt = 0.16
xa = np.array([[10], [0], [1], [1]])
ua = np.array([[0], [0]])
xb = np.array([[0], [0], [1], [2]])
dxa, dxb = 0, 0
dza, dzb = 0, 0
s = (4, int(20/dt) + 1)
l = 6
Lx = 15
Ly = 7
# Creation of a submarine and a black box which represents the sensor
anim = figure.createAnimation(dt)
time.sleep(1)
sub = figure.create(UnityFigure.OBJECT_3D_SUBMARINE, 0, -0.4, 0, dimX=5, dimY=5, dimZ=5)
anim.addObject(sub)
sensor = figure.create(UnityFigure.OBJECT_3D_CUBE, 0, -0.5, 0, dimX=0.2, dimY=0.2, dimZ=1, color=UnityFigure.COLOR_BLACK)
anim.addObject(sensor)
# Track the submarine with the camera
# sub1.track()
time.sleep(1)
# Loop with the follow function
for t in np.arange(0, 70, dt):
# Ellipse to follow
wa = np.array([[Lx * math.sin(0.1 * t)], [Ly * math.cos(0.1 * t)]])
dwa = np.array([[Lx * 0.1 * math.cos(0.1 * t)], [-Ly * 0.1 * math.sin(0.1 * t)]])
ua = control(xa, wa, dwa)
# Sensor follow the submarine
wb, dwb = followSupervisor(4)
ub = control(xb, wb, dwb)
# Evolution Equations
xa = xa + dt * xdot(xa, ua)
xb = xb + dt * xdot(xb, ub)
# Append the new frame with calculated position to the submarine and sensor
# Calculation of the rotation angle to maintain the direction of the objects
angle1 = math.atan2(dxa - xa[0][0], dza - xa[1][0]) - math.pi
angle2 = math.atan2(dxb - xb[0][0], dzb - xb[1][0]) - math.pi
anim.appendFrame(sub, x=xa[0][0], y=-0.4, z=xa[1][0], rx=0, ry=math.degrees(angle1), rz=0)
anim.appendFrame(sensor, x=xb[0][0], y=-0.4, z=xb[1][0], rx=0, ry=math.degrees(angle2), rz=0)
# Updating the last position for the direction angle calculation
dxa, dxb = xa[0][0], xb[0][0]
dza, dzb = xa[1][0], xb[1][0]
time.sleep(1)
# Start the animation
figure.animate(anim)
|
<filename>adm/adm_tool.py
"""
****************************************************************************************************************************************************************
****************************************************************************************************************************************************************
___ ____ __ ___ ____ __ _______ ____ _ __
/ | / __ \/ |/ / / __ \/ |/ / __ \ / __ \_________ (_)__ _____/ /_
/ /| | / / / / /|_/ / ______ / /_/ / /|_/ / / / / / /_/ / ___/ __ \ / / _ \/ ___/ __/
/ ___ |/ /_/ / / / / /_____/ / ____/ / / / /_/ / / ____/ / / /_/ / / / __/ /__/ /_
/_/ |_/_____/_/ /_/ /_/ /_/ /_/_____/ /_/ /_/ \____/_/ /\___/\___/\__/
/___/
****************************************************************************************************************************************************************
****************************************************************************************************************************************************************
Copyright (c) 2018, Dolby Laboratories Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
****************************************************************************************************************************************************************
****************************************************************************************************************************************************************
"""
# **************************************************************************************************************************************************************
# **************************************************************************************************************************************************************
# Consts
from adm.adm_const import DIRECTSPEAKERS, MATRIX, OBJECTS, HOA, BINAURAL
from adm.adm_const import MIXED_CONTENT, UNDEFINED, MUSIC, EFFECT, COMPLETE_MAIN
from adm.adm_const import DIALOGUE, VOICEOVER, SPOKEN_SUBTITLE, AUDIO_DESCRIPTION, COMMENTARY, EMERGENCY
from adm.adm_const import NON_DIALOGUE_CONTENT_KIND, DIALOGUE_CONTENT_KIND, MIXED_CONTENT_KIND, NON_DIALOGUE_CONTENT, DIALOGUE_CONTENT, DIALOGUE_TEXT
from adm.adm_const import POLAR, CARTESIAN, COORDINATE_MODE
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_SOUND_SYSTEM_A, LOUDSPEAKER_CONFIG_BS_2051_SOUND_SYSTEM_B
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_SOUND_SYSTEM_D, LOUDSPEAKER_CONFIG_BS_2051_SOUND_SYSTEM_J
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_2_0, LOUDSPEAKER_CONFIG_COMMON_USE_5_0, LOUDSPEAKER_CONFIG_COMMON_USE_5_1
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_5_1_4, LOUDSPEAKER_CONFIG_COMMON_USE_5_0_4
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_7_0_4, LOUDSPEAKER_CONFIG_COMMON_USE_7_1_4
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_HORIZONTAL_SPEAKER_LABELS
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_HORIZONTAL_SPEAKER_ALT_LABELS
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_HORIZONTAL_SPEAKER_AZIMUTH
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_HORIZONTAL_SPEAKER_ALT_AZIMUTH
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_HORIZONTAL_SPEAKER_ELEVATION
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_HORIZONTAL_SPEAKER_DISTANCE
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_HORIZONTAL_SPEAKER_ALT_SWITCH
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_SOUND_SYSTEM_RANGE_START
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_SOUND_SYSTEM_RANGE_END
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_VERTICAL_SPEAKER_LABELS
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_VERTICAL_SPEAKER_ALT_LABELS
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_VERTICAL_SPEAKER_AZIMUTH
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_VERTICAL_SPEAKER_ALT_AZIMUTH
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_VERTICAL_SPEAKER_ELEVATION
from adm.adm_const import LOUDSPEAKER_CONFIG_BS_2051_VERTICAL_SPEAKER_DISTANCE
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_HORIZONTAL_SPEAKER_LABELS
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_HORIZONTAL_SPEAKER_XPOS
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_HORIZONTAL_SPEAKER_YPOS
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_HORIZONTAL_SPEAKER_ALT_YPOS
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_HORIZONTAL_SPEAKER_ZPOS
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_HORIZONTAL_SPEAKER_LFE_INDEX_VALUE
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_HORIZONTAL_SPEAKER_ALT_SWITCH
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_RANGE_START
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_RANGE_END
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_VERTICAL_SPEAKER_LABELS
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_VERTICAL_SPEAKER_XPOS
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_VERTICAL_SPEAKER_YPOS
from adm.adm_const import LOUDSPEAKER_CONFIG_COMMON_USE_VERTICAL_SPEAKER_ZPOS
from adm.adm_const import LOUDSPEAKER_CONFIG_HORIZONTAL_CHANNEL_MASK
from adm.adm_const import LOUDSPEAKER_CONFIG_VERTICAL_CHANNEL_MASK
from adm.adm_const import MIN_AUDIO_CHANNEL, MAX_AUDIO_CHANNEL
from adm.adm_const import MIN_MAX_AZIMUTH, MIN_MAX_ELEVATION, MIN_MAX_DISTANCE
from adm.adm_const import MIN_MAX_XPOS, MIN_MAX_YPOS, MIN_MAX_ZPOS
# Profile support **********************************************************************************************************************************************
from adm.adm_const import LIMITS_FILE
# Logging and related ******************************************************************************************************************************************
from adm.adm_const import ADM_LOG_FILE
from adm.adm_const import MSG_VALIDATION_PASS, MSG_VALIDATION_FAIL, MSG_VALIDATION_ABRT, MSG_INVALID_STUB, MSG_TYP_LBL_IS_DS, MSG_TYP_LBL_IS_OB
from adm.adm_const import MSG_FND_EL, MSG_MIN_EL, MSG_MAX_EL, MSG_FND_NO, MSG_LIM_NO, MSG_CONTENT_INFO
# XML file parsing *********************************************************************************************************************************************
from adm.adm_const import ADM_XML_TYP_DS, ADM_XML_TYP_OB,ADM_XML_INT_TYP_DS, ADM_XML_INT_TYP_OB
from adm.adm_const import ADM_XML_MODE_FILE
from adm.adm_const import ADM_XML_MODE_STRING
from adm.adm_const import ADM_XML_CM, ADM_XML_FT, ADM_XML_AF
from adm.adm_const import ADM_XML_APR_ELN, ADM_XML_APR_ELN_AT_NM, ADM_XML_APR_ELN_AT_LN, ADM_XML_APR_ELN_AT_ID, ADM_XML_APR_ELN_SE_PL
from adm.adm_const import ADM_XML_APR_ELN_SE_PL_AT_LN, ADM_XML_APR_ELN_SE_CR, ADM_XML_APR_ELN_SE_LM, ADM_XML_APR_ELN_SE_AO
from adm.adm_const import ADM_XML_ACO_ELN, ADM_XML_ACO_ELN_AT_ID, ADM_XML_ACO_ELN_AT_NM, ADM_XML_APR_ELN_SE_DG
from adm.adm_const import ADM_XML_AOB_ELN, ADM_XML_AOB_ELN_AT_ID, ADM_XML_AOB_ELN_AT_NM, ADM_XML_AOB_ELN_SE_AP
from adm.adm_const import ADM_XML_AOB_ELN_SE_AT, ADM_XML_AOB_ELN_SE_HL, ADM_XML_AOB_ELN_SE_GN
from adm.adm_const import ADM_XML_APF_ELN, ADM_XML_APF_ELN_AT_ID, ADM_XML_APF_ELN_AT_NM, ADM_XML_APF_ELN_AT_TL, ADM_XML_APF_ELN_SE_AC
from adm.adm_const import ADM_XML_ACF_ELN, ADM_XML_ACF_ELN_AT_ID, ADM_XML_ACF_ELN_AT_NM, ADM_XML_ACF_ELN_SE_AB, ADM_XML_ACF_ELN_SE_AB_SE_CT
from adm.adm_const import ADM_XML_ACF_ELN_SE_AB_SE_SL, ADM_XML_ACF_ELN_SE_AB_SE_PS, ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_XC, ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_YC
from adm.adm_const import ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_ZC, ADM_XML_ACF_ELN_SE_AB_SE_HR, ADM_XML_ACF_ELN_SE_AB_SE_HR_AT_BP, ADM_XML_ACF_ELN_SE_AB_SE_HR_AT_MD
from adm.adm_const import ADM_XML_ACF_ELN_SE_AB_AT_ID, ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_CO, ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_XC, ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_YC
from adm.adm_const import ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_ZC
from adm.adm_const import ADM_XML_ATU_ELN, ADM_XML_ATU_ELN_AT_ID, ADM_XML_ATU_ELN_SE_CF, ADM_XML_ATU_ELN_SE_PF
from adm.adm_const import ADM_XML_APR_ELN_SE_DG_AT_ND, ADM_XML_APR_ELN_SE_DG_AT_DC, ADM_XML_APR_ELN_SE_DG_AT_MC
from adm.adm_const import ADM_XML_INDENT, PMD_XML_MODE_FILE, PMD_XML_MODE_STRING
from adm.adm_const import SADM_XML_TTF_ELN_SE_AT_SE_AR, SADM_XML_TTF_ELN_SE_AT_AT_TI, SADM_XML_FH, SADM_XML_TTF_ELN , SADM_XML_TTF_ELN_SE_AT
# Classes ******************************************************************************************************************************************************
from adm.adm_classes import AudioProgramme, AudioContent, AudioObject, AudioBlockFormat, AudioPackFormat, AudioProgrammeLabel
from adm.adm_classes import AudioChannelFormat, AudioTrackFormat, AudioTrackUID, AudioMXFLookUp, AudioStreamFormat
from adm.adm_classes import Objects, DirectSpeakers, HeadphoneRender, LoudnessMetadata, Dialogue, NonDialogueContentKind, DialogueContentKind, MixedContentKind
from adm.adm_classes import AudioObjectInteraction, GainInteractionRange, PositionInteractionRange, Zone, ChannelLock, JumpPosition
from adm.adm_classes import ItemValidationData, Position, Headphone, AudioFormatExtended
# External system **********************************************************************************************************************************************
import csv
import logging
import xml.etree.cElementTree as ET
from collections import defaultdict
import xml.dom.minidom as minidom
import os
# Globals to keep track of ID allocation, lists etc. ***********************************************************************************************************
# Monotonic counters used to allocate the numeric part of new ADM element IDs.
audio_object_counter = 0
audio_content_counter = 0
audio_programme_counter = 0
audio_pack_format_counter = 0
audio_channel_format_counter = 0
audio_stream_format_counter = 0
audio_track_format_counter = 0
audio_block_format_counter = 0
audio_track_uid_counter = 0
# Active coordinate convention for positions (POLAR or CARTESIAN).
coordinate_mode = POLAR
# Model lists holding the constructed/parsed ADM elements; cleared and
# repopulated by parse_adm_xml().
audio_programme_list = []
audio_content_list = []
audio_object_list = []
audio_channel_format_list = []
audio_pack_format_list = []
audio_block_format_list = []
audio_track_uid_list = []
transport_track_format_list = []
# ************************************************************************************************************************************************************ #
# ************************************************************************************************************************************************************ #
def check_parameter_type(parameter, expected_type, calling_function):
    """Log (debug level) when `parameter` is not exactly `expected_type`.

    Purely diagnostic -- never raises. Intentionally compares with
    `type(...) is`, not isinstance, so subclass mismatches are reported too.
    """
    if type(parameter) is expected_type:
        return
    logging.debug(
        str(type(parameter)) + " value of " + str(parameter)
        + ' is not the expected data type of ' + str(expected_type)
        + ' in ' + str(calling_function))
    return
def check_parameter_value_range(parameter, min_value, max_value, calling_function):
    """Log (debug level) when `parameter` lies outside [min_value, max_value].

    Purely diagnostic -- never raises.
    """
    if min_value <= parameter <= max_value:
        return
    logging.debug(
        str(type(parameter)) + " value of " + str(parameter)
        + ' is out of min/max range of ' + str(min_value)
        + ' to ' + str(max_value) + ' in ' + str(calling_function))
    return
def prettify_xml(elem):
    """Return `elem` serialized as indented, human-readable XML.

    ElementTree emits everything on one line, so round-trip the bytes through
    minidom to pretty-print. Google's XML style guide states 2-space indents
    for sub-element nesting (supplied via ADM_XML_INDENT).
    """
    raw_bytes = ET.tostring(elem, 'utf-8')
    document = minidom.parseString(raw_bytes)
    return document.toprettyxml(indent=ADM_XML_INDENT)
def find_list_reference_by_name(list_to_search, name):
    """Return the first element of `list_to_search` whose `.name` equals `name`.

    Returns None when no element matches (including for an empty list).
    """
    # Idiom fix: iterate directly instead of `for i in range(len(...))`.
    for item in list_to_search:
        if item.name == name:
            return item
    return None
def find_list_reference_by_id(list_to_search, id):
    """Return the first element of `list_to_search` whose `.id` equals `id`.

    Returns None when no element matches. (The parameter name shadows the
    builtin `id`, kept for backward compatibility with keyword callers.)
    """
    # Idiom fix: iterate directly instead of `for i in range(len(...))`.
    for item in list_to_search:
        if item.id == id:
            return item
    return None
def get_audio_program_reference(object_name):
    """Return the audioProgramme named `object_name` from the global
    `audio_programme_list`, or None when absent.
    """
    # Idiom fix: plain for-loop instead of a manual while/__len__ counter.
    for programme in audio_programme_list:
        if programme.name == object_name:
            return programme
    return None
def get_audio_content_reference(object_name):
    """Return the audioContent named `object_name` from the global
    `audio_content_list`, or None when absent.

    BUGFIX: the loop bound previously used len(audio_object_list) while
    indexing audio_content_list, which could raise IndexError or silently
    miss entries whenever the two lists differed in length.
    """
    for content in audio_content_list:
        if content.name == object_name:
            return content
    return None
def parse_adm_xml(xml_struct, mode):
    """Parse an ADM / S-ADM XML document into the module-level model lists and
    return a fully cross-linked AudioFormatExtended container.

    Parameters:
        xml_struct: a file path (mode == ADM_XML_MODE_FILE) or an XML string
                    (mode == ADM_XML_MODE_STRING).
        mode:       selects how xml_struct is interpreted.

    Returns:
        An AudioFormatExtended object on success, False when a mandatory
        element is missing or an occurrence-limit check fails.

    Side effects: clears and repopulates the module-level audio_* lists.
    """
    global audio_programme_list, audio_content_list, audio_object_list, audio_channel_format_list
    # NOTE(review): 'audio_block_format' here does not match the name actually
    # used below ('audio_block_format_list').  Harmless at present because the
    # lists are only mutated in place (del/append), never rebound, so the
    # global declaration is not strictly required — but confirm the intent.
    global audio_pack_format_list, audio_block_format, audio_track_uid_list, transport_track_format_list
    # Clear out model lists
    del audio_programme_list[:]
    del audio_content_list[:]
    del audio_object_list[:]
    del audio_channel_format_list[:]
    del audio_pack_format_list[:]
    del audio_block_format_list[:]
    del audio_track_uid_list[:]
    del transport_track_format_list[:]
    tree = None
    xml_audio_programme_list = []
    xml_audio_content_list = []
    xml_audio_object_list = []
    xml_audio_channel_format_list = []
    xml_audio_pack_format_list = []
    xml_audio_block_format_list = []
    xml_audio_track_uid_list = []
    xml_transport_track_format_list = []
    # Is source a blob of XML or an XML file ?
    # NOTE(review): if mode is neither constant, tree stays None and the
    # getroot() call below raises AttributeError — confirm callers only pass
    # the two supported modes.
    if mode == ADM_XML_MODE_FILE:
        logging.info('Parsing XML ADM file ')  # + xml_struct.name)
        tree = ET.ElementTree(file=xml_struct)
    elif mode == ADM_XML_MODE_STRING:
        logging.info('Parsing XML ADM blob ')
        tree = ET.ElementTree(ET.fromstring(xml_struct))
    # NOTE(review): result discarded — redundant with the assignment below.
    tree.getroot()
    root = tree.getroot()
    # Find root of metadata, there are two variants with S-ADM, one with and one without coreMetadata in the structure
    sadm_format_root = root.find(ADM_XML_CM)
    if sadm_format_root is not None:
        adm_format_root = sadm_format_root.find(ADM_XML_FT)
        adm_format_extended_root = adm_format_root.find(ADM_XML_AF)
    else:
        adm_format_extended_root = root.find(ADM_XML_AF)
    # Get virtual to physical track mapping info
    sadm_frame_header_root = root.find(SADM_XML_FH)
    if sadm_frame_header_root is not None:
        sadm_transport_track_format = sadm_frame_header_root.find(SADM_XML_TTF_ELN)
        if sadm_transport_track_format is not None:
            xml_transport_track_format_list = sadm_transport_track_format.findall(SADM_XML_TTF_ELN_SE_AT)
    if adm_format_extended_root is not None:
        xml_audio_programme_list = adm_format_extended_root.findall(ADM_XML_APR_ELN)
    else:
        logging.critical('Failed to find ' + ADM_XML_APR_ELN + ' *** Aborting ***')
        return False
    # Check limits for element, abort if content is invalid
    if not get_item_limits(ADM_XML_APR_ELN, len(xml_audio_programme_list)):
        return False
    # Populate programmes list from XML source
    if xml_audio_programme_list is not None:
        for i in range(0, len(xml_audio_programme_list)):
            audio_programme_list.append(AudioProgramme(xml_audio_programme_list[i].attrib[ADM_XML_APR_ELN_AT_NM],
                                                       xml_audio_programme_list[i].attrib[ADM_XML_APR_ELN_AT_LN],
                                                       xml_audio_programme_list[i].attrib[ADM_XML_APR_ELN_AT_ID]))
            # For each program get all audioProgrammeLabels, audioContentIDRefs, and loudnessMetadata
            k = xml_audio_programme_list[i].findall(ADM_XML_APR_ELN_SE_PL)
            # Check limits for element, abort if content is invalid
            if not get_item_limits(ADM_XML_APR_ELN_SE_PL, len(k)):
                return False
            for j in range(0, len(k)):
                audio_programme_list[i].audio_programme_label.append(AudioProgrammeLabel(k[j].text, k[j].attrib[ADM_XML_APR_ELN_SE_PL_AT_LN]))
            k = xml_audio_programme_list[i].findall(ADM_XML_APR_ELN_SE_CR)
            # Check limits for element, abort if content is invalid
            if not get_item_limits(ADM_XML_APR_ELN_SE_CR, len(k)):
                return False
            if k is not None:
                for j in range(0, len(k)):
                    audio_programme_list[i].audio_content_idref.append(k[j].text)
            else:
                logging.debug(MSG_INVALID_STUB + ADM_XML_APR_ELN_SE_CR)
            # TODO actually grab loudness metadata and populate
            k = xml_audio_programme_list[i].findall(ADM_XML_APR_ELN_SE_LM)
            # Check limits for element, abort if content is invalid
            if not get_item_limits(ADM_XML_APR_ELN_SE_LM, len(k)):
                return False
            if k is not None:
                # NOTE(review): each iteration overwrites the same attribute;
                # only the last loudnessMetadata element has any effect.
                for j in range(0, len(k)):
                    audio_programme_list[i].loudness_metadata = LoudnessMetadata()
            else:
                logging.debug(MSG_INVALID_STUB + ADM_XML_APR_ELN_SE_LM)
    else:
        logging.debug(MSG_INVALID_STUB + ADM_XML_APR_ELN)
    # Populate content list from XML source
    xml_audio_content_list = adm_format_extended_root.findall(ADM_XML_ACO_ELN)
    # Check limits for element, abort if content is invalid
    if not get_item_limits(ADM_XML_ACO_ELN, len(xml_audio_content_list)):
        return False
    if xml_audio_content_list is not None:
        for i in range(0, len(xml_audio_content_list)):
            audio_content_list.append(AudioContent(xml_audio_content_list[i].attrib[ADM_XML_ACO_ELN_AT_ID],
                                                   xml_audio_content_list[i].attrib[ADM_XML_ACO_ELN_AT_NM]))
            # For each content get dialogue entry
            k = xml_audio_content_list[i].findall(ADM_XML_APR_ELN_SE_DG)
            # Check limits for element, abort if content is invalid
            if not get_item_limits(ADM_XML_APR_ELN_SE_DG, len(k)):
                return False
            if k is not None:
                # Dialogue value selects which attribute carries the content kind:
                # 0 = non-dialogue, 1 = dialogue, 2 = mixed.
                x = int(k[0].text)
                z = DIALOGUE_TEXT[x] + ' = '
                if x == 0:
                    y = int(k[0].attrib[ADM_XML_APR_ELN_SE_DG_AT_ND])
                    z = z + NON_DIALOGUE_CONTENT_KIND[y]
                    audio_content_list[i].dialogue = Dialogue(k[0].text, k[0].attrib[ADM_XML_APR_ELN_SE_DG_AT_ND])
                elif x == 1:
                    y = int(k[0].attrib[ADM_XML_APR_ELN_SE_DG_AT_DC])
                    z = z + DIALOGUE_CONTENT_KIND[y]
                    audio_content_list[i].dialogue = Dialogue(k[0].text, k[0].attrib[ADM_XML_APR_ELN_SE_DG_AT_DC])
                elif x == 2:
                    y = int(k[0].attrib[ADM_XML_APR_ELN_SE_DG_AT_MC])
                    z = z + MIXED_CONTENT_KIND[y]
                    audio_content_list[i].dialogue = Dialogue(k[0].text, k[0].attrib[ADM_XML_APR_ELN_SE_DG_AT_MC])
                logging.info(ADM_XML_APR_ELN_SE_DG + ', ' + z)
            else:
                logging.debug(MSG_INVALID_STUB + ADM_XML_APR_ELN_SE_DG)
            # For each content get audio object entries
            k = xml_audio_content_list[i].findall(ADM_XML_APR_ELN_SE_AO)
            # Check limits for element, abort if content is invalid
            if not get_item_limits(ADM_XML_APR_ELN_SE_AO, len(k)):
                return False
            if k is not None:
                for j in range(0, len(k)):
                    audio_content_list[i].audio_object_idref.append(k[j].text)
            else:
                logging.debug(MSG_INVALID_STUB + ADM_XML_APR_ELN_SE_AO)
    else:
        logging.debug(MSG_INVALID_STUB + ADM_XML_ACO_ELN)
    # Populate object list from XML source
    xml_audio_object_list = adm_format_extended_root.findall(ADM_XML_AOB_ELN)
    # Check limits for element, abort if content is invalid
    if not get_item_limits(ADM_XML_AOB_ELN, len(xml_audio_object_list)):
        return False
    if xml_audio_object_list is not None:
        for i in range(0, len(xml_audio_object_list)):
            audio_object_list.append(AudioObject(xml_audio_object_list[i].attrib[ADM_XML_AOB_ELN_AT_ID],
                                                 xml_audio_object_list[i].attrib[ADM_XML_AOB_ELN_AT_NM]))
            # For each object get audio pack entry
            k = xml_audio_object_list[i].findall(ADM_XML_AOB_ELN_SE_AP)
            # Check limits for element, abort if content is invalid
            if not get_item_limits(ADM_XML_AOB_ELN_SE_AP, len(k)):
                return False
            if k is not None:
                for j in range(0, len(k)):
                    audio_object_list[i].audio_pack_idref.append(k[j].text)
            else:
                logging.debug(MSG_INVALID_STUB + ADM_XML_AOB_ELN_SE_AP)
            # For each object get audio track uids
            k = xml_audio_object_list[i].findall(ADM_XML_AOB_ELN_SE_AT)
            # Check limits for element, abort if content is invalid
            if not get_item_limits(ADM_XML_AOB_ELN_SE_AT, len(k)):
                return False
            if k is not None:
                for j in range(0, len(k)):
                    audio_object_list[i].audio_track_idref.append(k[j].text)
            else:
                logging.debug(MSG_INVALID_STUB + ADM_XML_AOB_ELN_SE_AT)
            # For each object get gain
            k = xml_audio_object_list[i].findall(ADM_XML_AOB_ELN_SE_GN)
            # Check limits for element, abort if content is invalid
            if not get_item_limits(ADM_XML_AOB_ELN_SE_GN, len(k)):
                return False
            if k is not None:
                # NOTE(review): overwrites on each iteration — only the last
                # gain element found is retained.
                for j in range(0, len(k)):
                    audio_object_list[i].gain = k[j].text
            else:
                logging.debug(MSG_INVALID_STUB + ADM_XML_AOB_ELN_SE_GN)
            # For each object get headlocked
            k = xml_audio_object_list[i].findall(ADM_XML_AOB_ELN_SE_HL)
            # Check limits for element, abort if content is invalid
            if not get_item_limits(ADM_XML_AOB_ELN_SE_HL, len(k)):
                return False
            if k is not None:
                for j in range(0, len(k)):
                    audio_object_list[i].head_locked = k[j].text
            else:
                logging.debug(MSG_INVALID_STUB + ADM_XML_AOB_ELN_SE_HL)
    else:
        logging.debug(MSG_INVALID_STUB + ADM_XML_AOB_ELN)
    # Populate pack format list from XML source
    xml_audio_pack_format_list = adm_format_extended_root.findall(ADM_XML_APF_ELN)
    # Check limits for element, abort if content is invalid
    if not get_item_limits(ADM_XML_APF_ELN, len(xml_audio_pack_format_list)):
        return False
    # TODO set different ranges of channel formats based upon typelabel
    if xml_audio_pack_format_list is not None:
        for i in range(0, len(xml_audio_pack_format_list)):
            audio_pack_format_list.append(AudioPackFormat(xml_audio_pack_format_list[i].attrib[ADM_XML_APF_ELN_AT_ID],
                                                          xml_audio_pack_format_list[i].attrib[ADM_XML_APF_ELN_AT_NM],
                                                          xml_audio_pack_format_list[i].attrib[ADM_XML_APF_ELN_AT_TL]))
            # For each pack get audio channel format
            k = xml_audio_pack_format_list[i].findall(ADM_XML_APF_ELN_SE_AC)
            # Check limits for element, abort if content is invalid
            if not get_item_limits(ADM_XML_APF_ELN_SE_AC, len(k)):
                return False
            if k is not None:
                for j in range(0, len(k)):
                    audio_pack_format_list[i].audio_channel_idref.append(k[j].text)
            else:
                logging.debug(MSG_INVALID_STUB + ADM_XML_APF_ELN_SE_AC)
    else:
        logging.debug(MSG_INVALID_STUB + ADM_XML_APF_ELN)
    # Populate channel format list from XML source
    xml_audio_channel_format_list = adm_format_extended_root.findall(ADM_XML_ACF_ELN)
    # Check limits for element, abort if content is invalid
    if not get_item_limits(ADM_XML_ACF_ELN, len(xml_audio_channel_format_list)):
        return False
    audio_block_format_counter = 0
    if xml_audio_channel_format_list is not None:
        for i in range(0, len(xml_audio_channel_format_list)):
            audio_channel_format_list.append(AudioChannelFormat(xml_audio_channel_format_list[i].attrib[ADM_XML_ACF_ELN_AT_ID],
                                                                xml_audio_channel_format_list[i].attrib[ADM_XML_ACF_ELN_AT_NM]))
            # Is it a bed or object ? (use to set the cartesian flag)
            # NOTE(review): if the ID matches neither DirectSpeakers nor Objects,
            # type_label is left unbound (or stale from the previous iteration)
            # and the check further down would misbehave — confirm inputs always
            # carry one of the two type substrings.
            if xml_audio_channel_format_list[i].attrib[ADM_XML_ACF_ELN_AT_ID].find(ADM_XML_TYP_DS) > -1:
                type_label = ADM_XML_INT_TYP_DS
                logging.info(ADM_XML_ACF_ELN + MSG_TYP_LBL_IS_DS)
            elif xml_audio_channel_format_list[i].attrib[ADM_XML_ACF_ELN_AT_ID].find(ADM_XML_TYP_OB) > -1:
                type_label = ADM_XML_INT_TYP_OB
                logging.info(ADM_XML_ACF_ELN + MSG_TYP_LBL_IS_OB)
            # For each channel format get audio blocks
            k = xml_audio_channel_format_list[i].findall(ADM_XML_ACF_ELN_SE_AB)
            # Check limits for element, abort if content is invalid
            if not get_item_limits(ADM_XML_ACF_ELN_SE_AB, len(k)):
                return False
            # Currently we assume that there is only one audio block for now as PMD XML does not support dynamic XML timelines
            if k is not None:
                audio_block_format_list.append(AudioBlockFormat(k[0].attrib[ADM_XML_ACF_ELN_SE_AB_AT_ID]))
                audio_block_format_list[audio_block_format_counter].position_coord = Position(False)
                # If we are dealing with a type lable of Object then set teh cartesian flag
                if type_label == ADM_XML_INT_TYP_OB:
                    audio_block_format_list[audio_block_format_counter].cartesian = 1
                # Get position coordinates and update
                m = k[0].findall(ADM_XML_ACF_ELN_SE_AB_SE_PS)
                # Check limits for element, abort if content is invalid
                if not get_item_limits(ADM_XML_ACF_ELN_SE_AB_SE_PS, len(m)):
                    return False
                # Each position element carries a coordinate attribute naming
                # which axis (X/azimuth, Y/elevation, Z/distance) it holds.
                for q in range(0, len(m)):
                    if m[q].attrib[ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_CO] == ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_XC:
                        audio_block_format_list[audio_block_format_counter].position_coord.x_or_az = m[q].text
                    if m[q].attrib[ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_CO] == ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_YC:
                        audio_block_format_list[audio_block_format_counter].position_coord.y_or_el = m[q].text
                    if m[q].attrib[ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_CO] == ADM_XML_ACF_ELN_SE_AB_SE_PS_AT_ZC:
                        audio_block_format_list[audio_block_format_counter].position_coord.z_or_ds = m[q].text
                # Get speaker label and update
                m = k[0].find(ADM_XML_ACF_ELN_SE_AB_SE_SL)
                if m is not None:
                    audio_block_format_list[audio_block_format_counter].speaker_label = m.text
                    logging.info(MSG_FND_EL + ADM_XML_ACF_ELN_SE_AB_SE_SL + ' ' + m.text)
                # Get headphone render and update
                m = k[0].find(ADM_XML_ACF_ELN_SE_AB_SE_HR)
                if m is not None:
                    audio_block_format_list[audio_block_format_counter].headphone_render = \
                        Headphone(m.text, m.attrib[ADM_XML_ACF_ELN_SE_AB_SE_HR_AT_BP], m.attrib[ADM_XML_ACF_ELN_SE_AB_SE_HR_AT_MD])
                    logging.info(MSG_FND_EL + ADM_XML_ACF_ELN_SE_AB_SE_HR + ' ' + m.text)
                audio_block_format_counter += 1
            else:
                logging.debug(MSG_INVALID_STUB + ADM_XML_ACF_ELN_SE_AB)
    else:
        logging.debug(MSG_INVALID_STUB + ADM_XML_ACF_ELN)
    # Populate audio track uid list from XML source
    xml_audio_track_uid_list = adm_format_extended_root.findall(ADM_XML_ATU_ELN)
    # Check limits for element, abort if content is invalid
    if not get_item_limits(ADM_XML_ATU_ELN, len(xml_audio_track_uid_list)):
        return False
    if xml_audio_track_uid_list is not None:
        # audio_track_uid_counter = 0
        for i in range(0, len(xml_audio_track_uid_list)):
            audio_track_uid_list.append(AudioTrackUID(xml_audio_track_uid_list[i].attrib[ADM_XML_ATU_ELN_AT_ID]))
            # Get channel format id and update
            j = xml_audio_track_uid_list[i].findall(ADM_XML_ATU_ELN_SE_CF)
            # TODO Add extra checking for only 1 each of audioChannelFormatIDRef & audioPackFormatIDRef
            if j is not None:
                # Check limits for element, abort if content is invalid
                if not get_item_limits(ADM_XML_ATU_ELN_SE_CF, len(j)):
                    return False
                audio_track_uid_list[i].channel_format_id = j[0].text
                logging.info(MSG_FND_EL + '1 ' + ADM_XML_ATU_ELN_SE_CF + ' ' + j[0].text)
            else:
                logging.critical(ADM_XML_ATU_ELN + MSG_FND_NO + ADM_XML_ATU_ELN_SE_CF)
                return False
            # Get pack format id and update
            j = xml_audio_track_uid_list[i].findall(ADM_XML_ATU_ELN_SE_PF)
            if j is not None:
                # Check limits for element, abort if content is invalid
                if not get_item_limits(ADM_XML_ATU_ELN_SE_PF, len(j)):
                    return False
                audio_track_uid_list[i].pack_format_id = j[0].text
                logging.info(MSG_FND_EL + '1 ' + ADM_XML_ATU_ELN_SE_PF + ' ' + j[0].text)
            else:
                logging.critical(ADM_XML_ATU_ELN + MSG_FND_NO + ADM_XML_ATU_ELN_SE_PF)
                return False
    else:
        # NOTE(review): logs the channel-format constant (ADM_XML_ACF_ELN)
        # although this branch concerns audioTrackUID — looks like a
        # copy-paste slip; confirm before changing the message.
        logging.debug(MSG_INVALID_STUB + ADM_XML_ACF_ELN)
    # Populate the final model
    mdl_audio_programmes = []
    mdl_audio_content = []
    mdl_audio_object = []
    mdl_audio_pack_fmt = []
    mdl_audio_channel_fmt = []
    mdl_audio_block_fmt = []
    mdl_audio_track_uid = []
    # Start populating audio programmes
    for i in range(0, len(audio_programme_list)):
        mdl_audio_programmes.append(AudioProgramme(audio_programme_list[i].name, audio_programme_list[i].language, audio_programme_list[i].id))
        mdl_audio_programmes[i].loudness_metadata = audio_programme_list[i].loudness_metadata
        mdl_audio_programmes[i].audio_programme_label = audio_programme_list[i].audio_programme_label
    # Start populating audio content
    for i in range(0, len(audio_content_list)):
        mdl_audio_content.append(AudioContent(audio_content_list[i].id, audio_content_list[i].name))
        mdl_audio_content[i].dialogue = audio_content_list[i].dialogue
    # Start populating audio object
    for i in range(0, len(audio_object_list)):
        mdl_audio_object.append(AudioObject(audio_object_list[i].id, audio_object_list[i].name))
    # Start populating audio pack
    for i in range(0, len(audio_pack_format_list)):
        mdl_audio_pack_fmt.append(AudioPackFormat(audio_pack_format_list[i].id, audio_pack_format_list[i].name, audio_pack_format_list[i].type_label))
    # Start populating audio block
    mdl_audio_block_fmt = audio_block_format_list
    # Update audio channel with audio block
    for i in range(0, len(audio_channel_format_list)):
        mdl_audio_channel_fmt.append(AudioChannelFormat(audio_channel_format_list[i].id, audio_channel_format_list[i].name))
        # Derive the matching first audioBlockFormat ID from the channel ID
        # (block IDs are 'AB_' + channel suffix + '_00000001').
        search = 'AB_' + mdl_audio_channel_fmt[i].id[3:] + '_00000001'
        mdl_audio_channel_fmt[i].audio_block = find_list_reference_by_id(mdl_audio_block_fmt, search)
    # Update audio pack with audio channel
    for i in range(0, len(mdl_audio_pack_fmt)):
        for j in range(0, len(audio_pack_format_list[i].audio_channel_idref)):
            mdl_audio_pack_fmt[i].audio_channel_idref.append(find_list_reference_by_id(mdl_audio_channel_fmt, audio_pack_format_list[i].audio_channel_idref[j]))
    # Start populating audio track
    for i in range(0, len(audio_track_uid_list)):
        mdl_audio_track_uid.append(AudioTrackUID(audio_track_uid_list[i].id))
        for j in range(0, len(mdl_audio_channel_fmt)):
            if mdl_audio_channel_fmt[j].id == audio_track_uid_list[i].channel_format_id:
                mdl_audio_track_uid[i].channel_format_id = mdl_audio_channel_fmt[j]
                break
        for j in range(0, len(mdl_audio_pack_fmt)):
            if mdl_audio_pack_fmt[j].id == audio_track_uid_list[i].pack_format_id:
                mdl_audio_track_uid[i].pack_format_id = mdl_audio_pack_fmt[j]
                break
        # Map the track UID onto its physical transport track (S-ADM only)
        for j in range(0, len(xml_transport_track_format_list)):
            q = xml_transport_track_format_list[j].findall(SADM_XML_TTF_ELN_SE_AT_SE_AR)
            if q is not None:
                for k in range(0, len(q)):
                    if q[k].text == audio_track_uid_list[i].id:
                        mdl_audio_track_uid[i].track_id = xml_transport_track_format_list[j].attrib[SADM_XML_TTF_ELN_SE_AT_AT_TI]
                        break
    # Update audio object with gain, audio_pack_idref, audio_track_uidref
    for i in range(0, len(mdl_audio_object)):
        # gain
        for j in range(0, len(audio_object_list)):
            if mdl_audio_object[i].id == audio_object_list[j].id:
                mdl_audio_object[i].gain = audio_object_list[j].gain
                break
        # Audio pack
        for j in range(0, len(audio_object_list)):
            if mdl_audio_object[i].id == audio_object_list[j].id:
                mdl_audio_object[i].audio_pack_idref.append(find_list_reference_by_id(mdl_audio_pack_fmt, audio_object_list[j].audio_pack_idref[0]))
                break
        # Audio tracks
        for j in range(0, len(audio_object_list)):
            if mdl_audio_object[i].id == audio_object_list[j].id:
                for k in range(0, len(audio_object_list[j].audio_track_idref)):
                    mdl_audio_object[i].audio_track_idref.append(find_list_reference_by_id(mdl_audio_track_uid, audio_object_list[j].audio_track_idref[k]))
                break
    # Update audio content with audio_object_idref
    for i in range(0, len(mdl_audio_content)):
        for j in range(0, len(audio_content_list)):
            if mdl_audio_content[i].id == audio_content_list[j].id:
                for k in range(0, len(audio_content_list[j].audio_object_idref)):
                    mdl_audio_content[i].audio_object_idref.append(find_list_reference_by_id(mdl_audio_object, audio_content_list[j].audio_object_idref[k]))
                break
    # Update audio programmes with audio_content_idref
    for i in range(0, len(mdl_audio_programmes)):
        for j in range(0, len(audio_programme_list)):
            if mdl_audio_programmes[i].id == audio_programme_list[j].id:
                for k in range(0, len(audio_programme_list[j].audio_content_idref)):
                    z = find_list_reference_by_id(mdl_audio_content, audio_programme_list[j].audio_content_idref[k])
                    mdl_audio_programmes[i].audio_content_idref.append(z)
                break
    # Package all the data together into a audio format extended container
    a = AudioFormatExtended()
    a.audio_programme = mdl_audio_programmes
    a.audio_content = mdl_audio_content
    a.audio_object = mdl_audio_object
    a.audio_pack_format = mdl_audio_pack_fmt
    a.audio_channel_format = mdl_audio_channel_fmt
    a.audio_track_uid = mdl_audio_track_uid
    return a
def get_item_limits(item_name, number_found):
    """Validate that *number_found* occurrences of *item_name* lie within the
    min/max occurrence limits declared in the LIMITS_FILE CSV.

    Returns True when within limits (or when validation is disabled),
    False otherwise.

    NOTE: validation is currently disabled — the early ``return True`` below
    short-circuits the whole CSV lookup, so everything after it is dead code
    kept for when the lookup is made fast enough to re-enable.
    """
    # TODO This is slow
    return True
    # --- unreachable while the early return above is in place ---
    minval = None
    maxval = None
    limits_filename = os.path.dirname(__file__) + "/" + LIMITS_FILE
    with open(limits_filename) as csv_file:
        csv_reader = csv.DictReader(csv_file)
        for row in csv_reader:
            if row["element"] == item_name:
                minval = row["min_occurs"]
                maxval = row["max_occurs"]
                break
    # No row for this element: treat as a validation failure.
    if minval is None:
        logging.critical(MSG_VALIDATION_FAIL + MSG_FND_NO + item_name + MSG_LIM_NO)
        return False
    logging.info(MSG_FND_EL + str(number_found) + ' ' + item_name + MSG_MIN_EL + minval + MSG_MAX_EL + maxval)
    if number_found < int(minval) or number_found > int(maxval):
        logging.critical(MSG_VALIDATION_FAIL + MSG_VALIDATION_ABRT)
        return False
    else:
        logging.info(MSG_VALIDATION_PASS)
    return True
def start_logging():
    """Configure root logging: ERROR level, timestamped format, written to
    ADM_LOG_FILE (truncated on each run)."""
    log_format = '%(asctime)s %(levelname)s %(message)s'
    logging.basicConfig(level=logging.ERROR, format=log_format, filename=ADM_LOG_FILE, filemode='w')
    logging.info('Started')
# ************************************************************************************************************************************************************ #
# Create the ADM model of content
# ************************************************************************************************************************************************************ #
# start_logging()
# Set global coordinate mode
# Module-wide default coordinate convention used when building positions;
# CARTESIAN is a constant defined elsewhere in this module.
coordinate_mode = CARTESIAN
if __name__ == "__main__":
    # Ad-hoc command-line driver: times parse_adm_xml() over a sample file.
    print ("main")
    cmdline = True
    if cmdline:
        import os
        import linecache
        import time
        import tracemalloc
        # tracemalloc.start()
        # Number of parse iterations to average the call time over.
        loop_counter = 1
        startsecs = time.time()
        for i in range(0, loop_counter):
            my_metadata = parse_adm_xml("skip_sadm.xml", ADM_XML_MODE_FILE)
            #my_metadata = parse_adm_xml("gen.adm_+_gen.sadm.xml", ADM_XML_MODE_FILE)
        endsecs = time.time()
        call_time = (endsecs - startsecs) / loop_counter
        print('Runtime = ' + str(endsecs - startsecs))
        print ('Call time = ' + str(call_time))
        # Disabled tracemalloc memory report (enable tracemalloc.start() above
        # before re-activating this block).
        """
        key_type = 'lineno'
        limit = 10
        snapshot = tracemalloc.take_snapshot()
        snapshot = snapshot.filter_traces((tracemalloc.Filter(False, "<frozen importlib._bootstrap>"), tracemalloc.Filter(False, "<unknown>")))
        top_stats = snapshot.statistics(key_type)
        print("Top %s lines" % limit)
        print('{: <10}'.format("index"), '{: <40}'.format("filename"), '{: <10}'.format("line no"), '{: <10}'.format("size kB"))
        for index, stat in enumerate(top_stats[:limit], 1):
            frame = stat.traceback[0]
            filename = os.sep.join(frame.filename.split(os.sep)[-2:])
            print('{: <10}'.format(index), '{: <40}'.format(filename), '{: <10}'.format(frame.lineno), '{:0.1f}'.format(stat.size / 1024), "kB")
            line = linecache.getline(frame.filename, frame.lineno).strip()
            if line:
                print('    %s' % line)
        other = top_stats[limit:]
        if other:
            size = sum(stat.size for stat in other)
            print("%s other: %.1f KiB" % (len(other), size / 1024))
        total = sum(stat.size for stat in top_stats)
        print("Total allocated size: %.1f KiB" % (total / 1024))
        """
# ************************************************************************************************************************************************************ #
# ************************************************************************************************************************************************************ #
"""
Rules For Parsing ADM
1. Search for all objects
2. For each object store audioObjectID, audioObjectName, audioPackFormatIDRef, gain, headlocked, and list of audioTrackUIDRef
3. Search for each audioPackFormatID
4. For each audio pack store audioPackFormatID, audioPackFormatName, typeLabel, and list of audioChannelFormatIDRef
5. Search for each audioChannelFormatID
6. For each channel format store audioChannelFormatID, audioChannelFormatName, typeLabel, and list of audioBlockFormat entries
7. Search for each audioBlockFormat
7. For each audioBlockFormat store audioBlockFormatID, cartesian flag, position coordinates, speaker label, objectDivergence, headphoneRender
8. Search for each audioTrackUID
9. For each audioTrack store UID, audioChannelFormatIDRef, audioPacklFormatIDRef
10. Search for each audioProgramme
11. For each audioProgramme store audioProgrammeID, audioProgrammeName, audioProgrammeLanguage, loudnessMetadata, list of audioProgrammeLabel, list of audioContentIDRef
12. Search for each audioContent
13. For each store audioContentID, audioContentName, list of audioObjectIDRef, dialog
"""
# ************************************************************************************************************************************************************ #
|
<reponame>yunjung-lee/class_python_numpy
# Tutorial script: normalization, binarization and one-hot encoding with
# numpy / scikit-learn.  (Comments translated from Korean.)
########################### Normalization ############################################
# Normalization: uses the max and min values to map data into the 0~1 range.
# A transform used very heavily in machine learning (artificial neural networks).
import numpy as np
import pandas as pd
import sklearn.preprocessing
from pandas import DataFrame
from sklearn.preprocessing import MinMaxScaler,minmax_scale, Binarizer, binarize
X =np.array(([[10.,-10.,1.],
             [5.,0.,2.],
             [0.,10.,3]]))
# example columns: midterm score, final score, rank
print(X.max())
# 10.0
print(X.max(axis=0))
# [10. 10.  3.]
print(X.max(axis=1))
# [10.  5. 10.]
print(X.max(axis=0)-X.min(axis=0))
# [10. 20.  2.]
# Normalization (min-max scale): computed per column
print((X-X.min(axis=0))/(X.max(axis=0)-X.min(axis=0)))
# [[1.  0.  0. ]
#  [0.5 0.5 0.5]
#  [0.  1.  1. ]]
mms = MinMaxScaler()
xmms = mms.fit_transform(X)
print(xmms)
# [[1.  0.  0. ]
#  [0.5 0.5 0.5]
#  [0.  1.  1. ]]
xmms2=minmax_scale(X, axis=0, copy=True)
print(xmms2)
# [[1.  0.  0. ]
#  [0.5 0.5 0.5]
#  [0.  1.  1. ]]
####################### Binarization #######################################################
# Binarization: represent values as 0 or 1.
# When a variable is continuous (height, weight, temperature), convert its
# values to 0 or 1: binarization.
# A reference value is required: classify each value as below/above it.
# Threshold: the value used as the conversion criterion.
# Example: checking presence/absence of diabetes.
# Used in regression analysis, text mining, etc.
# Analysis with Binarizer
X =np.array(([[10.,-10.,1.],
             [5.,0.,2.],
             [0.,10.,3.]]))
binarizer = Binarizer().fit(X)
print(binarizer)
# Binarizer(copy=True, threshold=0.0)  # copy=True: work on a copy; False: operate on the input in place
print(binarizer.transform(X))
# [[1. 0. 1.]
#  [1. 0. 1.]
#  [0. 1. 1.]]
binarizer = Binarizer(threshold=2.0)
print(binarizer.transform(X))
# The binarize() helper is more convenient than Binarizer().fit(X).
print(binarize(X,threshold=2.0))
# [[1. 0. 0.]
#  [1. 0. 0.]
#  [0. 1. 1.]]
print(X)
# [[ 10. -10.   1.]
#  [  5.   0.   2.]
#  [  0.  10.   3.]]
print(binarize(X,threshold=2.0,copy=False))
# [[1. 0. 0.]
#  [1. 0. 0.]
#  [0. 1. 1.]]
print(X)
# [[1. 0. 0.]
#  [1. 0. 0.]
#  [0. 1. 1.]]
# Categorical variables -> binarization: one-hot encoding is required.
# Gender: encode as male(0), female(1).
# Age group: encode as 20s(0), 30s(1), 40s(2), 50s(3).
# Grade: encode as A(0), B(1), ..., F(5).
# SN  gender  age_group  grade
# 1    0       0          1
# 2    1       3          0
# ....
# One-hot encoding: neural-network models need categorical codes one-hot
# encoded => without the conversion the numbers are treated as continuous
# data => spurious ordering relationships between the codes.
# gender(0,1)    => 0:01, 1:10
# age group(0~3) => 0:1000, 2:0100, 3:0010, 4:0001
# grade(0~5)     => 0:10000,1:01000, 3:00100, 4:00010, 5:00001
# uses: from sklearn.preprocessing import OneHotEncoder
# # License-plate recognizer example
# 1) plate "52ga 1234"
# 5 : 0000010000
# 2 : 0010000000
# ga
# 1
# 2
# 3
# 4
# 2) feed the codes into the recognizer
# 3) the recognizer returns its classification result
# 0 0.05 0 0 0 0.9 0.05 0 0 0
# 0000010000 => 5
from sklearn.preprocessing import OneHotEncoder
data_train = np.array([[0,0,0],[0,1,1],[0,2,2],[1,0,3],[1,1,4]])
# columns: gender (2 values: 0,1), age group (3 values: 0,1,2), grade (5 values: 0,1,2,3,4)
enc = OneHotEncoder()  # encoder instance
print(enc.fit(data_train))
# OneHotEncoder(categorical_features='all', dtype=<class 'numpy.float64'>,
#        handle_unknown='error', n_values='auto', sparse=True)
print(enc.active_features_)  # shows the category slots learned by fit()
# [0 1 2 3 4 5 6 7 8 9]
# male,female,20s,30s,40s,A,B,C,D,F
# gender covers 2 slots (0,1), age group 3 slots (2,3,4), grade 5 slots (5,6,7,8,9)
# i.e. 10 one-hot positions in total
print(enc.n_values_)  # number of categories per feature
# [2 3 5]
print(enc.feature_indices_)
# [ 0  2  5 10]  # gender: slots [0,2), age group: slots [2,5), grade: slots [5,10)
# female(1), 40s(2), grade D(3)
data_new = np.array([[1,2,3]])
print(enc.transform(data_new).toarray())
# [[0. 1. 0. 0. 1. 0. 0. 0. 1. 0.]]
################ Group By ############################################
# Split-apply-combine: split mixed data by key, process each group, then recombine.
|
'''
Created on 22 Jan 2013
@author: gfagiolo
'''
import dicom
# import logging
#===============================================================================
# CONSTANTS
#===============================================================================
# Tags resolved by keyword through the (old) pydicom 'dicom' package dictionary.
#SOPClassUID = (0008, 0016)
TAG_SOP_CLASS_UID = dicom.tag.Tag(dicom.datadict.NameDict['SOPClassUID'])
#DimensionIndexes = (0020, 9222)
TAG_DIMENSION_INDEX_SEQUENCE = dicom.tag.Tag(dicom.datadict.NameDict['DimensionIndexes'])
#PerframeFunctionalGroups = (5200, 9230)
TAG_PER_FRAME_FUNCTIONAL_GROUPS_SEQUENCE = dicom.tag.Tag(dicom.datadict.NameDict['PerframeFunctionalGroups'])
# Storage SOP Class UIDs accepted by this module.
UID_ENHANCED_MR_IMAGE_STORAGE = '1.2.840.10008.5.1.4.1.1.4.1'
UID_MR_SPECTROSCOPY_STORAGE = '1.2.840.10008.5.1.4.1.1.4.2'
#DTI related content
# Values of the DiffusionDirectionality attribute used by DiffusionFrameInfo.
DTIDIRNO = 'NONE'
DTIDIRYES = 'DIRECTIONAL'
DTIDIRISO = 'ISOTROPIC'
# Header prepended to flat-text metadata dumps produced by dicomobj_to_str().
METADATA_DESCRIPTION ="""#DICOM metadata in flat txt-format
#'key' = 'value'
#where key is a hierarchical set of ('.'-separated) dicom tags and value is the corresponding value
#Parameters common to all frames can be found as either direct parameters (i.e. 'BodyPartExamined')
#or under the sequence 'SharedFunctionalGroupsSequence' (i.e. 'RepetitionTime')
#frame specific parameters can be found under the sequence 'PerFrameFunctionalGroupsSequence',
#note that the number 'n' in 'PerFrameFunctionalGroupsSequence[n]' refers to the frame described
#Frame specific tags are for example 'EffectiveEchoTime'
"""
# Patient/operator identifying attributes dropped when anonymise=True in
# dicomobj_to_str().
ANONYMISE_FIELDS = set(
    ('PatientName',
    'PatientID',
    'PatientBirthDate',
    'PatientSex',
    'OperatorsName',
    'RequestingPhysician',
    'ScheduledPerformingPhysicianName',
    'ReferringPhysicianName',))
#===============================================================================
# FUNCTIONS
#===============================================================================
def inspect_dicom(fname):
    """Read and return only the DICOM file-meta header of *fname*.

    Skips the 128-byte preamble (0x80), checks the 4-byte 'DICM' magic and
    parses the file-meta group without loading the pixel data.

    Raises:
        dicom.filereader.InvalidDicomError: *fname* is not a DICOM file.

    BUG FIX: the file is opened in binary mode, so ``fp.read(4)`` yields
    ``bytes`` under Python 3 and the original ``magic != "DICM"`` (str)
    comparison was always True, rejecting every file.  Comparing against
    ``b"DICM"`` works on both Python 2 and 3.  The ``with`` block also closes
    the file when InvalidDicomError is raised (the original leaked the handle).
    """
    with open(fname, 'rb') as fp:
        _ = fp.read(0x80)  # 128-byte preamble, contents ignored
        magic = fp.read(4)
        if magic != b"DICM":
            #logging.debug(fname,'not a dicom file')
            raise dicom.filereader.InvalidDicomError
        meta = dicom.filereader._read_file_meta_info(fp)
    return meta
def is_storagesopclassuid(df, uid):
    """Return True when *df* carries the Storage SOP Class UID *uid*.

    *df* may be an already-loaded dicom FileDataset or a filename string;
    anything else (including non-DICOM files) yields False.
    """
    if isinstance(df, dicom.dataset.FileDataset):
        return df[TAG_SOP_CLASS_UID].value == uid
    if isinstance(df, str):
        try:
            meta = inspect_dicom(df)
        except dicom.filereader.InvalidDicomError:
            #this is not a dicom file
            return False
        return meta.MediaStorageSOPClassUID == uid
    return False
def is_accepted_dicom(df):
    """Return True when *df* is one of the SOP classes this module handles
    (Enhanced MR Image Storage or MR Spectroscopy Storage).

    *df* may be a loaded dicom FileDataset or a filename string; anything
    else (including non-DICOM files) yields False.  The accepted-UID pair is
    hoisted into one tuple instead of being duplicated in both branches.
    """
    accepted = (UID_ENHANCED_MR_IMAGE_STORAGE, UID_MR_SPECTROSCOPY_STORAGE)
    if isinstance(df, dicom.dataset.FileDataset):
        return df[TAG_SOP_CLASS_UID].value in accepted
    elif isinstance(df, str):
        try:
            return inspect_dicom(df).MediaStorageSOPClassUID in accepted
        except dicom.filereader.InvalidDicomError:
            #this is not a dicom file
            return False
    else:
        return False
def is_multiframe(df):
    """Return True when *df* (FileDataset or filename) is Enhanced MR Image Storage."""
    return is_storagesopclassuid(df, UID_ENHANCED_MR_IMAGE_STORAGE)
def is_mrspectroscopystorage(df):
    """Return True when *df* (FileDataset or filename) is MR Spectroscopy Storage."""
    return is_storagesopclassuid(df, UID_MR_SPECTROSCOPY_STORAGE)
def get_a_frame(df, frame_number=0):
    """Return the per-frame functional-group item *frame_number* (default: first)."""
    return df[TAG_PER_FRAME_FUNCTIONAL_GROUPS_SEQUENCE][frame_number]
def get_frame_content(frame):
    """Return the first (and, per the standard, only) FrameContentSequence item."""
    return frame.FrameContentSequence[0]
def get_shared_functional_group_sequence(df):
    """Return the single SharedFunctionalGroupsSequence item of dataset *df*."""
    return df.SharedFunctionalGroupsSequence[0]
def get_shared_functional_group_sequence_repetion_time(SharedFunctionalGroupsSequence):
    """Return RepetitionTime from an already-extracted shared functional group.

    (Name keeps the historical 'repetion' typo for caller compatibility.)
    """
# logging.debug("SharedFunctionalGroupsSequence %d items"%len(df.SharedFunctionalGroupsSequence))
    return SharedFunctionalGroupsSequence.MRTimingAndRelatedParametersSequence[0].RepetitionTime
def dicomobj_to_str(seq, level=0, prefix=None, only_non_private=True, anonymise=True):
    """Render a dicom dataset/sequence as flat "key = value" lines.

    Nested sequences are flattened hierarchically: the items of a sequence
    named Seq appear under the prefix "Seq[n]." (1-based n), matching the
    format documented in METADATA_DESCRIPTION.

    Parameters:
        seq:              a dicom dataset (iterable of data elements).
        level:            recursion depth (informational only).
        prefix:           key prefix accumulated from enclosing sequences.
        only_non_private: when True, emit only tags found in the public
                          DICOM dictionary.
        anonymise:        when True, drop fields listed in ANONYMISE_FIELDS.

    Returns:
        A newline-joined string of "key = value" lines.
    """
    def tag_to_name(atag):
        # Map a tag to its DICOM keyword; fall back to the raw tag string.
        try:
            return dicom.datadict.DicomDictionary[atag][-1]
        except KeyError:
            return str(atag).replace(' ','')
    def my_repr(self):
        # "Name = repval" for a single data element.
        repVal = self.repval
        s = '%s = %s'%(tag_to_name(self.tag), repVal)
        return s
    strings = []
    for data_element in seq:
        name = tag_to_name(data_element.tag)
        if anonymise and name in ANONYMISE_FIELDS:
            continue
        if data_element.VR == "SQ":
            #this is a sequence, use its name as the start of the fields contained
            # BUG FIX: the original condition skipped *public* sequences (tag
            # present in DicomDictionary) and recursed into private ones —
            # the inverse of its own "skip private fields" comment and of the
            # non-sequence branch below.
            if only_non_private and data_element.tag not in dicom.datadict.DicomDictionary:
                #skip private fields
                continue
            #name = tag_to_name(data_element.tag)
            for indx, dataset in enumerate(data_element.value):
                if prefix is None:
                    nprefix = name + '[%d].'%(indx+1)
                else:
                    nprefix = prefix + name + '[%d].'%(indx+1)
                # BUG FIX: propagate anonymise into the recursion (it was
                # dropped, so nested items were always anonymised regardless
                # of the caller's choice).
                strings.append(dicomobj_to_str(dataset, level + 1, nprefix, only_non_private, anonymise))
        else:
            if only_non_private and data_element.tag in dicom.datadict.DicomDictionary:
                #only non-private fields
                if prefix is None:
                    strings.append(my_repr(data_element))
                else:
                    strings.append(prefix + my_repr(data_element))
            elif not only_non_private:
                if prefix is None:
                    strings.append(my_repr(data_element))
                else:
                    strings.append(prefix + my_repr(data_element))
    return "\n".join(strings)
def get_repetion_time(df):
    """Return RepetitionTime from the dataset's shared functional group.

    (Name keeps the historical 'repetion' typo for caller compatibility.)
    """
# logging.debug("SharedFunctionalGroupsSequence %d items"%len(df.SharedFunctionalGroupsSequence))
    return df.SharedFunctionalGroupsSequence[0].MRTimingAndRelatedParametersSequence[0].RepetitionTime
def get_frame_repetion_time(frame):
    """Return RepetitionTime from a single per-frame functional group."""
    return frame.MRTimingAndRelatedParametersSequence[0].RepetitionTime
def define_DimensionIndexValues(df):
    """Return [(DimensionIndexPointer, FunctionalGroupPointer), ...] for every
    item of the dataset's DimensionIndexSequence, in sequence order.

    (Append-loop replaced by an equivalent list comprehension.)
    """
    return [(el.DimensionIndexPointer, el.FunctionalGroupPointer)
            for el in df[TAG_DIMENSION_INDEX_SEQUENCE]]
def DimensionIndexes_to_tagnames(df):
    """Return the dimension index pointers of *df* as human-readable name pairs.

    Each pair is (dimension-index tag name, functional-group tag name); tags
    missing from the public DICOM dictionary fall back to their raw string form.
    """
    def find_tag_name(t):
        if t in dicom.datadict.DicomDictionary:
            return dicom.datadict.DicomDictionary[t][2]
        return str(t)
    pairs = define_DimensionIndexValues(df)
    return [(find_tag_name(index_ptr), find_tag_name(group_ptr)) for index_ptr, group_ptr in pairs]
def get_frame_index(frame, idx):
    """Return this frame's value for each dimension-index entry in *idx*.

    *idx* is the dataset's DimensionIndexSequence; for each entry the frame is
    dereferenced via its FunctionalGroupPointer then DimensionIndexPointer.
    """
    return [frame[e.FunctionalGroupPointer][0][e.DimensionIndexPointer] for e in idx]
def get_dimension_index_description_position(idx, desc):
    """Return the 0-based position within *idx* of the first dimension whose
    DimensionDescriptionLabel equals *desc*.

    Raises IndexError when no dimension matches (same contract as the original
    one-liner).  The opaque filter/lambda/zip expression is unrolled into a
    readable comprehension; positions are still bounded by ``idx.VM``.
    """
    matches = [pos for entry, pos in zip(idx, range(idx.VM))
               if entry.DimensionDescriptionLabel == desc]
    return matches[0]
class DiffusionFrameInfo(object):
    """Diffusion metadata (type, b-value, gradient vector) for one frame."""

    def __init__(self, frame, frame_no):
        """Extract the diffusion parameters of *frame* (index *frame_no*)."""
        self.set_frame_no(frame_no)
        diff_type, diff_bvalue, diff_bvec = diffusionInfo(frame)
        self.set_diff_type(diff_type)
        self.set_diff_bvalue(diff_bvalue)
        self.set_diff_bvec(diff_bvec)

    def get_frame_no(self):
        return self._frame_no

    def get_diff_type(self):
        return self._diff_type

    def get_diff_bvalue(self):
        return self._diff_bvalue

    def get_diff_bvec(self):
        return self._diff_bvec

    def set_frame_no(self, value):
        self._frame_no = value

    def set_diff_type(self, value):
        self._diff_type = value

    def set_diff_bvalue(self, value):
        self._diff_bvalue = value

    def set_diff_bvec(self, value):
        self._diff_bvec = value

    def isDirectional(self):
        """True for a directionally diffusion-weighted frame."""
        return self.get_diff_type() == DTIDIRYES

    def isUnweighted(self):
        """True for an unweighted (b=0) frame."""
        return self.get_diff_type() == DTIDIRNO

    def toList(self):
        """Return [diff type, b-value, b-vector, frame number]."""
        return [self.get_diff_type(), self.get_diff_bvalue(),
                self.get_diff_bvec(), self.get_frame_no()]

    def __str__(self):
        return str(self.toList())

    def __repr__(self):
        return str(self)

    # Property views kept so callers may use attribute-style access.
    FRAME_NO = property(get_frame_no, set_frame_no, None, "FRAME_NO's docstring")
    DIFF_TYPE = property(get_diff_type, set_diff_type, None, "DIFF_TYPE's docstring")
    DIFF_BVALUE = property(get_diff_bvalue, set_diff_bvalue, None, "DIFF_BVALUE's docstring")
    DIFF_BVEC = property(get_diff_bvec, set_diff_bvec, None, "DIFF_BVEC's docstring")
def diffusionInfo(frame):
    """Extract [directionality, b-value, gradient orientation] from a frame.

    Optional attributes that are absent from the MR Diffusion Sequence are
    reported as None.
    """
    diff_seq = frame.MRDiffusionSequence[0]
    bvalue = None
    bvec = None
    if 'DiffusionBValue' in diff_seq:
        bvalue = diff_seq.DiffusionBValue
    if 'DiffusionGradientDirectionSequence' in diff_seq:
        gradient = diff_seq.DiffusionGradientDirectionSequence[0]
        bvec = list(gradient.DiffusionGradientOrientation)
    return [diff_seq.DiffusionDirectionality, bvalue, bvec]
|
<filename>third_party/WebKit/Source/build/scripts/in_file.py
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import os
# NOTE: This has only been used to parse
# core/page/RuntimeEnabledFeatures.in and may not be capable
# of parsing other .in files correctly.
# .in file format is:
# // comment
# name1 arg=value, arg2=value2, arg2=value3
#
# InFile must be passed a dictionary of default values
# with which to validate arguments against known names.
# Sequence types as default values will produce sequences
# as parse results.
# Bare arguments (no '=') are treated as names with value True.
# The first field will always be labeled 'name'.
#
# InFile.load_from_files(['file.in'], {'arg': None, 'arg2': []})
#
# Parsing produces an array of dictionaries:
# [ { 'name' : 'name1', 'arg' :' value', arg2=['value2', 'value3'] }
def _is_comment(line):
return line.startswith("//") or line.startswith("#")
class InFile(object):
def __init__(self, file_paths, lines, defaults, valid_values=None, default_parameters=None):
self.file_paths = file_paths
self.name_dictionaries = []
self.parameters = copy.deepcopy(default_parameters if default_parameters else {})
self._defaults = defaults
self._valid_values = copy.deepcopy(valid_values if valid_values else {})
self._parse(map(str.strip, lines))
@classmethod
def load_from_files(self, file_paths, defaults, valid_values, default_parameters):
lines = []
for path in file_paths:
assert path.endswith(".in")
with open(os.path.abspath(path)) as in_file:
lines += in_file.readlines()
return InFile(file_paths, lines, defaults, valid_values, default_parameters)
def _is_sequence(self, arg):
return (not hasattr(arg, "strip")
and hasattr(arg, "__getitem__")
or hasattr(arg, "__iter__"))
def _parse(self, lines):
parsing_parameters = True
indices = {}
for line in lines:
if _is_comment(line):
continue
if not line:
parsing_parameters = False
continue
if parsing_parameters:
self._parse_parameter(line)
else:
entry = self._parse_line(line)
name = entry['name']
if name in indices:
entry = self._merge_entries(entry, self.name_dictionaries[indices[name]])
entry['name'] = name
self.name_dictionaries[indices[name]] = entry
else:
indices[name] = len(self.name_dictionaries)
self.name_dictionaries.append(entry)
def _merge_entries(self, one, two):
merged = {}
for key in one:
if key not in two:
self._fatal("Expected key '%s' not found in entry: %s" % (key, two))
if one[key] and two[key]:
val_one = one[key]
val_two = two[key]
if isinstance(val_one, list) and isinstance(val_two, list):
val = val_one + val_two
elif isinstance(val_one, list):
val = val_one + [val_two]
elif isinstance(val_two, list):
val = [val_one] + val_two
else:
val = [val_one, val_two]
merged[key] = val
elif one[key]:
merged[key] = one[key]
else:
merged[key] = two[key]
return merged
def _parse_parameter(self, line):
if '=' in line:
name, value = line.split('=')
else:
name, value = line, True
if not name in self.parameters:
self._fatal("Unknown parameter: '%s' in line:\n%s\nKnown parameters: %s" % (name, line, self.parameters.keys()))
self.parameters[name] = value
def _parse_line(self, line):
args = copy.deepcopy(self._defaults)
parts = line.split(' ')
args['name'] = parts[0]
# re-join the rest of the line and split on ','
args_list = ' '.join(parts[1:]).strip().split(',')
for arg_string in args_list:
arg_string = arg_string.strip()
if not arg_string: # Ignore empty args
continue
if '=' in arg_string:
arg_name, arg_value = arg_string.split('=')
else:
arg_name, arg_value = arg_string, True
if arg_name not in self._defaults:
self._fatal("Unknown argument: '%s' in line:\n%s\nKnown arguments: %s" % (arg_name, line, self._defaults.keys()))
valid_values = self._valid_values.get(arg_name)
if valid_values and arg_value not in valid_values:
self._fatal("Unknown value: '%s' in line:\n%s\nKnown values: %s" % (arg_value, line, valid_values))
if self._is_sequence(args[arg_name]):
args[arg_name].append(arg_value)
else:
args[arg_name] = arg_value
return args
def _fatal(self, message):
# FIXME: This should probably raise instead of exit(1)
print message
exit(1)
|
from ..base import MultiGridEnv, MultiGrid
from ..objects import *
class EmptyMultiGrid(MultiGridEnv):
    """An empty walled room with a single green goal square."""

    mission = "get to the green square"
    metadata = {}

    def _gen_grid(self, width, height):
        """Build the walled room, place the goal, then spawn the agents."""
        self.grid = MultiGrid((width, height))
        self.grid.wall_rect(0, 0, width, height)
        goal = Goal(color="green", reward=1)
        self.put_obj(goal, width - 2, height - 2)
        self.agent_spawn_kwargs = {}
        self.place_agents(**self.agent_spawn_kwargs)
class EmptyColorMultiGrid(MultiGridEnv):
    """Multi-agent grid world with several coloured goal squares.

    All agents receive an equal, shared reward when every agent stands on
    a goal square of the same colour at the same time.
    """

    mission = "all agents get to the same coloured square"
    metadata = {}

    def __init__(self, *args, goal_coordinates, goal_colors, **kwargs):
        """Store the goal layout and defer the rest to MultiGridEnv.

        Args:
            goal_coordinates: iterable of (x, y) grid positions of the goals.
            goal_colors: colour names, parallel to ``goal_coordinates``.

        Raises:
            ValueError: if the two goal lists differ in length.
        """
        if len(goal_coordinates) != len(goal_colors):
            raise ValueError(
                "goal_coordinates and goal_colors must have the same length"
            )
        self.goal_coordinates = goal_coordinates
        self.goal_colors = goal_colors
        super().__init__(*args, **kwargs)

    def _gen_grid(self, width, height):
        """Build an empty walled room containing the coloured goal squares."""
        self.grid = MultiGrid((width, height))
        self.grid.wall_rect(0, 0, width, height)
        for i, (x, y) in enumerate(self.goal_coordinates):
            self.put_obj(ColorGoal(color=self.goal_colors[i], reward=1), x, y)
        # Agents are spawned lazily in step() once their spawn_delay expires.
        self.agent_spawn_kwargs = {}

    def step(self, actions):
        """Advance the environment one tick.

        Args:
            actions: one action per agent, parallel to ``self.agents``.

        Returns:
            (observations, per-agent step rewards, episode done flag, info dict)
        """
        # Spawn agents if it's time.
        for agent in self.agents:
            if (
                not agent.active
                and not agent.done
                and self.step_count >= agent.spawn_delay
            ):
                self.place_obj(agent, **self.agent_spawn_kwargs)
                agent.activate()

        assert len(actions) == len(self.agents)

        # np.float was removed in NumPy 1.24; the builtin float is the
        # documented replacement (it is what np.float aliased).
        step_rewards = np.zeros(len(self.agents), dtype=float)

        self.step_count += 1

        # Process agents in a random order each tick so no agent gets a
        # systematic movement priority.
        iter_agents = list(enumerate(zip(self.agents, actions)))
        iter_order = np.arange(len(iter_agents))
        self.np_random.shuffle(iter_order)
        for shuffled_ix in iter_order:
            agent_no, (agent, action) = iter_agents[shuffled_ix]
            agent.step_reward = 0

            if agent.active:
                cur_pos = agent.pos[:]
                cur_cell = self.grid.get(*cur_pos)
                fwd_pos = agent.front_pos[:]
                fwd_cell = self.grid.get(*fwd_pos)
                agent_moved = False

                # Rotate left
                if action == agent.actions.left:
                    agent.dir = (agent.dir - 1) % 4

                # Rotate right
                elif action == agent.actions.right:
                    agent.dir = (agent.dir + 1) % 4

                # Move forward
                elif action == agent.actions.forward:
                    # The agent may enter an empty or overlappable cell; with
                    # ghost mode off it may never share a cell with another agent.
                    can_move = fwd_cell is None or fwd_cell.can_overlap()
                    if self.ghost_mode is False and isinstance(fwd_cell, GridAgent):
                        can_move = False

                    if can_move:
                        agent_moved = True
                        # Add agent to new cell
                        if fwd_cell is None:
                            self.grid.set(*fwd_pos, agent)
                            agent.pos = fwd_pos
                        else:
                            fwd_cell.agents.append(agent)
                            agent.pos = fwd_pos

                        # Remove agent from old cell
                        if cur_cell == agent:
                            self.grid.set(*cur_pos, None)
                        else:
                            assert cur_cell.can_overlap()
                            cur_cell.agents.remove(agent)

                        # Any agents the mover was stacked on stay behind.
                        for left_behind in agent.agents:
                            cur_obj = self.grid.get(*cur_pos)
                            if cur_obj is None:
                                self.grid.set(*cur_pos, left_behind)
                            elif cur_obj.can_overlap():
                                cur_obj.agents.append(left_behind)
                            else:  # How was "agent" there in the first place?
                                raise ValueError("?!?!?!")

                        # After moving, the agent shouldn't contain any other agents.
                        agent.agents = []

                        if isinstance(fwd_cell, (Lava, Goal)):
                            agent.done = True

                        # Record which goal colour (if any) the agent now
                        # stands on; used below for the shared reward.
                        if hasattr(fwd_cell, "get_color"):
                            agent.on_color = fwd_cell.get_color(agent)
                        else:
                            agent.on_color = None

                # TODO: pickup/drop/toggle interactions are intentionally not
                # implemented for this environment; see MultiGridEnv for
                # reference logic if they are ever needed.

                # Done action (not used by default)
                elif action == agent.actions.done:
                    pass

                else:
                    raise ValueError(f"Environment can't handle action {action}.")

                agent.on_step(fwd_cell if agent_moved else None)

        obs = [self.gen_agent_obs(agent) for agent in self.agents]

        reward_colors = [a.on_color for a in self.agents if a.on_color]
        # Agents get equal rewards if they are all on the same coloured block
        if len(reward_colors) == len(step_rewards):
            if all(x == reward_colors[0] for x in reward_colors):
                rwd = 1
                if bool(self.reward_decay):
                    # Linearly decay the reward over the episode.
                    rwd *= 1.0 - 0.9 * (self.step_count / self.max_steps)
                step_rewards[:] += rwd
                for agent in self.agents:
                    agent.reward(rwd)
                    agent.done = True

        # If any of the agents individually are "done" (hit lava or in some
        # cases a goal) but the env requires respawning, respawn them.
        for agent in self.agents:
            if agent.done:
                if self.respawn:
                    resting_place_obj = self.grid.get(*agent.pos)
                    if resting_place_obj == agent:
                        if agent.agents:
                            self.grid.set(*agent.pos, agent.agents[0])
                            agent.agents[0].agents += agent.agents[1:]
                        else:
                            self.grid.set(*agent.pos, None)
                    else:
                        resting_place_obj.agents.remove(agent)
                        resting_place_obj.agents += agent.agents[:]
                        agent.agents = []

                    agent.reset(new_episode=False)
                    self.place_obj(agent, **self.agent_spawn_kwargs)
                    agent.activate()
                else:  # if the agent shouldn't be respawned, then deactivate it.
                    agent.deactivate()

        # The episode overall is done if all the agents are done, or if it
        # exceeds the step limit.
        done = (self.step_count >= self.max_steps) or all(
            [agent.done for agent in self.agents]
        )

        return obs, step_rewards, done, {}
|
<gh_stars>1-10
import pandas as pd
import geopandas
import matplotlib.pyplot as plt
from shapely.geometry import Point, Polygon, LineString
# English tutorial: https://geopandas.readthedocs.io/en/latest/install.html
# Chinese tutorial: https://www.bbsmax.com/A/Vx5M9KyL5N/
# Module-level layers shared by the functions below (Windows-relative paths).
countries = geopandas.read_file(r"geopandas-tutorial-song\data\ne_110m_admin_0_countries\ne_110m_admin_0_countries.shp")
cities = geopandas.read_file(r"geopandas-tutorial-song\data\ne_110m_populated_places")
rivers = geopandas.read_file(r"geopandas-tutorial-song\data\ne_50m_rivers_lake_centerlines")
def print_head_date():
    """Print the first few rows of the countries layer."""
    print(countries.head())
def print_all_graph():
    """Plot the countries layer and return the GeoDataFrame.

    Note: a GeoDataFrame has no .show(); display via plt.show() instead.
    """
    gdf = countries
    gdf.plot()
    return gdf
def print_area():
    """Print a (name, continent, area) tuple for every country."""
    rows = zip(countries['name'], countries['continent'],
               countries.geometry.area)
    for row in rows:
        print(row)
def print_continent(continent):
    """Plot only the countries belonging to *continent*."""
    subset = countries[countries['continent'] == continent]
    subset.plot()
    plt.show()
# print(countries.head())
# print_africa('Asia')
def mean_all():
    """Print and return the mean of the population estimates."""
    mean = countries['pop_est'].mean()
    print('mean is', mean)
    return mean
# mean_all()
def get_centroid(data):
    """Replace each geometry by its centroid and return the result.

    Note: the 'geometry' column assignment mutates *data* in place, as the
    original implementation did.
    """
    data['geometry'] = data.centroid
    return data.set_geometry('geometry')
# c = get_centroid(countries)
# c.plot()
# plt.show()
def data_to_shp(data):
    """Export a GeoDataFrame to the shapefile 'countries_out.shp'."""
    data.to_file(driver='ESRI Shapefile', filename='countries_out.shp')
def area_and_distance():
    """Demonstrate shapely: polygon area and point-to-polygon distance."""
    point = Point(0, 0)
    triangle = Polygon([(1, 1), (2, 2), (2, 1)])
    # Print the area first, then the distance, as before.
    print(triangle.area)
    print(triangle.distance(point))
def show_together():
    """Plot countries, rivers and cities together on one set of axes."""
    # Outline-only countries as the base layer.
    ax = countries.plot(edgecolor='green', facecolor='none', figsize=(15, 10))
    rivers.plot(ax=ax)
    cities.plot(ax=ax, color='red')
    # Restrict the view to the lon/lat window of interest.
    ax.set(xlim=(-20, 60), ylim=(-40, 40))
    return ax
def NDD():
    """Reproject the countries layer to Mercator (EPSG:3395) and plot it.

    The source CRS must already be set on the GeoSeries.  Antarctica is
    dropped first because it distorts a Mercator projection.
    """
    without_antarctica = countries[countries['name'] != "Antarctica"]
    countries_mercator = without_antarctica.to_crs(epsg=3395)
    countries_mercator.plot()
    plt.show()
# Note on fiona
def fiona():
    """Iterate the features of a shapefile directly with fiona.

    Reference: https://blog.csdn.net/theonegis/article/details/80607262
    """
    import fiona
    from shapely.geometry import shape
    with fiona.Env():
        with fiona.open(r"geopandas-tutorial-song\data\ne_110m_admin_0_countries\ne_110m_admin_0_countries.shp") as collection:
            for feature in collection:
                # Geometry is available as a shapely object if needed.
                geom = shape(feature['geometry'])
                print(feature['properties']['name'])
def new_gdf():
    """Build a GeoDataFrame by hand from city lat/lon columns and plot it."""
    df = pd.DataFrame(
        {'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],
         'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],
         'Latitude': [-34.58, -15.78, -33.45, 4.60, 10.48],
         'Longitude': [-58.66, -47.91, -70.66, -74.08, -66.86]})
    # Turn each (lon, lat) pair into a shapely Point geometry.
    df['Coordinates'] = [Point(xy) for xy in zip(df.Longitude, df.Latitude)]
    gdf = geopandas.GeoDataFrame(df, geometry='Coordinates')
    # Draw the cities in red over a South America base map.
    ax = countries[countries.continent == 'South America'].plot(
        color='white', edgecolor='black')
    gdf.plot(ax=ax, color='red')
    plt.show()
import pyVmomi
from django.shortcuts import render
from extensions.views import tab_extension, TabExtensionDelegate
from infrastructure.models import Server
from resourcehandlers.vmware.pyvmomi_wrapper import get_vm_by_uuid
from resourcehandlers.vmware.models import VsphereResourceHandler
from resourcehandlers.vmware.vmware_41 import TechnologyWrapper
# UI Extension that exposes basic VM / VM Tools information to an end-user in a server-tab
#
# Copyright 2018 Aves-IT B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class VMWareDetailsTabDelegate(TabExtensionDelegate):
    """Decides whether the 'VMWare Details' tab appears for a server."""

    def should_display(self):
        """Show the tab only for servers managed by a vSphere handler."""
        handler = self.instance.resource_handler.cast()
        return isinstance(handler, VsphereResourceHandler)
def get_vmware_service_instance(rh):
    """
    Gets a service instance object that represents a connection to vCenter,
    and which can be used for making API calls.

    :param rh: ResourceHandler to get a ServiceInstance for
    :type rh: ResourceHandler
    :return: ServiceInstance object
    """
    assert isinstance(rh.cast(), VsphereResourceHandler)
    wrapper = rh.get_api_wrapper()
    assert isinstance(wrapper, TechnologyWrapper)
    # The wrapper holds the live pyVmomi connection; hand it to the caller.
    return wrapper._get_connection()
@tab_extension(model=Server, title='VMWare Details', delegate=VMWareDetailsTabDelegate)
def vmware_details_server_tab(request, obj_id):
    """
    Renders the VMWare Info tab in the Server view

    :param request: the HTTP request
    :param obj_id: the server ID
    :return: a rendered object
    """
    server = Server.objects.get(id=obj_id)
    connection = get_vmware_service_instance(server.resource_handler)
    vm = get_vm_by_uuid(connection, server.resource_handler_svr_id)
    assert isinstance(vm, pyVmomi.vim.VirtualMachine)

    # Human-readable translations of the pyvmomi VM Tools status enums.
    conv = {
        'guestToolsRunning': 'VMware Tools is running.',
        'guestToolsNotRunning': 'VMware Tools is not running.',
        'guestToolsExecutingScripts': 'VMware Tools is starting.',
        'guestToolsBlacklisted': 'VMware Tools is installed, but should be upgraded immediately due to a known bug',
        'guestToolsUnmanaged': 'VMware Tools is installed, but it is not managed by VMWare. Probably open-vm-tools',
        'guestToolsNeedUpgrade': 'VMware Tools is installed, but the version is not current.',
        'guestToolsSupportedOld': 'VMware Tools is installed, supported, but a newer version is available.',
        'guestToolsTooOld': 'VMware Tools is installed, but the version is too old.',
        'guestToolsTooNew': 'VMware Tools is installed, but the version is too new for this virtual machine.',
        'guestToolsNotInstalled': 'VMware Tools has never been installed.',
        'guestToolsSupportedNew': 'VMware Tools is installed, supported, and newer than the version on the host.',
        'guestToolsCurrent': 'VMware Tools is installed, and the version is current.'
    }

    def _describe(status):
        # Fall back to the raw enum value for statuses we do not know.
        return conv.get(status, 'Unknown ({})'.format(status))

    # Anything on the pyvmomi vm object can be surfaced here, either raw
    # (vm.config.version) or after a lookup (vm.guest.toolsRunningStatus).
    vm_details = {
        'vmx_version': vm.config.version,
        'vmtools_status': _describe(vm.guest.toolsRunningStatus),
        'vmtools_version': _describe(vm.guest.toolsVersionStatus2)
    }
    return render(request, 'vmware_details/templates/vmware_tab.html', dict(server=server, vm_details=vm_details))
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
import sys
import csv
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
#
# Usage:
#
# netsh trace start overwrite=yes report=dis correlation=dis traceFile=quic.etl maxSize=1024 provider={ff15e657-4f26-570e-88ab-0796b258d11c} level=0x5 keywords=0xC0000100
# (Run scenario)
# netsh trace stop
# (Find connection id of interest ("--id N") with "quicetw quic.etl --conn_list")
# quicetw quic.etl --csv --conn_tput --id N --reso 10 > quic.csv
# python quic_plot_conn_tput.py quic.csv
#
# Optional param: pass --nofc to remove FC windows from the plots (they typically dwarf other values).
#
#
# Install/update the following dependencies first to use:
#
# python -m pip install -U pip
# python -m pip install -U matplotlib
#
# Per-sample series parsed from the CSV emitted by "quicetw --csv --conn_tput".
timeMs = []
txMbps = []
rxMbps = []
rttMs = []
congEvent = []
inFlight = []
cwnd = []
fcStream = []
fcConn = []

# ms,TxMbps,RxMbps,RttMs,CongEvents,InFlight,Cwnd,TxBufBytes,FlowAvailStrm,FlowAvailConn,SsThresh,CubicK,CubicWindowMax,StrmSndWnd
with open(sys.argv[1]) as csvfile:
    csvReader = csv.reader(csvfile)
    next(csvReader)  # Skip column header
    for row in csvReader:
        # Stop processing the file once we don't have all the columns.
        if len(row) < 13:
            break
        timeMs.append(float(row[0]))
        txMbps.append(float(row[1]))
        rxMbps.append(float(row[2]))
        rttMs.append(float(row[3]))
        congEvent.append(int(row[4]))
        # Byte counters are converted to KB for plotting.
        inFlight.append(float(row[5]) / 1000)
        cwnd.append(float(row[6]) / 1000)
        fcStream.append(float(row[8]) / 1000)
        fcConn.append(float(row[9]) / 1000)

# Four stacked panels: throughput, window sizes, congestion events, RTT.
fig = plt.figure()
heights = [2, 6, 1, 2]
spec = fig.add_gridspec(4, 1, height_ratios=heights)
fig.subplots_adjust(left=0.05, bottom=0.05, right=0.98, top=0.96, wspace=0.01, hspace=0.05)

# Panel 1: TX/RX throughput.
axs = fig.add_subplot(spec[0,0])
axs.set_title('Connection Throughput', fontsize=20)
data1, = axs.plot(timeMs, txMbps, label="TX")
data2, = axs.plot(timeMs, rxMbps, label="RX")
plt.legend(handles=[data1, data2], loc='upper right')
axs.set_xticks([])
axs.yaxis.set_major_locator(plticker.MaxNLocator())
axs.set_ylabel('Mbps', fontsize=14)
axs.margins(x=0, y=0)

# Panel 2: bytes in flight, congestion window and (unless --nofc) the flow
# control windows, which typically dwarf the other values.
axs = fig.add_subplot(spec[1,0])
data1, = axs.plot(timeMs, inFlight, label="InFlight")
data2, = axs.plot(timeMs, cwnd, label="Cwnd")
if "--nofc" in sys.argv:
    plt.legend(handles=[data1, data2], loc='upper right')
else:
    data3, = axs.plot(timeMs, fcStream, label="FcStream")
    data4, = axs.plot(timeMs, fcConn, label="FcConn")
    plt.legend(handles=[data1, data2, data3, data4], loc='upper right')
axs.set_xticks([])
axs.yaxis.set_major_locator(plticker.MaxNLocator())
axs.set_ylabel('KB', fontsize=14)
axs.margins(x=0, y=0)

# Panel 3: congestion events.
axs = fig.add_subplot(spec[2,0])
data, = axs.plot(timeMs, congEvent, label="Congestion")
plt.legend(handles=[data], loc='upper right')
axs.yaxis.set_major_locator(plticker.MaxNLocator())
axs.set_xticks([])
axs.set_yticks([])
axs.margins(x=0, y=0)

# Panel 4: RTT.
axs = fig.add_subplot(spec[3,0])
data, = axs.plot(timeMs, rttMs, label="RTT")
plt.legend(handles=[data], loc='upper right')
axs.xaxis.set_major_locator(plticker.MaxNLocator())
axs.yaxis.set_major_locator(plticker.MaxNLocator())
axs.set_xlabel('ms', fontsize=14)
axs.set_ylabel('ms', fontsize=14)
axs.margins(x=0, y=0)

mng = plt.get_current_fig_manager()
try:
    # 'zoomed' is a TkAgg-only feature; other backends (Qt, macosx, Agg)
    # have no .window.state attribute, which previously crashed here.
    mng.window.state('zoomed')
except AttributeError:
    pass
plt.legend()
plt.show()
|
#!/usr/bin/env python3
# GUI for the Python scripts of DARx automation. Made on https://github.com/chriskiehl/Gooey
from gooey import Gooey, GooeyParser
import sys
import module_run
import module_home
import module_rvol
import module_calibrate
import config
import streamtologger
import json
def get_positions(): #getting positions of syringe pumps from config_[name of the host].ini
    # Returns the (antibody, reagent, needle) values read from the host config.
    return config.readvalue('antibody'), config.readvalue('reagent'), config.readvalue('needle')
# Read once at import time so the Gooey field labels below can display them.
antibody,reagent,needle= get_positions()
@Gooey(program_name="DarX Run", progress_regex=r".*?Cycle: (\d+)/(\d+)$", show_success_modal=False,optional_cols=3, progress_expr="x[0] / x[1] * 100", show_sidebar=True, program_description="Script configurator",image_dir="/home/pi/darx/icons/")
def parse_args():
    """Build the Gooey GUI (one subcommand per routine) and parse its arguments.

    Returns the parsed namespace; ``subparser_name`` identifies which routine
    (Home / Rvol / Pre-run / Run / Manual / UV / Release) the user selected.
    """
    parser = GooeyParser()
    # One subparser per sidebar entry; dispatched on subparser_name in __main__.
    subparsers = parser.add_subparsers(help='options', dest='subparser_name')
    home_parser = subparsers.add_parser('Home')
    rvol_parser = subparsers.add_parser('Rvol')
    prerun_parser = subparsers.add_parser('Pre-run')
    run_parser = subparsers.add_parser('Run')
    manual_parser = subparsers.add_parser('Manual')
    release_parser = subparsers.add_parser('Release')
    # Argument groups provide the headings and column layout in the GUI.
    home_fields = home_parser.add_argument_group('Homing all syringes', 'Current positions of syringes:\nAntibody: {} steps\nReagent: {} steps\nNeedle position: {}\n'.format(antibody,reagent,needle))
    rvol_fields = rvol_parser.add_argument_group('Measuring reactor volume', 'The routine permits to measure the reactor volume\nPress Start to execute',gooey_options={'columns': 3})
    prerun_fields = prerun_parser.add_argument_group('Pre-run homogenisation routines', 'Executes two in-out cycles to homogenise\nPlease provide the reactor volume in uL')
    run_fields = run_parser.add_argument_group('DarX parameters')
    manual_fields = manual_parser.add_argument_group('Control induvidial components', 'Press Start to execute', gooey_options={'columns': 3})
    release_fields = release_parser.add_argument_group('Release routines', 'Press Start to execute', gooey_options={'columns': 3})
    home_fields.add_argument("-home", metavar= "Homing utility", default='Press Start to execute')
    rvol_fields.add_argument("-rvol_speed", metavar= "Speed in, ml/min", default=0.5)
    prerun_fields.add_argument("-volout", metavar="Pre-run parameters", help='Execute pre-run routines. Indicate the reactor volume',default=5000, gooey_options={'validator':{'test': '0 < int(user_input) <= 10000','message': 'Must be between 1 and 10000 uL'
    }}, type=int)
    prerun_fields.add_argument("-volout_uv", metavar="UV", default=False, action="store_true")
    run_fields_optional = run_parser.add_argument_group('Advanced parameters', gooey_options={'columns': 3})
    # Main 'Run' parameters; validators run client-side in the Gooey window.
    run_fields.add_argument("name", metavar="Experiment name", type=str)
    run_fields.add_argument("-onlygenerate", metavar="Generate only", default=False, action="store_true", help="Only generate .py file")
    run_fields.add_argument("-rvol", metavar="Reactor volume, uL", default=5000, gooey_options={'validator':{'test': '0 < int(user_input) <= 10000',
                                        'message': 'Must be between 1 and 10000 uL'
                                        }}, type=int)
    run_fields.add_argument("-cycles", metavar="Number of cycles", default=20, gooey_options={'validator':{'test': '0 < int(user_input) <= 200',
                                        'message': 'Must be between 1 and 200 cycles'
                                        }}, type=int)
    run_fields.add_argument("-add", metavar="Reagent volume, uL", default=50, gooey_options={'validator':{'test': '0 < int(user_input) <= 100',
                                        'message': 'Must be between 0 and 100 uL'
                                        }}, type=float)
    run_fields.add_argument("-coeff", metavar="Reagent decrease coefficient", default=0.95, gooey_options={'validator':{'test': '0 < float(user_input) <= 1',
                                        'message': 'Must be between 0 and 1'
                                        }}, type=float)
    run_fields_optional.add_argument("-time", metavar="Incubation time, sec", default=900, gooey_options={'validator':{'test': '0 <= int(user_input)',
                                        'message': 'Must be a positive number'
                                        }}, type=int)
    run_fields_optional.add_argument ("-speed_in", metavar="Speed in, mL/min", default=0.5, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 1',
                                        'message': 'Must be between 0.05 and 1 ml/min'
                                        }}, type=float)
    run_fields_optional.add_argument ("-speed_out", metavar="Speed out, mL/min", default=1, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 1',
                                        'message': 'Must be between 0.05 and 1 ml/min'
                                        }}, type=float)
    run_fields_optional.add_argument ("-speed_reagent", metavar="Speed reagent, mL/min", default=1, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 5',
                                        'message': 'Must be between 0.05 and 1 ml/min'
                                        }}, type=float)
    run_fields_optional.add_argument ("-uv_time", metavar="Time of UV measurement, sec", default=2, gooey_options={'validator':{'test': '0 <= float(user_input) <= 10',
                                        'message': 'Must be between 0 and 10 sec'
                                        }}, type=float)
    run_fields_optional.add_argument ("-mixing_time", metavar="Time of mixing between in/out, sec", default=30, gooey_options={'validator':{'test': '0 <= float(user_input) <= 300',
                                        'message': 'Must be between 0 and 300 sec'
                                        }}, type=float)
    # 'Manual' tab: direct control of individual components.
    manual_fields.add_argument("-reac", metavar="Reactor syringe, uL", default=0, gooey_options={'validator':{'test': '0 <= int(user_input) <= 10000',
                                        'message': 'Must be between 0 and 10000 uL'
                                        }}, type=float)
    manual_fields.add_argument ("-reac_speed", metavar="Speed reactor syringe, mL/min", default=0.5, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 5',
                                        'message': 'Must be between 0.05 and 5 ml/min'
                                        }}, type=float)
    manual_fields.add_argument("-reac_dir", metavar="Direction (reactor)",choices=['in','out'],default='in')
    manual_fields.add_argument("-reag", metavar="Reagent syringe, uL", default=0, gooey_options={'validator':{'test': '0 <= int(user_input) <= 1000',
                                        'message': 'Must be between 0 and 1000 uL'
                                        }}, type=float)
    manual_fields.add_argument ("-reag_speed", metavar="Speed reagent syringe, mL/min", default=0.5, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 5',
                                        'message': 'Must be between 0.05 and 5 ml/min'
                                        }}, type=float)
    manual_fields.add_argument("-reag_dir", metavar="Direction (reagent)",choices=['in','out'],default='in')
    manual_fields.add_argument ("-manual_mixing", metavar="Mixing time, sec", default=0, gooey_options={'validator':{'test': '0 <= float(user_input) <= 6000',
                                        'message': 'Must be between 0 and 6000 sec'
                                        }}, type=float)
    manual_fields.add_argument("-manual_needle", metavar="Move needle",choices=['up','down'])
    manual_fields.add_argument("-manual_uv", metavar="UV", default=False, action="store_true")
    # 'UV' tab: up to six BSA standard concentrations for calibration.
    cal_parser = subparsers.add_parser('UV')
    cal_fields = cal_parser.add_argument_group('UV Detector Calibration', 'Indicate standard concentrations below (BSA, mg/ml)\nIf no concentration provided, standard will be used', gooey_options={'columns': 6})
    for i in range (6):
        cal_fields.add_argument ("-conc_{}".format(i+1), metavar="#{}".format(i+1), type=str)
    # 'Release' tab parameters.
    release_fields.add_argument("name", metavar="Experiment name", type=str)
    release_fields.add_argument("-rvol", metavar="Reactor volume, uL", default=5000, gooey_options={'validator':{'test': '0 < int(user_input) <= 10000',
                                        'message': 'Must be between 1 and 10000 uL'
                                        }}, type=int)
    release_fields.add_argument("-time", metavar="Time of the release, min", default=300, gooey_options={'validator':{'test': '0 < int(user_input) <= 10000',
                                        'message': 'Must be between 1 and 10000 minutes'
                                        }}, type=int)
    release_fields.add_argument ("-speed_in", metavar="Speed in, mL/min", default=0.1, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 1',
                                        'message': 'Must be between 0.05 and 1 ml/min'
                                        }}, type=float)
    release_fields.add_argument ("-speed_out", metavar="Speed out, mL/min", default=0.1, gooey_options={'validator':{'test': '0.05 <= float(user_input) <= 1',
                                        'message': 'Must be between 0.05 and 1 ml/min'
                                        }}, type=float)
    release_fields.add_argument("-uv", metavar="UV", default=False, action="store_true")
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    # Dispatch to the module implementing the subcommand the user selected.
    conf = parse_args()
    if conf.subparser_name == 'Run':
        module_run.run(conf)
    elif conf.subparser_name == 'Home':
        # Re-read positions so homing reports the latest syringe state.
        antibody,reagent,needle=get_positions()
        module_home.positions(antibody,reagent,needle)
        module_home.home(conf)
    elif conf.subparser_name == 'Rvol':
        module_rvol.volume(conf)
    elif conf.subparser_name == 'Manual':
        antibody,reagent,needle=get_positions()
        module_run.manual(conf,antibody,reagent,needle)
    elif conf.subparser_name == 'UV':
        module_calibrate.cal(conf)
    elif conf.subparser_name == 'Pre-run':
        module_run.prerun(conf)
    elif conf.subparser_name == 'Release':
        antibody,reagent,needle=get_positions()
        module_run.release(conf,antibody,reagent,needle)
|
<filename>src/meltano/core/project.py
"""Meltano Projects."""
from __future__ import annotations
import errno
import logging
import os
import sys
import threading
from contextlib import contextmanager
from pathlib import Path
import fasteners
from dotenv import dotenv_values
from werkzeug.utils import secure_filename
from meltano.core.environment import Environment
from meltano.core.plugin.base import PluginRef
from .behavior.versioned import Versioned
from .error import Error
from .meltano_file import MeltanoFile
from .project_files import ProjectFiles
from .utils import makedirs, truthy
logger = logging.getLogger(__name__)
PROJECT_ROOT_ENV = "MELTANO_PROJECT_ROOT"
PROJECT_ENVIRONMENT_ENV = "MELTANO_ENVIRONMENT"
PROJECT_READONLY_ENV = "MELTANO_PROJECT_READONLY"
class ProjectNotFound(Error):
    """Raised when a Project is instantiated outside of a meltano project structure."""

    def __init__(self, project: Project):
        """Initialize the error for a project whose meltano.yml is missing.

        Args:
            project: the name of the project which cannot be found
        """
        message = f"Cannot find `{project.meltanofile}`. Are you in a meltano project?"
        super().__init__(message)
class ProjectReadonly(Error):
    """Raised when attempting to update a readonly project."""

    def __init__(self):
        """Initialize the error with its fixed read-only message."""
        msg = "This Meltano project is deployed as read-only"
        super().__init__(msg)
def walk_parent_directories():
    """Yield the current working directory and each of its ancestors.

    Stops once the filesystem root is reached (where a directory is its
    own parent).

    Yields:
        parent directories
    """
    current = os.getcwd()
    previous = None
    # At the root, os.path.dirname() returns its argument unchanged,
    # which terminates the walk.
    while current != previous:
        yield current
        current, previous = os.path.dirname(current), current
class Project(Versioned):  # noqa: WPS214
    """Represent the current Meltano project from a file-system perspective."""

    __version__ = 1

    # In-process locks serializing activate()/find(); reader-writer lock
    # guarding meltano.yml access within this process.
    _activate_lock = threading.Lock()
    _find_lock = threading.Lock()
    _meltano_rw_lock = fasteners.ReaderWriterLock()
    _default = None

    def __init__(self, root: Path | str):
        """Instantiate a Project from its root directory.

        Args:
            root: the root directory for the project
        """
        self.root = Path(root).resolve()
        self.readonly = False
        self._project_files = None
        self.__meltano_ip_lock = None
        self.active_environment: Environment | None = None

    @property
    def _meltano_ip_lock(self):
        # Lazily built inter-process lock for meltano.yml updates; the lock
        # file lives in the project's run dir, so it cannot be created in
        # __init__ before the directory structure is known to exist.
        if self.__meltano_ip_lock is None:
            self.__meltano_ip_lock = fasteners.InterProcessLock(
                self.run_dir("meltano.yml.lock")
            )
        return self.__meltano_ip_lock

    @property
    def env(self):
        """Get environment variables for this project.

        Returns:
            dict of environment variables and values for this project.
        """
        environment_name = (
            self.active_environment.name if self.active_environment else ""
        )
        return {
            PROJECT_ROOT_ENV: str(self.root),
            PROJECT_ENVIRONMENT_ENV: environment_name,
        }

    @classmethod
    @fasteners.locked(lock="_activate_lock")
    def activate(cls, project: Project):
        """Activate the given Project.

        Args:
            project: the Project to activate

        Raises:
            OSError: if project cannot be activated due to unsupported OS
        """
        project.ensure_compatible()

        # create a symlink to our current binary
        try:
            executable = Path(os.path.dirname(sys.executable), "meltano")
            if executable.is_file():
                project.run_dir().joinpath("bin").symlink_to(executable)
        except FileExistsError:
            # A previous activation already created the symlink.
            pass
        except OSError as error:
            if error.errno == errno.EOPNOTSUPP:
                logger.warning(
                    f"Could not create symlink: {error}\nPlease make sure that the underlying filesystem supports symlinks."
                )
            else:
                raise

        logger.debug(f"Activated project at {project.root}")

        # set the default project
        cls._default = project

    @classmethod
    def deactivate(cls):
        """Deactivate the given Project."""
        cls._default = None

    @property
    def file_version(self):
        """Get the version of Meltano found in this project's meltano.yml.

        Returns:
            the Project's meltano version
        """
        return self.meltano.version

    @classmethod
    @fasteners.locked(lock="_find_lock")
    def find(cls, project_root: Path | str = None, activate=True):
        """Find a Project.

        Args:
            project_root: The path to the root directory of the project. If not supplied,
                infer from PROJECT_ROOT_ENV or the current working directory and its parents.
            activate: Save the found project so that future calls to `find` will
                continue to use this project.

        Returns:
            the found project

        Raises:
            ProjectNotFound: if the provided `project_root` is not a Meltano project, or
                the current working directory is not a Meltano project or a subfolder of one.
        """
        if cls._default:
            return cls._default

        project_root = project_root or os.getenv(PROJECT_ROOT_ENV)
        if project_root:
            project = Project(project_root)
            if not project.meltanofile.exists():
                raise ProjectNotFound(project)
        else:
            for directory in walk_parent_directories():
                project = Project(directory)
                if project.meltanofile.exists():
                    break
            if not project.meltanofile.exists():
                raise ProjectNotFound(Project(os.getcwd()))

        # if we activate a project using `find()`, it should
        # be set as the default project for future `find()`
        if activate:
            cls.activate(project)

        if truthy(os.getenv(PROJECT_READONLY_ENV, "false")):
            project.readonly = True

        return project

    @property
    def project_files(self):
        """Return a singleton ProjectFiles file manager instance.

        Returns:
            ProjectFiles file manager
        """
        if self._project_files is None:
            self._project_files = ProjectFiles(
                root=self.root, meltano_file_path=self.meltanofile
            )
        return self._project_files

    @property
    def meltano(self) -> MeltanoFile:
        """Return a copy of the current meltano config.

        Returns:
            the current meltano config
        """
        with self._meltano_rw_lock.read_lock():
            return MeltanoFile.parse(self.project_files.load())

    @contextmanager
    def meltano_update(self):
        """Yield the current meltano configuration.

        Update the meltanofile if the context ends gracefully.

        Yields:
            the current meltano configuration

        Raises:
            ProjectReadonly: if this project is readonly
            Exception: if project files could not be updated
        """
        if self.readonly:
            raise ProjectReadonly

        # fmt: off
        with self._meltano_rw_lock.write_lock(), self._meltano_ip_lock:

            meltano_config = MeltanoFile.parse(self.project_files.load())

            yield meltano_config

            try:
                meltano_config = self.project_files.update(meltano_config.canonical())
            except Exception as err:
                logger.critical("Could not update meltano.yml: %s", err)  # noqa: WPS323
                raise
        # fmt: on

    def root_dir(self, *joinpaths):
        """Return the root directory of this project, optionally joined with path.

        Args:
            joinpaths: list of subdirs and/or file to join to project root.

        Returns:
            project root joined with provided subdirs and/or file
        """
        return self.root.joinpath(*joinpaths)

    @contextmanager
    def file_update(self):
        """Raise error if project is readonly.

        Used in context where project files would be updated.

        Yields:
            the project root

        Raises:
            ProjectReadonly: if the project is readonly
        """
        if self.readonly:
            raise ProjectReadonly

        yield self.root

    @property
    def meltanofile(self):
        """Get the path to this project's meltano.yml.

        Returns:
            the path to this project meltano.yml
        """
        return self.root.joinpath("meltano.yml")

    @property
    def dotenv(self):
        """Get the path to this project's .env file.

        Returns:
            the path to this project's .env file
        """
        return self.root.joinpath(".env")

    @property
    def dotenv_env(self):
        """Get values from this project's .env file.

        Returns:
            values found in this project's .env file
        """
        return dotenv_values(self.dotenv)

    def activate_environment(self, name: str) -> None:
        """Retrieve an environment configuration.

        Args:
            name: Name of the environment. Defaults to None.
        """
        self.active_environment = Environment.find(self.meltano.environments, name)

    @contextmanager
    def dotenv_update(self):
        """Raise error if project is readonly.

        Used in context where .env files would be updated.

        Yields:
            the .env file

        Raises:
            ProjectReadonly: if the project is readonly
        """
        if self.readonly:
            raise ProjectReadonly

        yield self.dotenv

    @makedirs
    def meltano_dir(self, *joinpaths, make_dirs: bool = True):
        """Path to the project `.meltano` directory.

        Args:
            joinpaths: Paths to join to the `.meltano` directory.
            make_dirs: Flag to make directories if not exists.

        Returns:
            Resolved path to `.meltano` dir optionally joined to given paths.
        """
        return self.root.joinpath(".meltano", *joinpaths)

    @makedirs
    def analyze_dir(self, *joinpaths, make_dirs: bool = True):
        """Path to the project `analyze` directory.

        Args:
            joinpaths: Paths to join to the `analyze` directory.
            make_dirs: Flag to make directories if not exists.

        Returns:
            Resolved path to `analyze` dir optionally joined to given paths.
        """
        return self.root_dir("analyze", *joinpaths)

    @makedirs
    def extract_dir(self, *joinpaths, make_dirs: bool = True):
        """Path to the project `extract` directory.

        Args:
            joinpaths: Paths to join to the `extract` directory.
            make_dirs: Flag to make directories if not exists.

        Returns:
            Resolved path to `extract` dir optionally joined to given paths.
        """
        return self.root_dir("extract", *joinpaths)

    @makedirs
    def venvs_dir(self, *prefixes, make_dirs: bool = True):
        """Path to a `venv` directory in `.meltano`.

        Args:
            prefixes: Paths to prepend to the `venv` directory in `.meltano`.
            make_dirs: Flag to make directories if not exists.

        Returns:
            Resolved path to `venv` dir optionally prepended with given prefixes.
        """
        return self.meltano_dir(*prefixes, "venv", make_dirs=make_dirs)

    @makedirs
    def run_dir(self, *joinpaths, make_dirs: bool = True):
        """Path to the `run` directory in `.meltano`.

        Args:
            joinpaths: Paths to join to the `run` directory in `.meltano`.
            make_dirs: Flag to make directories if not exists.

        Returns:
            Resolved path to `run` dir optionally joined to given paths.
        """
        return self.meltano_dir("run", *joinpaths, make_dirs=make_dirs)

    @makedirs
    def logs_dir(self, *joinpaths, make_dirs: bool = True):
        """Path to the `logs` directory in `.meltano`.

        Args:
            joinpaths: Paths to join to the `logs` directory in `.meltano`.
            make_dirs: Flag to make directories if not exists.

        Returns:
            Resolved path to `logs` dir optionally joined to given paths.
        """
        return self.meltano_dir("logs", *joinpaths, make_dirs=make_dirs)

    @makedirs
    def job_dir(self, job_id, *joinpaths, make_dirs: bool = True):
        """Path to the `elt` directory in `.meltano/run`.

        Args:
            job_id: Job ID of `run` dir.
            joinpaths: Paths to join to the `elt` directory in `.meltano`.
            make_dirs: Flag to make directories if not exists.

        Returns:
            Resolved path to `elt` dir optionally joined to given paths.
        """
        return self.run_dir(
            "elt", secure_filename(job_id), *joinpaths, make_dirs=make_dirs
        )

    @makedirs
    def job_logs_dir(self, job_id, *joinpaths, make_dirs: bool = True):
        """Path to the `elt` directory in `.meltano/logs`.

        Args:
            job_id: Job ID of `logs` dir.
            joinpaths: Paths to join to the `elt` directory in `.meltano/logs`.
            make_dirs: Flag to make directories if not exists.

        Returns:
            Resolved path to `elt` dir optionally joined to given paths.
        """
        return self.logs_dir(
            "elt", secure_filename(job_id), *joinpaths, make_dirs=make_dirs
        )

    @makedirs
    def model_dir(self, *joinpaths, make_dirs: bool = True):
        """Path to the `models` directory in `.meltano`.

        Args:
            joinpaths: Paths to join to the `models` directory in `.meltano`.
            make_dirs: Flag to make directories if not exists.

        Returns:
            Resolved path to `models` dir optionally joined to given paths.
        """
        return self.meltano_dir("models", *joinpaths, make_dirs=make_dirs)

    @makedirs
    def plugin_dir(self, plugin: PluginRef, *joinpaths, make_dirs: bool = True):
        """Path to the plugin installation directory in `.meltano`.

        Args:
            plugin: Plugin to retrieve or create directory for.
            joinpaths: Paths to join to the plugin installation directory in `.meltano`.
            make_dirs: Flag to make directories if not exists.

        Returns:
            Resolved path to plugin installation dir optionally joined to given paths.
        """
        return self.meltano_dir(
            plugin.type, plugin.name, *joinpaths, make_dirs=make_dirs
        )

    @makedirs
    def root_plugins_dir(self, *joinpaths: str, make_dirs: bool = True):
        """Path to the project `plugins` directory.

        Args:
            joinpaths: Paths to join with the project `plugins` directory.
            make_dirs: If True, create the directory hierarchy if it does not exist.

        Returns:
            Path to the project `plugins` directory.
        """
        return self.root_dir("plugins", *joinpaths)

    @makedirs
    def plugin_lock_path(
        self,
        plugin_type: str,
        plugin_name: str,
        variant_name: str | None = None,
        make_dirs: bool = True,
    ):
        """Path to the project lock file.

        Args:
            plugin_type: The plugin type.
            plugin_name: The plugin name.
            variant_name: The plugin variant name.
            make_dirs: If True, create the directory hierarchy if it does not exist.

        Returns:
            Path to the plugin lock file, named
            `<plugin_name>[--<variant_name>].lock`.
        """
        # Fix: the computed `filename` was previously never interpolated
        # (literal placeholder text leaked into the f-strings), so every
        # plugin produced the same lock file name.
        filename = f"{plugin_name}"

        if variant_name:
            filename = f"{filename}--{variant_name}"

        return self.root_plugins_dir(
            plugin_type,
            f"{filename}.lock",
            make_dirs=make_dirs,
        )

    def __eq__(self, other):
        """Project equivalence check.

        Args:
            other: The other Project instance to check against.

        Returns:
            True if Projects are equal.
        """
        return hasattr(other, "root") and self.root == other.root  # noqa: WPS421

    def __hash__(self):
        """Project hash.

        Returns:
            Project hash.
        """
        return self.root.__hash__()  # noqa: WPS609
|
<filename>ow/tests/test_catalog.py
from datetime import datetime, timedelta, timezone
import pytest
from repoze.catalog.catalog import Catalog
from repoze.catalog.indexes.field import CatalogFieldIndex
from repoze.catalog.query import Eq
from ow.models.workout import Workout
from ow.models.user import User
from ow.models.root import OpenWorkouts
from ow.catalog import (
get_catalog,
update_indexes,
install_catalog,
remove_from_catalog,
resources_from_query_results
)
class TestCatalog(object):

    @pytest.fixture
    def root(self):
        """Tree with one user ('john') owning a single cycling workout."""
        root = OpenWorkouts()
        root['john'] = User(firstname='John', lastname='Doe',
                            email='<EMAIL>')
        root['john'].password = '<PASSWORD>'
        workout = Workout(
            start=datetime(2015, 6, 28, 12, 55, tzinfo=timezone.utc),
            duration=timedelta(minutes=60),
            distance=30, sport='cycling'
        )
        root['john'].add_workout(workout)
        return root

    def test_update_indexes_no_changes(self, root):
        """Re-applying the current index set reports no changes."""
        cat = get_catalog(root)
        diff = update_indexes(cat, root._get_catalog_indexes())
        assert diff['added'] == []
        assert diff['removed'] == []

    def test_update_indexes_added(self, root):
        """An extra index on top of the defaults shows up under 'added'."""
        cat = get_catalog(root)
        idx = root._get_catalog_indexes()
        idx['newindex'] = CatalogFieldIndex('newindex')
        diff = update_indexes(cat, idx)
        assert diff['added'] == ['newindex']
        assert diff['removed'] == []

    def test_update_indexes_removed(self, root):
        """Replacing the index set drops the defaults, adds the new one."""
        cat = get_catalog(root)
        diff = update_indexes(cat, {'newindex': CatalogFieldIndex('newindex')})
        assert diff['added'] == ['newindex']
        assert diff['removed'] == ['email', 'nickname', 'sport']

    def test_update_indexes_empty(self, root):
        """An empty index set removes every default index."""
        diff = update_indexes(get_catalog(root), {})
        assert diff['added'] == []
        assert diff['removed'] == ['email', 'nickname', 'sport']

    def test_install_catalog(self):
        """install_catalog() recreates a deleted catalog attribute."""
        root = OpenWorkouts()
        assert isinstance(getattr(root, 'catalog', None), Catalog)
        del root.catalog
        assert getattr(root, 'catalog', None) is None
        install_catalog(root)
        assert isinstance(getattr(root, 'catalog', None), Catalog)

    def test_get_catalog_existing_catalog(self, root):
        """get_catalog() returns the catalog already present on the root."""
        assert isinstance(getattr(root, 'catalog', None), Catalog)
        assert get_catalog(root) == root.catalog

    def test_get_catalog_not_existing_catalog(self):
        """get_catalog() reinstalls a catalog when the root has none."""
        root = OpenWorkouts()
        assert isinstance(getattr(root, 'catalog', None), Catalog)
        del root.catalog
        assert getattr(root, 'catalog', None) is None
        cat = get_catalog(root)
        assert isinstance(getattr(root, 'catalog', None), Catalog)
        assert cat == root.catalog

    def test_get_catalog_root_child(self, root):
        """Asking a child resolves to the root's catalog without attaching
        a catalog to the child itself."""
        user = root['john']
        assert getattr(user, 'catalog', None) is None
        cat = get_catalog(user)
        assert getattr(user, 'catalog', None) is None
        assert isinstance(getattr(root, 'catalog', None), Catalog)
        assert cat == root.catalog

    def test_remove_from_catalog(self, root):
        """Removed resources no longer match catalog queries."""
        cat = get_catalog(root)
        hits, _ = cat.query(Eq('sport', 'cycling'))
        assert hits == 1
        remove_from_catalog(cat, root['john']['1'])
        hits, _ = cat.query(Eq('sport', 'cycling'))
        assert hits == 0

    def test_resources_from_query_results(self, root):
        """Query result ids resolve back to the catalogued resources."""
        cat = get_catalog(root)
        _, res_ids = cat.query(Eq('sport', 'cycling'))
        resources = resources_from_query_results(cat, res_ids, root)
        assert root['john']['1'] in list(resources)
|
from maraboupy import Marabou
from maraboupy import MarabouCore
import numpy as np
class marabouEncoding:
    """Encode verification properties over ONNX networks and solve them with Marabou."""

    def __init__(self):
        # Scratch map for solver variables (not used by the current checks).
        self.var = {}

    def checkProperties(self, prop, networkFile):
        """Dispatch a property check by name.

        Args:
            prop: sequence whose first element names the property
                ("checking-sign", "checking-confidence", "checking-equivalence",
                "checking-fairness") and remaining elements are its parameters.
            networkFile: path to the ONNX model to verify.

        Returns:
            "sat"/"unsat" from the selected check.
        """
        # Reading DNN using our own version of reading onnx file
        network_verified = Marabou.read_onnx_deepproperty(networkFile)
        if prop[0] == "checking-sign":
            print("-----------checking targeted digit----------")
            return self.checkSign(network_verified, prop[1])
        elif prop[0] == "checking-confidence":
            print("-----------checking confidence ----------")
            return self.checkConfidence(network_verified, prop[1], prop[2])
        elif prop[0] == "checking-equivalence":
            print("-----------checking equivalence of two network----------")
            return self.checkEq(network_verified, prop[1])
        elif prop[0] == "checking-fairness":
            print("-----------checking fairness----------")
            # Fix: propagate the verdict like every other branch; previously
            # the result was discarded and None was returned.
            return self.checkFair(network_verified, prop[1])

    def checkSign(self, network_verified, number):
        """Check whether the targeted class `number` can be beaten while the
        property network still accepts the input.

        Returns:
            "sat" if a counterexample exists, "unsat" otherwise.
        """
        # This DNN outputs two elements, one is the prediction(dim = 10), the other one is true/false
        inputVars_verified = network_verified.inputVars[0]  # resize*resize
        outputVars_verified = network_verified.outputVars  # 2*(43and2)
        # Encoding input region
        for i in range(len(inputVars_verified)):
            network_verified.setLowerBound(inputVars_verified[i], -0.4242)
            network_verified.setUpperBound(inputVars_verified[i], 2.8)
        # Encoding property network, when property network consider this digit is indeed the one wanted
        eq_prop = MarabouCore.Equation(MarabouCore.Equation.GE)
        eq_prop.addAddend(1, outputVars_verified[1][1])
        eq_prop.addAddend(-1, outputVars_verified[1][0])
        eq_prop.setScalar(0)  # confidence level? IDK, cause no softmax/relu
        disjunction = [[eq_prop]]
        network_verified.addDisjunctionConstraint(disjunction)
        # Encoding NUV, if any other digit have higher confidence
        disjunction = []
        for i in range(len(outputVars_verified[0])):
            eq_verified = MarabouCore.Equation(MarabouCore.Equation.GE)
            eq_verified.addAddend(1, outputVars_verified[0][i])
            eq_verified.addAddend(-1, outputVars_verified[0][number])
            eq_verified.setScalar(0)
            disjunction.append([eq_verified])
        network_verified.addDisjunctionConstraint(disjunction)
        vals, stats = network_verified.solve()
        if vals:
            return "sat"
        else:
            return "unsat"

    def checkConfidence(self, network_verified, epsilon, delta):
        """Check confidence robustness within an l-inf ball of radius `epsilon`.

        Returns:
            "sat" if a counterexample exists, "unsat" otherwise.
        """
        inputVars_verified = network_verified.inputVars[0]  # resize * resize
        outputVars_verified = network_verified.outputVars  # 2*(43 and resize*resize)
        # Encoding input region
        for i in range(len(inputVars_verified)):
            network_verified.setLowerBound(inputVars_verified[i], -0.4242)
            network_verified.setUpperBound(inputVars_verified[i], 2.8)
        # Encoding property network, l-inf norm
        for i in range(len(outputVars_verified[1])):
            eq_property_1 = MarabouCore.Equation(MarabouCore.Equation.LE)
            eq_property_1.addAddend(1, outputVars_verified[1][i])
            eq_property_1.addAddend(-1, inputVars_verified[i])
            eq_property_1.setScalar(epsilon)
            network_verified.addDisjunctionConstraint([[eq_property_1]])
            eq_property_2 = MarabouCore.Equation(MarabouCore.Equation.LE)
            eq_property_2.addAddend(1, inputVars_verified[i])
            eq_property_2.addAddend(-1, outputVars_verified[1][i])
            eq_property_2.setScalar(epsilon)
            network_verified.addDisjunctionConstraint([[eq_property_2]])
        # Encoding NUV, assume the input image should be classified as a specific sign, e.g., eight
        for j in range(len(outputVars_verified[0])):
            disjunction = []
            eq_verified = MarabouCore.Equation(MarabouCore.Equation.GE)
            eq_verified.addAddend(1, outputVars_verified[0][14])
            eq_verified.addAddend(-1, outputVars_verified[0][j])
            eq_verified.setScalar(0)
            disjunction.append([eq_verified])
            network_verified.addDisjunctionConstraint(disjunction)
        # Encoding network under verified, mean difference between max_val and all the others
        disjunction = []
        eq_verified = MarabouCore.Equation(MarabouCore.Equation.LE)
        for j in range(len(outputVars_verified[0])):
            eq_verified.addAddend(1, outputVars_verified[0][14])
            eq_verified.addAddend(-1, outputVars_verified[0][j])
        eq_verified.setScalar(delta * 42)
        disjunction.append([eq_verified])
        network_verified.addDisjunctionConstraint(disjunction)
        vals, stats = network_verified.solve()
        if vals:
            return "sat"
        else:
            return "unsat"

    def checkEq(self, network_verified, epsilon):
        """Check equivalence of the two networks up to tolerance `epsilon`.

        Returns:
            "sat" if the outputs can differ by more than epsilon, "unsat" otherwise.
        """
        inputVars_verified = network_verified.inputVars[0]  # resize * resize
        outputVars_verified = network_verified.outputVars  # 2*(43and43)
        print(outputVars_verified)
        # Encoding input region
        for i in range(len(inputVars_verified)):
            network_verified.setLowerBound(inputVars_verified[i], -0.4242)
            network_verified.setUpperBound(inputVars_verified[i], 2.8)
        disjunction = []
        for i in range(len(outputVars_verified[1])):
            eq_property_1 = MarabouCore.Equation(MarabouCore.Equation.GE)
            eq_property_1.addAddend(1, outputVars_verified[1][i])
            eq_property_1.addAddend(-1, outputVars_verified[0][i])
            eq_property_1.setScalar(epsilon)
            disjunction.append([eq_property_1])
            eq_property_2 = MarabouCore.Equation(MarabouCore.Equation.GE)
            eq_property_2.addAddend(1, outputVars_verified[0][i])
            eq_property_2.addAddend(-1, outputVars_verified[1][i])
            eq_property_2.setScalar(epsilon)
            disjunction.append([eq_property_2])
        network_verified.addDisjunctionConstraint(disjunction)
        vals, stats = network_verified.solve()
        if vals:
            return "sat"
        else:
            return "unsat"

    def checkFair(self, network_verified, epsilon):
        """Check individual fairness w.r.t. the sensitive feature at index 0.

        Returns:
            "sat" if a fairness counterexample exists, "unsat" otherwise.
        """
        inputVars_verified = network_verified.inputVars  # 2*(784and784)
        outputVars_verified = network_verified.outputVars  # 2*(10and10)
        for i in range(len(inputVars_verified)):
            for j in range(len(inputVars_verified[0])):
                network_verified.setLowerBound(inputVars_verified[i][j], -0.4242)
                network_verified.setUpperBound(inputVars_verified[i][j], 2.8)
        # Non-sensitive feature be the same
        for i in range(1, len(inputVars_verified[0])):
            disjunction = []
            eq_property_1 = MarabouCore.Equation(MarabouCore.Equation.EQ)
            eq_property_1.addAddend(1, inputVars_verified[0][i])
            eq_property_1.addAddend(-1, inputVars_verified[1][i])
            eq_property_1.setScalar(0)
            disjunction.append([eq_property_1])
            network_verified.addDisjunctionConstraint(disjunction)
        # Sensitive feature different, index 0 for now
        disjunction = []
        eq_property_2 = MarabouCore.Equation(MarabouCore.Equation.GE)
        eq_property_2.addAddend(1, inputVars_verified[0][0])
        eq_property_2.addAddend(-1, inputVars_verified[1][0])
        eq_property_2.setScalar(epsilon)
        disjunction.append([eq_property_2])
        network_verified.addDisjunctionConstraint(disjunction)
        # Encoding NUV, assume the input image should be classified as stop sign (idx: 14)
        for i in range(len(outputVars_verified[0])):
            disjunction = []
            eq_verified = MarabouCore.Equation(MarabouCore.Equation.GE)
            eq_verified.addAddend(1, outputVars_verified[0][14])
            eq_verified.addAddend(-1, outputVars_verified[0][i])
            eq_verified.setScalar(0)
            disjunction.append([eq_verified])
            network_verified.addDisjunctionConstraint(disjunction)
        # Encoding the output of property network, the counter example is classified as other sign
        for i in range(len(outputVars_verified[1])):
            disjunction = []
            eq_verified = MarabouCore.Equation(MarabouCore.Equation.GE)
            eq_verified.addAddend(1, outputVars_verified[1][7])
            eq_verified.addAddend(-1, outputVars_verified[1][i])
            eq_verified.setScalar(0)
            disjunction.append([eq_verified])
            network_verified.addDisjunctionConstraint(disjunction)
        vals, stats = network_verified.solve()
        # Fix: return a verdict like the sibling check methods instead of
        # silently dropping the solver result.
        if vals:
            return "sat"
        else:
            return "unsat"

    def compute_adv_example(self, networkFile, data, label, target_adv):
        """Stub: load the network and echo the inputs.

        NOTE(review): adversarial-example search is not implemented yet;
        this currently only reads the network and prints its arguments.
        """
        network_verified = Marabou.read_onnx(networkFile)
        print(data)
        print(label)
        print(target_adv)
|
bl_info = {
"name": "Audio Proxy",
"category": "Sequencer",
}
import bpy
import os
import ffmpy
import sys
from bpy.app.handlers import persistent
class AudioProxyAddonPreferences(bpy.types.AddonPreferences):
    '''Preferences to store proxy file path and format'''
    bl_idname = __name__
    # Directory the proxy files are written to (Blender-relative "//" path).
    output_path = bpy.props.StringProperty(name="Path", subtype="FILE_PATH", default="//BL_proxy/audio")
    # File extension / container handed to ffmpeg for the proxies.
    output_format = bpy.props.StringProperty(name="Format", default="ogg")
    def draw(self, context):
        """Draw both preference fields in a single column."""
        layout = self.layout
        col = layout.column()
        col.prop(self,"output_path")
        col.prop(self,"output_format")
class AudioProxySequenceProperties(bpy.types.PropertyGroup):
    """Per-strip storage for the original and proxy audio file paths."""
    # Path of the sound file the strip originally referenced.
    path_original = bpy.props.StringProperty(name="Original Path", subtype="FILE_PATH")
    # Path of the transcoded proxy file for this strip.
    path_proxy = bpy.props.StringProperty(name="Proxy Path", subtype="FILE_PATH")
class AudioProxyUseOrig(bpy.types.Operator):
    """Switch every sound strip back to its original (full-quality) file."""
    bl_idname = "audio_proxy.orig"
    bl_label = "Use Original"
    bl_options = {'REGISTER'}
    def execute(self, context):
        """Point each SOUND strip's datablock at the stored original path."""
        scene = context.scene
        for s in scene.sequence_editor.sequences:
            if s.type == 'SOUND':
                s.sound.filepath = s.audio_proxy.path_original
        return {'FINISHED'}
class AudioProxyUseProxy(bpy.types.Operator):
    """Switch every sound strip to its transcoded proxy file."""
    bl_idname = "audio_proxy.proxy"
    bl_label = "Use Proxy"
    bl_options = {'REGISTER'}
    def execute(self, context):
        """Point each SOUND strip's datablock at the stored proxy path."""
        scene = context.scene
        for s in scene.sequence_editor.sequences:
            if s.type == 'SOUND':
                s.sound.filepath = s.audio_proxy.path_proxy
        return {'FINISHED'}
class AudioProxyCreate(bpy.types.Operator):
    """Create Proxy Audio Files"""
    bl_idname = "audio_proxy.create"
    bl_label = "Create Audio Proxies"
    bl_options = {'REGISTER'}

    def execute(self, context):
        """Transcode every sound strip to a proxy file and record both paths.

        Existing proxy files are reused; missing output directories are created.
        """
        scene = context.scene
        # Fix: read preferences at execution time through the operator's
        # context. The original captured bpy.context.user_preferences as a
        # class attribute at import/registration time, when the context is
        # restricted and the snapshot can go stale.
        addon_prefs = context.user_preferences.addons[__name__].preferences
        for s in scene.sequence_editor.sequences:
            if s.type == 'SOUND':
                path_old = os.path.realpath(bpy.path.abspath(s.sound.filepath))
                path_new = bpy.path.relpath(os.path.join(
                    addon_prefs.output_path,
                    os.path.basename(s.sound.filepath) + "." + addon_prefs.output_format))
                s.audio_proxy.path_original = s.sound.filepath
                s.audio_proxy.path_proxy = path_new
                # Test if channel directory exists
                audio_path = os.path.dirname(bpy.path.abspath(path_new))
                if not os.path.isdir(audio_path):
                    os.makedirs(audio_path)
                # Test if proxy file already made; -vn strips video streams.
                if not os.path.isfile(bpy.path.abspath(path_new)):
                    ff = ffmpy.FFmpeg(
                        inputs={path_old: None},
                        outputs={bpy.path.abspath(path_new): ['-vn']}
                    )
                    ff.run()
        return {'FINISHED'}
class AudioProxySubMenu(bpy.types.Menu):
    """Submenu grouping the three audio-proxy operators."""
    bl_idname = "AudioProxySubMenu"
    bl_label = "Audio Proxy..."
    def draw(self, context):
        """List the create / use-original / use-proxy operators."""
        layout = self.layout
        layout.operator(AudioProxyCreate.bl_idname)
        layout.operator(AudioProxyUseOrig.bl_idname)
        layout.operator(AudioProxyUseProxy.bl_idname)
def menu_func(self, context):
    """Draw the Audio Proxy submenu entry followed by a separator."""
    layout = self.layout
    layout.menu(AudioProxySubMenu.bl_idname)
    layout.separator()
@persistent
def use_orig(self):
    """render_pre handler: swap all strips back to original audio before rendering.

    @persistent keeps the handler registered across .blend file loads.
    """
    bpy.ops.audio_proxy.orig()
def register():
    """Register all addon classes, the menu entry, the per-strip property
    group, and the render handler."""
    bpy.utils.register_class(AudioProxyAddonPreferences)
    bpy.utils.register_class(AudioProxyUseOrig)
    bpy.utils.register_class(AudioProxyUseProxy)
    bpy.utils.register_class(AudioProxyCreate)
    bpy.utils.register_class(AudioProxySequenceProperties)
    bpy.utils.register_class(AudioProxySubMenu)
    bpy.types.SEQUENCER_MT_strip.prepend(menu_func)
    # Attach proxy path storage to every sound strip.
    bpy.types.SoundSequence.audio_proxy = \
        bpy.props.PointerProperty(type=AudioProxySequenceProperties)
    # Ensure renders always use the original, full-quality audio.
    bpy.app.handlers.render_pre.append(use_orig)
def unregister():
    """Undo everything register() set up (classes, menu entry, strip property).

    NOTE(review): the render_pre handler appended in register() is not
    removed here — confirm whether that is intentional.
    """
    bpy.utils.unregister_class(AudioProxyAddonPreferences)
    bpy.utils.unregister_class(AudioProxyUseOrig)
    bpy.utils.unregister_class(AudioProxyUseProxy)
    bpy.utils.unregister_class(AudioProxyCreate)
    bpy.utils.unregister_class(AudioProxySequenceProperties)
    bpy.utils.unregister_class(AudioProxySubMenu)
    bpy.types.SEQUENCER_MT_strip.remove(menu_func)
    del bpy.types.SoundSequence.audio_proxy
if __name__ == "__main__":
register()
|
import tensorflow as tf
from tensorflow.python.layers.core import Dense
import numpy as np
import time
# Number of Epochs
epochs = 1000
# Batch Size
batch_size = 50
# RNN Size
rnn_size = 50
# Number of Layers
num_layers = 2
# Embedding Size
encoding_embedding_size = 15
decoding_embedding_size = 15
# Learning Rate
learning_rate = 0.001

# Split the couplet corpus into parallel source/target files: each corpus
# line is "<first-half> <second-half>". Use context managers so the handles
# are closed even on error, and pin utf-8 on every handle — the original
# wrote with the platform default encoding but read back with utf-8, which
# breaks on platforms whose default is not utf-8.
with open("data/对联.txt", 'r', encoding='utf-8') as f, \
        open("data/source.txt", 'w', encoding='utf-8') as source, \
        open("data/target.txt", 'w', encoding='utf-8') as target:
    for line in f:
        parts = line.strip().split(" ")
        source.write(parts[0] + '\n')
        target.write(parts[1] + '\n')

with open('data/source.txt', 'r', encoding='utf-8') as f:
    source_data = f.read()
with open('data/target.txt', 'r', encoding='utf-8') as f:
    target_data = f.read()

print(source_data.split('\n')[:10])
print(target_data.split('\n')[:10])
def extract_character_vocab(data):
    """Build character-level id lookup tables from newline-separated text.

    Args:
        data: corpus as a single string; one example per line.

    Returns:
        (int_to_vocab, vocab_to_int): the special tokens <PAD>, <UNK>, <GO>,
        <EOS> occupy the first ids, followed by each distinct character.
    """
    special_words = ['<PAD>', '<UNK>', '<GO>', '<EOS>']
    distinct_chars = list({character for line in data.split('\n') for character in line})
    int_to_vocab = dict(enumerate(special_words + distinct_chars))
    vocab_to_int = {word: idx for idx, word in int_to_vocab.items()}
    return int_to_vocab, vocab_to_int
# Shared vocabulary: both tables are built over the concatenated corpora,
# so source and target mappings are identical.
source_int_to_letter,source_letter_to_int = extract_character_vocab(source_data+target_data)
target_int_to_letter,target_letter_to_int = extract_character_vocab(source_data+target_data)
# Encode each source line as a list of character ids; unknowns map to <UNK>.
source_int = [[source_letter_to_int.get(letter,source_letter_to_int['<UNK>'])
               for letter in line] for line in source_data.split('\n')]
# Target lines additionally get a trailing <EOS> marker for the decoder.
target_int = [[target_letter_to_int.get(letter, target_letter_to_int['<UNK>'])
               for letter in line] + [target_letter_to_int['<EOS>']] for line in target_data.split('\n')]
print(source_int)
print(target_int)
def get_inputs():
    """Create the graph's input placeholders.

    Returns:
        (inputs, targets, learning_rate, target_sequence_length,
        max_target_sequence_length, source_sequence_length)
    """
    inputs = tf.placeholder(tf.int32,[None,None],name='inputs')
    targets = tf.placeholder(tf.int32,[None,None],name='targets')
    learning_rate = tf.placeholder(tf.float32,name='learning_rate')
    # Per-example target lengths and their batch maximum (bounds decoder unrolling).
    target_sequence_length = tf.placeholder(tf.int32,(None,),name='target_sequence_length')
    max_target_sequence_length = tf.reduce_max(target_sequence_length,name='max_target_len')
    source_sequence_length = tf.placeholder(tf.int32,(None,),name='source_sequence_length')
    return inputs,targets,learning_rate,target_sequence_length,max_target_sequence_length,source_sequence_length
def get_encoder_layer(input_data,rnn_size,num_layers,source_sequence_length,source_vocab_size,encoding_embedding_size):
    """Embed the source ids and run them through a multi-layer LSTM encoder.

    Fix: the opening triple-quote of this docstring was missing, which made
    the embed_sequence reference below parse as code (SyntaxError).

    Reference — tf.contrib.layers.embed_sequence signature:

        embed_sequence(
            ids,
            vocab_size=None,
            embed_dim=None,
            unique=False,
            initializer=None,
            regularizer=None,
            trainable=True,
            scope=None,
            reuse=None
        )

    ids: [batch_size, doc_length] Tensor of type int32 or int64 with symbol ids.
    return : Tensor of [batch_size, doc_length, embed_dim] with embedded sequences.

    Returns:
        (encoder_output, encoder_state) from tf.nn.dynamic_rnn.
    """
    encoder_embed_input = tf.contrib.layers.embed_sequence(input_data,source_vocab_size,encoding_embedding_size)

    def get_lstm_cell(rnn_size):
        # Fresh cell per layer; fixed seed keeps initialization reproducible.
        lstm_cell = tf.contrib.rnn.LSTMCell(rnn_size,initializer=tf.random_uniform_initializer(-0.1,0.1,seed=2))
        return lstm_cell

    cell = tf.contrib.rnn.MultiRNNCell([get_lstm_cell(rnn_size) for _ in range(num_layers)])
    encoder_output , encoder_state = tf.nn.dynamic_rnn(cell,encoder_embed_input,sequence_length=source_sequence_length,dtype=tf.float32)
    return encoder_output,encoder_state
def process_decoder_input(data,vocab_to_int,batch_size):
    """Build decoder inputs: drop each row's last token and prepend <GO>.

    Args:
        data: [batch_size, time] int tensor of target ids.
        vocab_to_int: vocabulary mapping providing the '<GO>' id.
        batch_size: number of rows in `data`.

    Returns:
        [batch_size, time] tensor shifted right with a leading <GO> column.
    """
    # Slice off the final column of every row (the slot freed for <GO>).
    ending = tf.strided_slice(data,[0,0],[batch_size,-1],[1,1])
    decoder_input = tf.concat([tf.fill([batch_size,1],vocab_to_int['<GO>']),ending],1)
    return decoder_input
def decoding_layer(target_letter_to_int,decoding_embedding_size,num_layers,rnn_size,
                   target_sequence_length,max_target_sequence_length,encoder_state,decoder_input):
    """Build the training and inference decoders over a shared LSTM stack.

    Returns:
        (training_decoder_output, predicting_decoder_output)

    NOTE(review): uses the module-level `batch_size` global for the start
    tokens rather than a parameter — confirm callers rely on that.
    """
    # 1. Embedding
    target_vocab_size = len(target_letter_to_int)
    decoder_embeddings = tf.Variable(tf.random_uniform([target_vocab_size,decoding_embedding_size]))
    decoder_embed_input = tf.nn.embedding_lookup(decoder_embeddings,decoder_input)
    def get_decoder_cell(rnn_size):
        decoder_cell = tf.contrib.rnn.LSTMCell(rnn_size,initializer=tf.random_uniform_initializer(-0.1,0.1,seed=2))
        return decoder_cell
    cell = tf.contrib.rnn.MultiRNNCell([get_decoder_cell(rnn_size) for _ in range(num_layers)])
    # Output
    # target_vocab_size sets the size of the output (projection) layer
    output_layer = Dense(target_vocab_size,kernel_initializer=tf.truncated_normal_initializer(mean=0.1,stddev=0.1))
    # 4. Training decoder (teacher forcing via TrainingHelper)
    with tf.variable_scope("decode"):
        training_helper = tf.contrib.seq2seq.TrainingHelper(inputs = decoder_embed_input,
                                                            sequence_length = target_sequence_length,
                                                            time_major = False)
        training_decoder = tf.contrib.seq2seq.BasicDecoder(cell,training_helper,encoder_state,output_layer)
        training_decoder_output,_,_ = tf.contrib.seq2seq.dynamic_decode(training_decoder,impute_finished=True,
                                                                        maximum_iterations = max_target_sequence_length)
    # 5. Predicting decoder (greedy decoding; reuses the training weights)
    with tf.variable_scope("decode",reuse=True):
        start_tokens = tf.tile(tf.constant([target_letter_to_int['<GO>']],dtype=tf.int32),[batch_size],name='start_token')
        predicting_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(decoder_embeddings,start_tokens,target_letter_to_int['<EOS>'])
        predicting_decoder = tf.contrib.seq2seq.BasicDecoder(cell,
                                                             predicting_helper,
                                                             encoder_state,
                                                             output_layer)
        predicting_decoder_output,_,_ = tf.contrib.seq2seq.dynamic_decode(predicting_decoder,impute_finished = True,
                                                                          maximum_iterations = max_target_sequence_length)
    return training_decoder_output,predicting_decoder_output
def seq2seq_model(input_data,targets,lr,target_sequence_length,max_target_sequence_length,
                  source_sequence_length,source_vocab_size,target_vocab_size,encoder_embedding_size,
                  decoder_embedding_size,rnn_size,num_layers):
    """Assemble the full encoder/decoder seq2seq graph.

    Args:
        input_data: int tensor of source token ids.
        targets: int tensor of target token ids.
        lr: learning-rate placeholder (not used here; consumed by the optimizer).
        target_sequence_length: per-item target lengths.
        max_target_sequence_length: maximum of target_sequence_length.
        source_sequence_length: per-item source lengths.
        source_vocab_size (int): size of the source vocabulary.
        target_vocab_size (int): size of the target vocabulary (currently unused;
            decoding_layer derives it from the global target_letter_to_int).
        encoder_embedding_size (int): encoder embedding dimensionality.
        decoder_embedding_size (int): decoder embedding dimensionality.
        rnn_size (int): LSTM units per layer.
        num_layers (int): number of stacked LSTM layers.

    Returns:
        (training_decoder_output, predicting_decoder_output).

    NOTE(review): still reads the module-level globals ``target_letter_to_int``
    and ``batch_size``; parameterizing those would change the interface.
    """
    # BUG FIX: the embedding-size *parameters* were previously ignored in favor
    # of the module-level globals encoding_embedding_size/decoding_embedding_size.
    _, encoder_state = get_encoder_layer(input_data,
                                         rnn_size,
                                         num_layers,
                                         source_sequence_length,
                                         source_vocab_size,
                                         encoder_embedding_size)
    decoder_input = process_decoder_input(targets, target_letter_to_int, batch_size)
    training_decoder_output, predicting_decoder_output = decoding_layer(target_letter_to_int,
                                                                        decoder_embedding_size,
                                                                        num_layers,
                                                                        rnn_size,
                                                                        target_sequence_length,
                                                                        max_target_sequence_length,
                                                                        encoder_state,
                                                                        decoder_input)
    return training_decoder_output, predicting_decoder_output
# Build the full training graph: placeholders, seq2seq model, masked
# sequence loss, and a gradient-clipped Adam training op.
train_graph = tf.Graph()
with train_graph.as_default():
    input_data, targets, lr, target_sequence_length, max_target_sequence_length, source_sequence_length = get_inputs()
    training_decoder_output, predicting_decoder_output = seq2seq_model(input_data,
                                                                       targets,
                                                                       lr,
                                                                       target_sequence_length,
                                                                       max_target_sequence_length,
                                                                       source_sequence_length,
                                                                       len(source_letter_to_int),
                                                                       len(target_letter_to_int),
                                                                       encoding_embedding_size,
                                                                       decoding_embedding_size,
                                                                       rnn_size,
                                                                       num_layers)
    # tf.identity gives these tensors stable names ('logits' / 'predictions')
    # so they can be fetched by name after the graph is reloaded from a checkpoint.
    training_logits = tf.identity(training_decoder_output.rnn_output,'logits')
    predicting_logits = tf.identity(predicting_decoder_output.sample_id,name='predictions')
    #tf.sequence_mask([1, 3, 2], 5) # [[True, False, False, False, False],
    #                                  [True, True, True, False, False],
    #                                  [True, True, False, False, False]]
    masks = tf.sequence_mask(target_sequence_length,max_target_sequence_length,dtype=tf.float32,name="masks")
    # logits: A Tensor of shape [batch_size, sequence_length, num_decoder_symbols] and dtype float.
    #         The logits correspond to the prediction across all classes at each timestep.
    # targets: A Tensor of shape [batch_size, sequence_length] and dtype int.
    #         The target represents the true class at each timestep.
    # weights: A Tensor of shape [batch_size, sequence_length] and dtype float.
    #         weights constitutes the weighting of each prediction in the sequence. When using weights as masking,
    #         set all valid timesteps to 1 and all padded timesteps to 0, e.g. a mask returned by tf.sequence_mask.
    with tf.name_scope("optimization"):
        cost = tf.contrib.seq2seq.sequence_loss(
            training_logits,
            targets,
            masks
        )
        optimizer = tf.train.AdamOptimizer(lr)
        # Clip every gradient to [-5, 5] to limit exploding gradients.
        gradients = optimizer.compute_gradients(cost)
        capped_gradients = [(tf.clip_by_value(grad, -5., 5.), var) for grad, var in gradients if grad is not None]
        train_op = optimizer.apply_gradients(capped_gradients)
def pad_sentence_batch(sentence_batch, pad_int):
    """Pad every sentence with ``pad_int`` up to the batch's longest sentence."""
    longest = max(len(sentence) for sentence in sentence_batch)
    return [sentence + [pad_int] * (longest - len(sentence)) for sentence in sentence_batch]
def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int):
    """Yield (padded_targets, padded_sources, target_lengths, source_lengths) batches.

    Trailing examples that do not fill a whole batch are dropped.
    """
    for batch_i in range(len(sources) // batch_size):
        lo = batch_i * batch_size
        hi = lo + batch_size
        sources_batch = sources[lo:hi]
        targets_batch = targets[lo:hi]
        yield (
            np.array(pad_sentence_batch(targets_batch, target_pad_int)),
            np.array(pad_sentence_batch(sources_batch, source_pad_int)),
            [len(target) for target in targets_batch],
            [len(source) for source in sources_batch],
        )
# Train
# Hold out the first `batch_size` examples for validation; train on the rest.
train_source = source_int[batch_size:]
train_target = target_int[batch_size:]
valid_source = source_int[:batch_size]
valid_target = target_int[:batch_size]
# Materialise one fixed, padded validation batch up front.
(valid_targets_batch, valid_sources_batch, valid_targets_lengths, valid_sources_lengths) = next(get_batches(valid_target, valid_source, batch_size,
                                                                                                            source_letter_to_int['<PAD>'],
                                                                                                            target_letter_to_int['<PAD>']))
display_step = 50  # report training/validation loss every 50 batches
checkpoint = "data/trained_model.ckpt"  # where the trained model is saved
# Training loop: run `epochs` passes over the training data, periodically
# evaluating on the fixed validation batch, then save a checkpoint.
# NOTE(review): `epochs` and `learning_rate` are module-level globals defined
# elsewhere in this script -- confirm they exist before this block runs.
with tf.Session(graph=train_graph) as sess:
    sess.run(tf.global_variables_initializer())
    print()
    for epoch_i in range(1,epochs+1):
        for batch_i,(targets_batch, sources_batch, targets_lengths, sources_lengths) in enumerate(get_batches(
                train_target,train_source,batch_size,source_letter_to_int['<PAD>'],
                target_letter_to_int['<PAD>']
                )):
            _,loss = sess.run([train_op,cost],feed_dict={
                input_data:sources_batch,
                targets:targets_batch,
                lr:learning_rate,
                target_sequence_length:targets_lengths,
                source_sequence_length:sources_lengths
            })
            # Every `display_step` batches, also measure loss on the held-out batch.
            if batch_i % display_step == 0:
                validation_loss = sess.run(
                    [cost],
                    {input_data: valid_sources_batch,
                     targets: valid_targets_batch,
                     lr: learning_rate,
                     target_sequence_length: valid_targets_lengths,
                     source_sequence_length: valid_sources_lengths})
                print('Epoch {:>3}/{} Batch {:>4}/{} - Training Loss: {:>6.3f}  - Validation loss: {:>6.3f}'
                      .format(epoch_i,
                              epochs,
                              batch_i,
                              len(train_source) // batch_size,
                              loss,
                              validation_loss[0]))
    # Persist the trained graph/weights so inference can reload them by name.
    saver = tf.train.Saver()
    saver.save(sess, checkpoint)
    print('Model Trained and Saved')
def source_to_seq(text):
    """Encode *text* as a fixed-length (7) id sequence: <UNK> for OOV, <PAD>-padded."""
    sequence_length = 7
    ids = [source_letter_to_int.get(word, source_letter_to_int['<UNK>']) for word in text]
    padding = [source_letter_to_int['<PAD>']] * (sequence_length - len(text))
    return ids + padding
# Inference: reload the trained graph from the checkpoint and greedily decode
# a single input word (the batch is filled with copies of it).
input_word = ' '
text = source_to_seq(input_word)
checkpoint = "data/trained_model.ckpt"
loaded_graph = tf.Graph()
with tf.Session(graph=loaded_graph) as sess:
    loader = tf.train.import_meta_graph(checkpoint+'.meta')
    loader.restore(sess,checkpoint)
    # Tensors are looked up by the names assigned via tf.identity()/get_inputs().
    input_data = loaded_graph.get_tensor_by_name('inputs:0')
    logits = loaded_graph.get_tensor_by_name('predictions:0')
    source_sequence_length = loaded_graph.get_tensor_by_name('source_sequence_length:0')
    target_sequence_length = loaded_graph.get_tensor_by_name('target_sequence_length:0')
    # [0]: all batch rows are identical copies, so keep only the first result.
    answer_logits = sess.run(logits, {input_data: [text] * batch_size,
                                      target_sequence_length: [len(input_word)] * batch_size,
                                      source_sequence_length: [len(input_word)] * batch_size})[0]
pad = source_letter_to_int["<PAD>"]
print('原始输入:', input_word)
print('\nSource')
print('  Word 编号:    {}'.format([i for i in text]))
print('  Input Words: {}'.format(" ".join([source_int_to_letter[i] for i in text])))
print('\nTarget')
print('  Word 编号:       {}'.format([i for i in answer_logits if i != pad]))
print('  Response Words: {}'.format(" ".join([target_int_to_letter[i] for i in answer_logits if i != pad])))
|
<gh_stars>0
"""
Draw an interactive comparison plot of named result dictionaries.
The plot can plot many results for large numbers of parameters
against each other.
The plot can answer the following questions:
1. How are the parameters distributed?
2. How large are the differences in parameter estimates between results
compared to the uncertainty around the parameter estimates?
3. Are parameters of groups of results clustered?
"""
from bokeh.layouts import gridplot
from bokeh.models import BoxSelectTool
from bokeh.models import ColumnDataSource
from bokeh.models import HoverTool
from bokeh.models import TapTool
from bokeh.models import Title
from bokeh.models.callbacks import CustomJS
from bokeh.models.widgets import CheckboxGroup
from bokeh.plotting import figure
from bokeh.plotting import show
from estimagic.visualization.comparison_plot_data_preparation import (
comparison_plot_inputs,
)
def comparison_plot(
    results,
    color_dict=None,
    height=None,
    width=500,
    axis_for_every_parameter=False,
    x_padding=0.1,
    num_bins=50,
):
    """Make a comparison plot from a dictionary containing optimization results.

    Args:
        results (list): List of estimagic optimization results where the info
            can have been extended with 'model' and 'model_name'.
        color_dict (dict): mapping from the model class names to colors.
        height (int): height of the plot.
        width (int): width of the plot (in pixels).
        axis_for_every_parameter (bool): if False the x axis is only shown once
            for every group of parameters.
        x_padding (float): the x_range is extended on each side by x_padding
            times the range of the data.
        num_bins (int): number of bins.

    Returns:
        source_dfs, grid
    """
    model_classes = sorted({res.info["model_class"] for res in results})

    source_dfs, plot_info = comparison_plot_inputs(
        results=results,
        x_padding=x_padding,
        num_bins=num_bins,
        color_dict=color_dict,
        fig_height=height,
    )
    sources, figures, glyphs = _create_comparison_plot_components(
        source_dfs=source_dfs,
        plot_info=plot_info,
        axis_for_every_parameter=axis_for_every_parameter,
        width=width,
    )
    plots_with_callbacks = _add_callbacks(
        source_dict=sources,
        figure_dict=figures,
        glyph_dict=glyphs,
        model_classes=model_classes,
    )

    grid = gridplot(plots_with_callbacks, toolbar_location="right", ncols=1)
    show(grid)
    return source_dfs, grid
def _create_comparison_plot_components(
    source_dfs, plot_info, width, axis_for_every_parameter
):
    """Build the ColumnDataSources, figures and point glyphs of the plot.

    Args:
        source_dfs (dict): nested mapping {group: {parameter: DataFrame}}.
        plot_info (dict): plot metadata ('group_info', 'plot_height', 'y_range').
        width (int): plot width in pixels.
        axis_for_every_parameter (bool): whether every parameter plot keeps its
            own x axis.

    Returns:
        source_dict, figure_dict, glyph_dict: nested dicts keyed by group and
        parameter; figure_dict[group]["__title__"] holds the group's title figure.
    """
    source_dict = {group_name: {} for group_name in source_dfs.keys()}
    figure_dict = {group_name: {} for group_name in source_dfs.keys()}
    glyph_dict = {group_name: {} for group_name in source_dfs.keys()}

    for group, param_to_df in source_dfs.items():
        group_info = plot_info["group_info"][group]
        # Renderer-less mini figure used purely as the group heading.
        title_fig = figure(
            title=Title(
                text="Comparison Plot of " + group.title() + " Parameters",
                align="center",
                text_font_size="15pt",
            ),
            plot_height=50,
            plot_width=width,
            tools="reset,save",
        )
        _style_title_fig(title_fig)
        figure_dict[group]["__title__"] = title_fig

        for i, (param, df) in enumerate(param_to_df.items()):
            param_src = ColumnDataSource(df.reset_index())
            param_plot = figure(
                title=df["name"].unique()[0],
                plot_height=plot_info["plot_height"],
                plot_width=width,
                tools="reset,save",
                y_axis_location="left",
                x_range=group_info["x_range"],
                y_range=plot_info["y_range"],
            )
            # One rectangle per estimate; selection keeps the color but changes alpha.
            point_glyph = param_plot.rect(
                source=param_src,
                x="binned_x",
                y="dodge",
                width=group_info["width"],
                height=1,
                color="color",
                selection_color="color",
                nonselection_color="color",
                alpha=0.5,
                selection_alpha=0.7,
                nonselection_alpha=0.3,
            )
            _add_hover_tool(param_plot, point_glyph, df)

            # Confidence intervals: invisible until the point is selected.
            param_plot.hbar(
                source=param_src,
                y="dodge",
                left="conf_int_lower",
                right="conf_int_upper",
                height=0.01,
                alpha=0.0,
                selection_alpha=0.7,
                nonselection_alpha=0.0,
                color="color",
                selection_color="color",
                nonselection_color="color",
            )

            # BUG FIX: enumerate indices run 0..len-1, so the previous
            # comparison `i == len(param_to_df)` was never True and no plot
            # was ever styled as the last one of its group.
            is_last = i == len(param_to_df) - 1
            _style_plot(
                fig=param_plot,
                last=is_last,
                axis_for_every_parameter=axis_for_every_parameter,
            )
            figure_dict[group][param] = param_plot
            source_dict[group][param] = param_src
            glyph_dict[group][param] = point_glyph
    return source_dict, figure_dict, glyph_dict
def _add_hover_tool(plot, point_glyph, df):
    """Attach a hover tool to *point_glyph* listing the informative columns of *df*."""
    columns = ["model", "name", "value"]
    # Optional columns are only shown when they actually vary across rows.
    for candidate in ["model_class", "conf_int_lower", "conf_int_upper"]:
        if len(df[candidate].unique()) > 1:
            columns.append(candidate)
    tooltips = [(column, "@" + column) for column in columns]
    plot.tools.append(HoverTool(renderers=[point_glyph], tooltips=tooltips))
def _add_callbacks(source_dict, figure_dict, glyph_dict, model_classes):
    """Add checkbox for selecting model classes and tap tools."""
    all_src = _flatten_dict(source_dict)
    plots = [_create_checkbox(widget_labels=model_classes, all_src=all_src)]
    for group, param_to_figure in figure_dict.items():
        for param, param_plot in param_to_figure.items():
            if param == "__title__":
                # The title figure gets no interaction tools.
                continue
            _add_select_tools(
                current_src=source_dict[group][param],
                other_src=_flatten_dict(source_dict, param),
                param_plot=param_plot,
                point_glyph=glyph_dict[group][param],
            )
            plots.append(param_plot)
    return plots
def _flatten_dict(nested_dict, exclude_key=None):
"""
Return a list of the values of the values of a nested dictionary.
This is used to collect all ColumnDataSources except the one modified by the user
(e.g. by clicking on one of its parameters).
The ``source_dict`` has the structure {group: {param: CDS})}
Args:
nested_dict (dict): nested dictionary whose inner values are to be returned
exclude_key:
key possibly in one of the inner dictionaries whose value is to be excluded.
"""
flattened = []
for inner_dict in nested_dict.values():
for inner_key, inner_val in inner_dict.items():
if exclude_key is None or inner_key != exclude_key:
flattened.append(inner_val)
return flattened
def _add_select_tools(current_src, other_src, param_plot, point_glyph):
    """Add tap and box-select tools that propagate a model selection to all sources."""
    select_js_kwargs = {"current_src": current_src, "other_src": other_src}
    # The JS expands a clicked point to every point of the same model, in the
    # clicked source and in all other sources.
    select_js_code = """
    // adapted from https://stackoverflow.com/a/44996422

    var chosen = current_src.selected.indices;
    if (typeof(chosen) == "number"){
        var chosen = [chosen]
    };

    var chosen_models = [];

    for (var i = 0; i < chosen.length; ++ i){
        chosen_models.push(current_src.data['model'][chosen[i]])
    };

    var chosen_models_indices = [];
    for (var i = 0; i < current_src.data['index'].length; ++ i){
        if (chosen_models.includes(current_src.data['model'][i])){
            chosen_models_indices.push(i)
        };
    };
    current_src.selected.indices = chosen_models_indices;
    current_src.change.emit();

    for (var i = 0; i < other_src.length; ++i){
        var chosen_models_indices = [];
        for (var j = 0; j < other_src[i].data['index'].length; ++ j){
            if (chosen_models.includes(other_src[i].data['model'][j])){
                chosen_models_indices.push(j)
            };
        };
        other_src[i].selected.indices = chosen_models_indices;
        other_src[i].change.emit();
    };
    """
    select_callback = CustomJS(args=select_js_kwargs, code=select_js_code)
    # point_glyph as only renderer assures that when a point is chosen
    # only that point's model is chosen
    # this makes it impossible to choose models based on clicking confidence bands
    tap = TapTool(renderers=[point_glyph], callback=select_callback)
    param_plot.tools.append(tap)
    boxselect = BoxSelectTool(renderers=[point_glyph], callback=select_callback)
    param_plot.tools.append(boxselect)
def _create_checkbox(widget_labels, all_src):
    """Create a CheckboxGroup whose JS callback selects points by model class."""
    widget_js_kwargs = {"all_src": all_src, "group_list": widget_labels}
    # The JS selects, in every source, the rows whose model_class matches one
    # of the checked boxes.
    widget_js_code = """
    // adapted from https://stackoverflow.com/a/36145278

    var chosen_inds = cb_obj.active;

    var chosen_widget_groups = [];

    for (var i = 0; i < group_list.length; ++ i){
        if (chosen_inds.includes(i)){
            chosen_widget_groups.push(group_list[i])
        };
    };

    for (var j = 0; j < all_src.length; ++ j){

        to_select_inds = []

        for (var i = 0; i < all_src[j].data['index'].length; ++ i){
            if (chosen_widget_groups.includes(all_src[j].data['model_class'][i])){
                to_select_inds.push(i)
            };
        };

        all_src[j].selected.indices = to_select_inds;
        all_src[j].change.emit();
    };
    """
    widget_callback = CustomJS(args=widget_js_kwargs, code=widget_js_code)
    # NOTE(review): CheckboxGroup.active is a list of *indices* of checked
    # boxes; `[0] * n` repeats index 0, so only the first box starts checked.
    # If the intent was "all checked", this should be
    # list(range(len(widget_labels))) -- confirm.
    cb_group = CheckboxGroup(
        labels=widget_labels,
        active=[0] * len(widget_labels),
        callback=widget_callback,
        inline=True,
    )
    return cb_group
def _style_title_fig(fig):
    """Strip grids, outline and axis lines from a figure used purely as a title."""
    fig.line([], [])  # an (empty) renderer silences bokeh's missing-renderer warning
    fig.ygrid.visible = False
    fig.xgrid.visible = False
    fig.outline_line_color = None
    for axis in (fig.yaxis, fig.xaxis):
        axis.axis_line_color = None
def _style_plot(fig, last, axis_for_every_parameter):
    """Apply the shared styling to one parameter plot."""
    _style_x_axis(fig=fig, last=last, axis_for_every_parameter=axis_for_every_parameter)
    _style_y_axis(fig=fig)

    title = fig.title
    title.vertical_align = "top"
    title.text_alpha = 70
    title.text_font_style = "normal"

    fig.outline_line_color = None
    fig.min_border_top = 20
    fig.min_border_bottom = 20
    fig.xgrid.visible = False
    fig.ygrid.visible = False
    fig.sizing_mode = "scale_width"
def _style_x_axis(fig, last, axis_for_every_parameter):
    """Style the x axis; without per-parameter axes, draw a zero line instead.

    NOTE(review): the ``if last`` branch hides the axis on the *last* plot of a
    group, which looks inverted (one would expect only the last plot to keep
    its visible axis) -- confirm the intended behavior with the caller.
    """
    if not axis_for_every_parameter:
        if last:
            fig.xaxis.visible = False
        else:
            fig.xaxis.axis_line_color = None
        # Horizontal line at y=0 spanning the data range, acting as a visual axis.
        xmin = fig.x_range.start
        xmax = fig.x_range.end
        fig.line([xmin, xmax], [0, 0], line_color="black")
    fig.xaxis.minor_tick_line_color = None
def _style_y_axis(fig):
    """Hide the y-axis line and all of its tick marks."""
    yaxis = fig.yaxis
    yaxis.minor_tick_line_color = None
    yaxis.axis_line_color = None
    yaxis.major_tick_line_color = None
|
<filename>src/msla/utils.py
from flask import session, request, url_for
from msla import app, db
from .models import Log, FileUpload
from user_agents import parse
from werkzeug import secure_filename
from datetime import datetime
import hashlib
import psutil
import subprocess
import os, re
def searchResult(searchColumn, searchInfo, page):
    """Return one page (100 rows) of Log rows whose *searchColumn* contains *searchInfo*.

    Args:
        searchColumn (str): a Log column name; any unknown value triggers a
            combined OR search across all text columns.
        searchInfo (str): substring to search for (LIKE %searchInfo%).
        page (int): 1-based page number; each page holds 100 rows.

    Returns:
        A SQLAlchemy query sliced to the requested page.
    """
    # Columns that may be searched individually; collapses the previous
    # nine duplicated if/elif branches into one getattr dispatch.
    single_columns = ("source_ip", "date", "time", "source_port", "dest_ip",
                      "dest_port", "os", "message", "detailed_message")
    pattern = '%' + searchInfo + '%'
    if searchColumn in single_columns:
        criterion = getattr(Log, searchColumn).like(pattern)
    else:
        # Fallback: match the substring in any of the text columns.
        criterion = (Log.source_ip.like(pattern) | Log.date.like(pattern) |
                     Log.time.like(pattern) | Log.source_port.like(pattern) |
                     Log.dest_ip.like(pattern) | Log.dest_port.like(pattern) |
                     Log.os.like(pattern) | Log.browser.like(pattern) |
                     Log.message.like(pattern) | Log.detailed_message.like(pattern))
    return db.session.query(Log).filter(criterion).slice((page - 1) * 100, page * 100)
def searchCount(searchColumn, searchInfo):
    """Count the Log rows whose *searchColumn* contains *searchInfo*.

    Args:
        searchColumn (str): a Log column name; any unknown value triggers a
            combined OR search across all text columns.
        searchInfo (str): substring to search for (LIKE %searchInfo%).

    Returns:
        int: the number of matching rows.
    """
    # Same dispatch as searchResult(); replaces nine duplicated branches.
    single_columns = ("source_ip", "date", "time", "source_port", "dest_ip",
                      "dest_port", "os", "message", "detailed_message")
    pattern = '%' + searchInfo + '%'
    if searchColumn in single_columns:
        criterion = getattr(Log, searchColumn).like(pattern)
    else:
        criterion = (Log.source_ip.like(pattern) | Log.date.like(pattern) |
                     Log.time.like(pattern) | Log.source_port.like(pattern) |
                     Log.dest_ip.like(pattern) | Log.dest_port.like(pattern) |
                     Log.os.like(pattern) | Log.browser.like(pattern) |
                     Log.message.like(pattern) | Log.detailed_message.like(pattern))
    return db.session.query(Log).filter(criterion).count()
def sha512(string):
    """Return the hex SHA-512 digest of *string* (a bytes-like object)."""
    digest = hashlib.sha512(string)
    return digest.hexdigest()
def deleteLog(fileID):
    """Delete the uploaded log file with id *fileID* from disk and the database.

    Returns:
        str: "Delete file successfully!" on success, "Error!" when no record exists.
    """
    fileUpload = db.session.query(FileUpload).filter_by(id=fileID).first()  # select * from FileUpload where id=fileID limit 1
    if not fileUpload:
        return "Error!"
    # BUG FIX: capture the paths *before* delete/commit/close -- afterwards the
    # ORM instance may be detached/expired and its attributes unreadable.
    directory = fileUpload.path
    filename = os.path.join(directory, fileUpload.filename)
    db.session.delete(fileUpload)  # delete the log location record
    db.session.commit()
    db.session.close()
    os.remove(filename)  # delete the log file itself
    os.rmdir(directory)  # remove the (now empty) per-upload directory
    return "Delete file successfully!"
def saveFile(files):
    """Store each uploaded file under a random directory and record it in the DB.

    Each file lands in UPLOAD_FOLDER/<random md5 hex>/<sanitized name>, and a
    FileUpload row is committed per saved file. Files whose sanitized name is
    empty are skipped.
    """
    # Hoisted loop-invariant: the project base directory never changes.
    base = os.path.dirname(os.path.dirname(__file__))
    upload_root = os.path.join(base, app.config['UPLOAD_FOLDER'])
    for file in files:
        filename = secure_filename(file.filename)  # prevent LFI / path traversal
        if not filename:
            continue
        # Random directory name (not a content hash) to avoid name collisions.
        md5hash = hashlib.md5(os.urandom(64)).hexdigest()
        target_dir = os.path.join(upload_root, md5hash)
        os.makedirs(target_dir, exist_ok=True)
        file.save(os.path.join(target_dir, filename))
        db.session.add(FileUpload(target_dir, filename))
        db.session.commit()
    db.session.close()
def url_for_other_page(page):
    """Build the URL of the current endpoint with its 'page' view-arg replaced."""
    view_args = dict(request.view_args, page=page)
    return url_for(request.endpoint, **view_args)
def getSrcIP():
    """Return up to five distinct source IPs with their log counts.

    Each entry is {"name": ip_or_"Unknown", "number": count}.
    """
    rows = db.session.query(Log.source_ip).order_by(Log.source_ip).distinct().slice(0, 5)
    stats = []
    for row in rows:
        name = row.source_ip if row.source_ip != "" else "Unknown"
        count = db.session.query(Log.id).filter_by(source_ip=row.source_ip).count()
        stats.append({"name": name, "number": count})
    db.session.close()
    return stats
def getBrowser():
    """Return every distinct browser with its log count as {"name", "number"} dicts."""
    stats = []
    for row in db.session.query(Log.browser).order_by(Log.browser).distinct():
        count = db.session.query(Log.id).filter_by(browser=row.browser).count()
        stats.append({"name": row.browser, "number": count})
    db.session.close()
    return stats
def getDate():
    """Return every distinct log date with its log count as {"name", "number"} dicts."""
    stats = []
    for row in db.session.query(Log.date).distinct():
        name = row.date if row.date != "" else "Unknown"
        count = db.session.query(Log.id).filter_by(date=row.date).count()
        stats.append({"name": name, "number": count})
    db.session.close()
    return stats
def getOs():
    """Return every distinct operating system with its log count.

    Each entry is {"name": os_family, "number": count}.
    """
    logs = db.session.query(Log.os).order_by(Log.os).distinct()
    # BUG FIX: the accumulator used to be named ``os``, shadowing the imported
    # ``os`` module inside this function.
    os_stats = []
    for log in logs:
        count = db.session.query(Log.id).filter_by(os=log.os).count()
        os_stats.append({"name": log.os, "number": count})
    db.session.close()
    return os_stats
def getAttack():
    """Return up to three distinct attack messages with their log counts."""
    stats = []
    for row in db.session.query(Log.message).order_by(Log.message).distinct().slice(0, 3):
        name = row.message if row.message != "" else "Unknown"
        count = db.session.query(Log.id).filter_by(message=row.message).count()
        stats.append({"name": name, "number": count})
    db.session.close()
    return stats
def getTotal():
    """Return the total number of Log rows in the database."""
    count = db.session.query(Log.id).count()
    db.session.close()
    return count
def isauth():
    """Return True when a user id is stored in the Flask session."""
    return 'id' in session
def bytes2human(n):
    """Render a byte count *n* as a human-readable binary-prefixed string.

    Adapted from http://code.activestate.com/recipes/578019

    >>> bytes2human(10000)
    '9.8K'
    >>> bytes2human(100001221)
    '95.4M'
    """
    units = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Threshold per unit: K=2**10, M=2**20, ...
    thresholds = {unit: 1 << ((idx + 1) * 10) for idx, unit in enumerate(units)}
    for unit in reversed(units):
        if n >= thresholds[unit]:
            return '%.1f%s' % (float(n) / thresholds[unit], unit)
    return "%sB" % n
def diskUsage():
    """Return one formatted usage line per mounted disk partition."""
    templ = "%s %s %s %s %s%% %s %s"
    lines = []
    for part in psutil.disk_partitions(all=False):
        # skip cd-rom drives with no disk in it; they may raise
        # ENOENT, pop-up a Windows GUI error for a non-ready
        # partition or just hang.
        if os.name == 'nt' and ('cdrom' in part.opts or part.fstype == ''):
            continue
        usage = psutil.disk_usage(part.mountpoint)
        lines.append(templ % (
            part.device,
            bytes2human(usage.total),
            bytes2human(usage.used),
            bytes2human(usage.free),
            int(usage.percent),
            part.fstype,
            part.mountpoint))
    return lines
def cpu():
    """Return the system-wide CPU utilisation (%) sampled over one second."""
    percent = psutil.cpu_percent(interval=1)
    return percent
def ram():
    """Return the current virtual-memory utilisation in percent."""
    memory = psutil.virtual_memory()
    return memory.percent
def who():
    """Return one formatted line per currently logged-in user."""
    lines = []
    for user in psutil.users():
        started = datetime.fromtimestamp(user.started).strftime("%Y-%m-%d %H:%M")
        lines.append("%-15s %-15s %s  (%s)" % (
            user.name,
            user.terminal or '-',
            started,
            user.host))
    return lines
def ipaddress():
    """Return '<iface>: <ip>' strings parsed from `ifconfig` output.

    NOTE(review): depends on /sbin/ifconfig and POSIX shell tools being
    available -- Linux/BSD only.
    """
    # BUG FIX: subprocess.getoutput expects a shell command *string*; passing a
    # list only worked by accident (the first element became the command).
    ips_output = subprocess.getoutput(
        "/sbin/ifconfig | grep -i \"inet\" | grep -iv \"inet6\" | "
        "awk {'print $2'} | sed -ne 's/addr\\:/ /p'")
    iface_output = subprocess.getoutput("/sbin/ifconfig | cut -d \" \" -f1")
    ips = ips_output.split("\n")
    ifaces = [line for line in iface_output.split("\n") if line != ""]
    # BUG FIX: zip() stops at the shorter list, avoiding the IndexError the old
    # parallel-index loop raised when the two counts differed.
    return [iface + ": " + ip for iface, ip in zip(ifaces, ips)]
def allowed_file(filename):
    """Return True when *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in app.config['ALLOWED_EXTENSIONS']
def ua_parse(ua):
    """Parse a User-Agent string into [browser_family, os_family].

    Args:
        ua (str): raw User-Agent header value.

    Returns:
        list: [browser family name, operating-system family name].
    """
    user_agent = parse(ua)
    # FIX: dropped the unused local `version` the original also computed.
    return [user_agent.browser.family, user_agent.os.family]
def parse_block(block, vals):
    """Extract fields from one audit-log section into *vals* and persist a Log row.

    *vals* is mutated in place -- a field keeps its previous value whenever the
    corresponding pattern does not match this block -- and is also returned so
    values carry over between consecutive sections.
    """
    matchDate = re.search(r'\d{2}/[JFMMJASOND]\w{2,}/\d{4}', block) #Date
    if matchDate:
        vals['date'] = matchDate.group()
    matchTime = re.search(r'\d{2}:\d{2}:\d{2} [\+\-]\d{4}', block) #Time
    if matchTime:
        vals['time'] = matchTime.group()
    # Header line format: "srcIP srcPort destIP destPort".
    matchIP = re.search(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} \d{1,5} \d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3} \d{1,5}', block) #IP
    if matchIP:
        data = matchIP.group().split(" ")
        vals['srcIP'] = data[0]
        vals['srcPort'] = data[1]
        vals['destIP'] = data [2]
        vals['destPort'] = data[3]
    matchGet = re.search(r'(GET|POST) ((/\w+)?)+(/\w+(\.\w+)?)', block) #Get Address
    if matchGet:
        vals['getAdd'] = matchGet.group()
    matchUseragent = re.search(r'[Uu]ser-[aA]gent: .+', block)
    if matchUseragent:
        vals['userAgent']= matchUseragent.group()
    # Collect every rule message, joined with " | ".
    matchMes = re.findall(r'\[msg \".+\.\"\]', block)
    vals['mes'] = ''
    for mes in matchMes:
        vals['mes'] = vals['mes'] + mes[6:-2] + " | "
    # NOTE(review): the separator " | " is three characters but only two are
    # stripped here, leaving a trailing space -- confirm whether intended.
    vals['mes'] = vals['mes'][:-2]
    matchMes = re.search(r'-H--\s(Message:.+\s)+', block)
    vals['detailMes'] = ''
    if matchMes:
        vals['detailMes'] = matchMes.group()
    ua = ua_parse(vals['userAgent']) #parse userAgent : OS , Browser
    vals['os'] = ua[1]
    vals['browser'] = ua[0]
    # One Log row is committed per parsed section.
    newLog = Log(vals['date'], vals['time'], vals['srcIP'], vals['srcPort'], vals['destIP'], vals['destPort'], vals['getAdd'], vals['os'], vals['browser'], vals['mes'], vals['detailMes'])
    db.session.add(newLog)
    db.session.commit()
    db.session.close()
    return vals
def parse_log(fileID):
    """Import the uploaded audit-log file with id *fileID* into the database.

    Returns a human-readable status string.
    """
    fileUpload = FileUpload.query.filter_by(id=fileID).first()
    db.session.close()
    if fileUpload:
        if fileUpload.im=='True': #check import to database
            return "File imported to database!"
        # NOTE(review): the file is flagged as imported *before* parsing, so a
        # parse failure still leaves it marked as done -- confirm intent.
        db.session.query(FileUpload).filter_by(id=fileID).update({'im': 'True'})
        db.session.commit()
        db.session.close()
        filename = os.path.join(fileUpload.path, fileUpload.filename)
    else:
        return "Error!"
    #get phase information on log file
    if os.path.isfile(filename):
        i = 0
        error_block = ''
        # `val` carries parsed fields across sections; see parse_block().
        val = {"date":"", "time":"", "srcIP":"", "srcPort":"", "destIP":"", "destPort":"", "getAdd":"", "os":"", "browser":"", "mes":"", "detailMes":"", "userAgent":""}
        for line in open(filename,"r"):
            # Section boundary marker, e.g. "--a1b2c3d4-A--".
            match = re.search(r'^--[0-9a-fA-F]{8,}-[A-Z]--$', line.strip())
            if match:
                i += 1
            if match and i == 2:
                # A second marker closes the accumulated block: parse it, reset.
                val = parse_block(error_block, val)
                error_block=""
                i=1
            error_block += line
        # NOTE(review): the final accumulated block is never passed to
        # parse_block() after the loop ends, so the last section is dropped.
        return "Import to database successfully!"
    else:
        return "Error!"
|
<reponame>stroxler/LibCST
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
from libcst.codemod import CodemodTest
from libcst.codemod.commands.convert_type_comments import ConvertTypeComments
class TestConvertTypeComments(CodemodTest):
    # Tests for the ConvertTypeComments codemod (PEP 484 type comments ->
    # inline annotations). The codemod requires the Python 3.9+ ast module.
    maxDiff = 1500
    TRANSFORM = ConvertTypeComments

    def assertCodemod39Plus(self, before: str, after: str) -> None:
        """
        Assert that the codemod works on Python 3.9+, and that we raise
        a NotImplementedError on other Python versions.
        """
        if (sys.version_info.major, sys.version_info.minor) < (3, 9):
            with self.assertRaises(NotImplementedError):
                super().assertCodemod(before, after)
        else:
            super().assertCodemod(before, after)

    # Tests converting assignment type comments -----------------

    def test_convert_assignments(self) -> None:
        before = """
            y = 5 # type: int
            z = ('this', 7) # type: typing.Tuple[str, int]
        """
        after = """
            y: int = 5
            z: "typing.Tuple[str, int]" = ('this', 7)
        """
        self.assertCodemod39Plus(before, after)

    def test_convert_assignments_in_context(self) -> None:
        """
        Also verify that our matching works regardless of spacing
        """
        before = """
            def foo():
                z = ('this', 7) # type: typing.Tuple[str, int]

            class C:
                attr0 = 10# type: int
                def __init__(self):
                    self.attr1 = True # type: bool
        """
        after = """
            def foo():
                z: "typing.Tuple[str, int]" = ('this', 7)

            class C:
                attr0: int = 10
                def __init__(self):
                    self.attr1: bool = True
        """
        self.assertCodemod39Plus(before, after)

    def test_multiple_elements_in_assign_lhs(self) -> None:
        # Tuple/starred targets expand into separate type declarations.
        before = """
            x, y = [], [] # type: List[int], List[str]
            z, w = [], [] # type: (List[int], List[str])
            a, b, *c = range(5) # type: float, float, List[float]
            d, (e1, e2) = foo() # type: float, (int, str)
        """
        after = """
            x: "List[int]"
            y: "List[str]"
            x, y = [], []

            z: "List[int]"
            w: "List[str]"
            z, w = [], []

            a: float
            b: float
            c: "List[float]"
            a, b, *c = range(5)

            d: float
            e1: int
            e2: str
            d, (e1, e2) = foo()
        """
        self.assertCodemod39Plus(before, after)

    def test_multiple_assignments(self) -> None:
        # Chained assignments get one declaration per target.
        before = """
            x = y = z = 15 # type: int
            a, b = c, d = 'this', 'that' # type: (str, str)
        """
        after = """
            x: int
            y: int
            z: int
            x = y = z = 15

            a: str
            b: str
            c: str
            d: str
            a, b = c, d = 'this', 'that'
        """
        self.assertCodemod39Plus(before, after)

    def test_semicolons_with_assignment(self) -> None:
        """
        When we convert an Assign to an AnnAssign, preserve
        semicolons. But if we have to add separate type declarations,
        expand them.
        """
        before = """
            foo(); x = 12 # type: int
            bar(); y, z = baz() # type: int, str
        """
        after = """
            foo(); x: int = 12
            bar()
            y: int
            z: str
            y, z = baz()
        """
        self.assertCodemod39Plus(before, after)

    def test_no_change_when_type_comment_unused(self) -> None:
        # All of these inputs must pass through the codemod untouched.
        before = """
            # type-ignores are not type comments
            x = 10  # type: ignore
            # a commented type comment (per PEP 484) is not a type comment
            z = 15  # # type: int
            # a type comment in an illegal location won't be used
            print("hello")  # type: None
            # These examples are not PEP 484 compliant, and result in arity errors
            a, b = 1, 2  # type: Tuple[int, int]
            w = foo()  # type: float, str
            # Multiple assigns with mismatched LHS arities always result in arity
            # errors, and we only codemod if each target is error-free
            v = v0, v1 = (3, 5)  # type: int, int
        """
        after = before
        self.assertCodemod39Plus(before, after)
|
"""
GeoServer interaction operations.
Working assumptions for this module:
* Point coordinates are passed as shapely.geometry.Point instances.
* BBox coordinates are passed as (lon1, lat1, lon2, lat2).
* Shapes (polygons) are passed as shapely.geometry.shape parsable objects.
* All functions that require a CRS have a CRS argument with a default set to WGS84.
* GEO_URL points to the GeoServer instance hosting all files.
TODO: Refactor to remove functions that are just 2-lines of code.
For example, many function's logic essentially consists in creating the layer name.
We could have a function that returns the layer name, and then other functions expect the layer name.
"""
import inspect
import json
import os
import warnings
from pathlib import Path
from typing import Iterable, Optional, Sequence, Tuple, Union
from urllib.parse import urljoin
from requests import Request
from . import gis_import_error_message
try:
import fiona
import geopandas as gpd
import pandas as pd
from lxml import etree
from owslib.fes import PropertyIsEqualTo, PropertyIsLike
from owslib.wcs import WebCoverageService
from owslib.wfs import WebFeatureService
from shapely.geometry import Point, shape
except (ImportError, ModuleNotFoundError) as e:
msg = gis_import_error_message.format(Path(__file__).stem)
raise ImportError(msg) from e
try:
from owslib.fes2 import Intersects
from owslib.gml import Point as wfs_Point
except (ImportError, ModuleNotFoundError):
warnings.warn("WFS point spatial filtering requires OWSLib>0.24.1.")
Intersects = None
wfs_Point = None
# Do not remove the trailing / otherwise `urljoin` will remove the geoserver path.
# Can be set at runtime with `$ env GEO_URL=https://xx.yy.zz/geoserver/ ...`.
GEO_URL = os.getenv("GEO_URL", "https://pavics.ouranos.ca/geoserver/")

# We store the contour of different hydrobasins domains
hybas_dir = Path(__file__).parent.parent / "data" / "hydrobasins_domains"
# Archive naming pattern, formatted with a region code from `hybas_regions`.
hybas_pat = "hybas_lake_{}_lev01_v1c.zip"

# This could be inferred from existing files in hybas_dir
hybas_regions = ["na", "ar"]
# Region code -> path of the corresponding HydroBASINS archive.
hybas_domains = {dom: hybas_dir / hybas_pat.format(dom) for dom in hybas_regions}
def _get_location_wfs(
    bbox: Optional[
        Tuple[
            Union[str, float, int],
            Union[str, float, int],
            Union[str, float, int],
            Union[str, float, int],
        ]
    ] = None,
    point: Optional[
        Tuple[
            Union[str, float, int],
            Union[str, float, int],
        ]
    ] = None,
    layer: str = None,
    geoserver: str = GEO_URL,
) -> dict:
    """Return leveled features from a hosted data set using bounding box coordinates and the WFS 2.0.0 protocol.

    For geographic rasters, subsetting is based on WGS84 (Long, Lat) boundaries. If not geographic, subsetting based
    on projected coordinate system (Easting, Northing) boundaries.

    Parameters
    ----------
    bbox : Optional[Tuple[Union[str, float, int], Union[str, float, int], Union[str, float, int], Union[str, float, int]]]
        Geographic coordinates of the bounding box (left, down, right, up).
    point : Optional[Tuple[Union[str, float, int], Union[str, float, int]]]
        Geographic coordinates of an intersecting point (lon, lat).
    layer : str
        The WFS/WMS layer name requested.
    geoserver: str
        The address of the geoserver housing the layer to be queried. Default: https://pavics.ouranos.ca/geoserver/.

    Returns
    -------
    dict
        A GeoJSON-encoded vector feature, parsed into a dictionary.

    Raises
    ------
    NotImplementedError
        If both `bbox` and `point` are given, or if point filtering is requested with OWSLib<=0.24.1.
    ValueError
        If neither `bbox` nor `point` is given.
    """
    wfs = WebFeatureService(url=urljoin(geoserver, "wfs"), version="2.0.0", timeout=30)

    if bbox and point:
        raise NotImplementedError("Provide either 'bbox' or 'point'.")

    if bbox:
        kwargs = dict(bbox=bbox)
    elif point:
        # FIXME: Remove this once OWSlib > 0.24.1 is released.
        if not Intersects and not wfs_Point:
            raise NotImplementedError(
                f"{inspect.stack()[1][3]} with point filtering requires OWSLib>0.24.1.",
            )
        p = wfs_Point(
            id="feature",
            srsName="http://www.opengis.net/gml/srs/epsg.xml#4326",
            pos=point,
        )
        f = Intersects(propertyname="the_geom", geometry=p)
        intersects = f.toXML()
        kwargs = dict(filter=intersects)
    else:
        # Previously raised a bare `ValueError()` with no message.
        raise ValueError("Either 'bbox' or 'point' must be provided.")

    resp = wfs.getfeature(
        typename=layer, outputFormat="application/json", method="POST", **kwargs
    )
    data = json.loads(resp.read())
    return data
def _get_feature_attributes_wfs(
    attribute: Sequence[str],
    layer: str = None,
    geoserver: str = GEO_URL,
) -> str:
    """Build a WFS GetFeature URL requesting the given attribute values.

    Fetching the returned URL yields a JSON response.

    Parameters
    ----------
    attribute : list
        Attribute/field names.
    layer : str
        Name of geographic layer queried.
    geoserver: str
        The address of the geoserver housing the layer to be queried. Default: https://pavics.ouranos.ca/geoserver/.

    Returns
    -------
    str
        WFS request URL.

    Notes
    -----
    Non-existent attributes will raise a cryptic DriverError from fiona.
    """
    query = {
        "service": "WFS",
        "version": "2.0.0",
        "request": "GetFeature",
        "typename": layer,
        "outputFormat": "application/json",
        "propertyName": ",".join(attribute),
    }
    prepared = Request("GET", url=urljoin(geoserver, "wfs"), params=query).prepare()
    return prepared.url
def _filter_feature_attributes_wfs(
    attribute: str,
    value: Union[str, float, int],
    layer: str,
    geoserver: str = GEO_URL,
) -> str:
    """Return WFS GetFeature URL request filtering geographic features based on a property's value.

    Parameters
    ----------
    attribute : str
        Attribute/field name.
    value: Union[str, float, int]
        Value for attribute queried.
    layer : str
        Name of geographic layer queried.
    geoserver: str
        The address of the geoserver housing the layer to be queried. Default: https://pavics.ouranos.ca/geoserver/.

    Returns
    -------
    str
        WFS request URL.

    Raises
    ------
    ValueError
        If `attribute` or `value` cannot be converted to a string.
    """
    try:
        attribute = str(attribute)
        value = str(value)
    except (TypeError, ValueError) as e:
        # Raise a specific, chained exception instead of a bare `Exception`.
        raise ValueError("Unable to cast attribute/filter to string.") from e

    # Wildcard "LIKE" filter serialized to the Filter Encoding XML expected by WFS.
    filter_request = PropertyIsLike(propertyname=attribute, literal=value, wildCard="*")
    filterxml = etree.tostring(filter_request.toXML()).decode("utf-8")
    params = dict(
        service="WFS",
        # NOTE(review): other requests in this module use WFS 2.0.0 — confirm 1.1.0 is intentional.
        version="1.1.0",
        request="GetFeature",
        typename=layer,
        outputFormat="application/json",
        filter=filterxml,
    )
    return Request("GET", url=urljoin(geoserver, "wfs"), params=params).prepare().url
def _determine_upstream_ids(
fid: str,
df: pd.DataFrame,
basin_field: str = None,
downstream_field: str = None,
basin_family: Optional[str] = None,
) -> pd.DataFrame:
"""Return a list of upstream features by evaluating the downstream networks.
Parameters
----------
fid : str
feature ID of the downstream feature of interest.
df : pd.DataFrame
Dataframe comprising the watershed attributes.
basin_field: str
The field used to determine the id of the basin according to hydro project.
downstream_field: str
The field identifying the downstream sub-basin for the hydro project.
basin_family: str, optional
Regional watershed code (For HydroBASINS dataset).
Returns
-------
pd.DataFrame
Basins ids including `fid` and its upstream contributors.
"""
def upstream_ids(bdf, bid):
return bdf[bdf[downstream_field] == bid][basin_field]
# Note: Hydro Routing `SubId` is a float for some reason and Python float != GeoServer double. Cast them to int.
if isinstance(fid, float):
fid = int(fid)
df[basin_field] = df[basin_field].astype(int)
df[downstream_field] = df[downstream_field].astype(int)
# Locate the downstream feature
ds = df.set_index(basin_field).loc[fid]
if basin_family is not None:
# Do a first selection on the main basin ID of the downstream feature.
sub = df[df[basin_family] == ds[basin_family]]
else:
sub = None
# Find upstream basins
up = [fid]
for b in up:
tmp = upstream_ids(sub if sub is not None else df, b)
if len(tmp):
up.extend(tmp)
return (
sub[sub[basin_field].isin(up)]
if sub is not None
else df[df[basin_field].isin(up)]
)
def get_raster_wcs(
    coordinates: Union[Iterable, Sequence[Union[float, str]]],
    geographic: bool = True,
    layer: str = None,
    geoserver: str = GEO_URL,
) -> bytes:
    """Return a subset of a raster image from the local GeoServer via WCS 2.0.1 protocol.

    For geographic rasters, subsetting is based on WGS84 (Long, Lat) boundaries. If not geographic, subsetting based
    on projected coordinate system (Easting, Northing) boundaries.

    Parameters
    ----------
    coordinates : Sequence[Union[int, float, str]]
        Geographic coordinates of the bounding box (left, down, right, up)
    geographic : bool
        If True, uses "Long" and "Lat" in WCS call. Otherwise uses "E" and "N".
    layer : str
        Layer name of raster exposed on GeoServer instance, e.g. 'public:CEC_NALCMS_LandUse_2010'
    geoserver: str
        The address of the geoserver housing the layer to be queried. Default: https://pavics.ouranos.ca/geoserver/.

    Returns
    -------
    bytes
        A GeoTIFF array.

    Raises
    ------
    ChildProcessError
        If the server responded with an XML error report instead of raster data.
    """
    (left, down, right, up) = coordinates
    # Axis labels depend on whether the layer uses a geographic or projected CRS.
    x, y = ("Long", "Lat") if geographic else ("E", "N")

    wcs = WebCoverageService(url=urljoin(geoserver, "ows"), version="2.0.1")
    # The original wrapped this call in a no-op `try/except Exception: raise`.
    resp = wcs.getCoverage(
        identifier=[layer],
        format="image/tiff",
        subsets=[(x, left, right), (y, down, up)],
        timeout=120,
    )

    data = resp.read()
    try:
        etree.fromstring(data)
        # If parsing succeeds, the response is an XML file describing the server error.
        raise ChildProcessError(data)
    except etree.XMLSyntaxError:
        # Not XML: the response is the raster (DEM) array.
        return data
# ~~~~ HydroBASINS functions ~~~~ #
def hydrobasins_upstream(feature: dict, domain: str) -> pd.DataFrame:
    """Return the HydroBASINS features located upstream of the given feature.

    Parameters
    ----------
    feature : dict
        Basin feature attributes, including the fields ["HYBAS_ID", "NEXT_DOWN", "MAIN_BAS"].
    domain: {"na", "ar"}
        Domain of the feature, North America or Arctic.

    Returns
    -------
    pd.Series
        Basins ids including `fid` and its upstream contributors.
    """
    basin_field, downstream_field, basin_family = "HYBAS_ID", "NEXT_DOWN", "MAIN_BAS"

    # This does not work with `wfs.getfeature`. No filtering occurs when asking for specific attributes.
    # wfs = WebFeatureService(url=urljoin(geoserver, "wfs"), version="2.0.0", timeout=30)
    # layer = f"public:USGS_HydroBASINS_{'lake_' if lakes else ''}{domain}_lev{str(level).zfill(2)}"
    # filter = PropertyIsEqualTo(propertyname=basin_family, literal=feature[basin_family])

    # Fetch every feature sharing the main basin of the feature of interest.
    request_url = filter_hydrobasins_attributes_wfs(
        attribute=basin_family, value=feature[basin_family], domain=domain
    )
    basins = gpd.read_file(request_url)

    # Keep only the watersheds draining into the feature of interest.
    return _determine_upstream_ids(
        fid=feature[basin_field],
        df=basins,
        basin_field=basin_field,
        downstream_field=downstream_field,
    )
def hydrobasins_aggregate(gdf: pd.DataFrame) -> pd.DataFrame:
    """Merge multiple HydroBASINS watersheds into a single geometry per main basin.

    Parameters
    ----------
    gdf : pd.DataFrame
        Watershed attributes indexed by HYBAS_ID

    Returns
    -------
    pd.DataFrame
    """
    first_idx = gdf.index[0]

    # TODO: Review. Not sure it all makes sense. --> Looks fine to me? (TJS)
    def _combine(col):
        # Distance-like columns keep their minimum, area-like columns are summed,
        # and every other attribute keeps the value of the first record.
        name = col.name
        if name in ("COAST", "DIST_MAIN", "DIST_SINK"):
            return col.min()
        if name in ("SUB_AREA", "LAKE"):
            return col.sum()
        return col.loc[first_idx]

    # Zero-width buffer repairs invalid geometries before dissolving.
    gdf["geometry"] = gdf.buffer(0)
    return gdf.dissolve(by="MAIN_BAS", aggfunc=_combine)
def select_hybas_domain(
    bbox: Optional[
        Tuple[
            Union[int, float], Union[int, float], Union[int, float], Union[int, float]
        ]
    ] = None,
    point: Optional[Tuple[Union[int, float], Union[int, float]]] = None,
) -> str:
    """
    Provided a given coordinate or boundary box, return the domain name of the geographic region
    the coordinate is located within.

    Parameters
    ----------
    bbox : Optional[Tuple[Union[float, int], Union[float, int], Union[float, int], Union[float, int]]]
        Geographic coordinates of the bounding box (left, down, right, up).
    point : Optional[Tuple[Union[float, int], Union[float, int]]]
        Geographic coordinates of an intersecting point (lon, lat).

    Returns
    -------
    str
        The domain that the coordinate falls within. Possible results: "na", "ar".

    Raises
    ------
    NotImplementedError
        If both `bbox` and `point` are given.
    LookupError
        If no domain contains the given coordinates.
    """
    if bbox and point:
        raise NotImplementedError("Provide either 'bbox' or 'point'.")
    if point:
        # A point is a degenerate bbox with identical corners.
        bbox = point * 2

    for dom, fn in hybas_domains.items():
        # Context managers ensure the file handle and fiona resources are closed
        # even when returning early (the original leaked them).
        with open(fn, "rb") as f, fiona.io.ZipMemoryFile(f) as zf:
            with zf.open(fn.stem + ".shp") as coll:
                for _ in coll.filter(bbox=bbox):
                    return dom

    raise LookupError(f"Could not find feature containing bbox: {bbox}.")
def filter_hydrobasins_attributes_wfs(
    attribute: str,
    value: Union[str, float, int],
    domain: str,
    geoserver: str = GEO_URL,
) -> str:
    """Return a URL that formats and returns a remote GetFeatures request from the USGS HydroBASINS dataset.

    For geographic rasters, subsetting is based on WGS84 (Long, Lat) boundaries. If not geographic, subsetting based
    on projected coordinate system (Easting, Northing) boundaries.

    Parameters
    ----------
    attribute : str
        Attribute/field to be queried.
    value: Union[str, float, int]
        Value for attribute queried.
    domain : {"na", "ar"}
        The domain of the HydroBASINS data.
    geoserver: str
        The address of the geoserver housing the layer to be queried. Default: https://pavics.ouranos.ca/geoserver/.

    Returns
    -------
    str
        URL to the GeoJSON-encoded WFS response.
    """
    # Layer naming is fixed to the lakes-included, level-12 HydroBASINS product.
    lakes, level = True, 12
    layer_name = "public:USGS_HydroBASINS_{}{}_lev{}".format(
        "lake_" if lakes else "", domain, str(level).zfill(2)
    )
    return _filter_feature_attributes_wfs(
        attribute=attribute, value=value, layer=layer_name, geoserver=geoserver
    )
def get_hydrobasins_location_wfs(
    coordinates: Tuple[
        Union[str, float, int],
        Union[str, float, int],
    ],
    domain: str = None,
    geoserver: str = GEO_URL,
) -> str:
    """Return features from the USGS HydroBASINS data set using bounding box coordinates.

    For geographic rasters, subsetting is based on WGS84 (Long, Lat) boundaries. If not geographic, subsetting based
    on projected coordinate system (Easting, Northing) boundaries.

    Parameters
    ----------
    coordinates : Tuple[Union[str, float, int], Union[str, float, int]]
        Geographic coordinates of the bounding box (left, down, right, up).
    domain : {"na", "ar"}
        The domain of the HydroBASINS data.
    geoserver: str
        The address of the geoserver housing the layer to be queried. Default: https://pavics.ouranos.ca/geoserver/.

    Returns
    -------
    str
        A GeoJSON-encoded vector feature.
    """
    # Layer naming is fixed to the lakes-included, level-12 HydroBASINS product.
    lakes, level = True, 12
    layer_name = "public:USGS_HydroBASINS_{}{}_lev{}".format(
        "lake_" if lakes else "", domain, str(level).zfill(2)
    )
    if wfs_Point or Intersects:
        return _get_location_wfs(point=coordinates, layer=layer_name, geoserver=geoserver)
    # Fallback for OWSLib<=0.24.1: query with a degenerate bbox built from the point.
    return _get_location_wfs(bbox=coordinates * 2, layer=layer_name, geoserver=geoserver)
# ~~~~ Hydro Routing ~~~~ #
def hydro_routing_upstream(
    fid: Union[str, float, int],
    level: int = 12,
    lakes: str = "1km",
    geoserver: str = GEO_URL,
) -> pd.Series:
    """Return a list of hydro routing features located upstream.

    Parameters
    ----------
    fid : Union[str, float, int]
        Basin feature ID code of the downstream feature.
    level : int
        Level of granularity requested for the lakes vector (range(7,13)). Default: 12.
    lakes : {"1km", "all"}
        Query the version of dataset with lakes under 1km in width removed ("1km") or return all lakes ("all").
    geoserver: str
        The address of the geoserver housing the layer to be queried. Default: https://pavics.ouranos.ca/geoserver/.

    Returns
    -------
    pd.Series
        Basins ids including `fid` and its upstream contributors.
    """
    wfs = WebFeatureService(url=urljoin(geoserver, "wfs"), version="2.0.0", timeout=30)
    layer = f"public:routing_{lakes}Lakes_{str(level).zfill(2)}"

    # Get attributes necessary to identify upstream watersheds
    resp = wfs.getfeature(
        typename=layer,
        propertyname=["SubId", "DowSubId"],
        outputFormat="application/json",
    )
    # NOTE(review): this response is passed to gpd.read_file as-is, while the second
    # one below is read and decoded first — confirm both forms are intentional.
    df = gpd.read_file(resp)

    # Identify upstream features
    df_upstream = _determine_upstream_ids(
        fid=fid,
        df=df,
        basin_field="SubId",
        downstream_field="DowSubId",
    )

    # Fetch upstream features
    # NOTE(review): assumes the GeoJSON response exposes an "id" column — verify against the layer schema.
    resp = wfs.getfeature(
        typename=layer,
        featureid=df_upstream["id"].tolist(),
        outputFormat="application/json",
    )
    return gpd.read_file(resp.read().decode())
def get_hydro_routing_attributes_wfs(
    attribute: Sequence[str],
    level: int = 12,
    lakes: str = "1km",
    geoserver: str = GEO_URL,
) -> str:
    """Return a URL that formats and returns a remote GetFeatures request from hydro routing dataset.

    For geographic rasters, subsetting is based on WGS84 (Long, Lat) boundaries. If not geographic, subsetting based
    on projected coordinate system (Easting, Northing) boundaries.

    Parameters
    ----------
    attribute : list
        Attributes/fields to be queried.
    level : int
        Level of granularity requested for the lakes vector (range(7,13)). Default: 12.
    lakes : {"1km", "all"}
        Query the version of dataset with lakes under 1km in width removed ("1km") or return all lakes ("all").
    geoserver: str
        The address of the geoserver housing the layer to be queried. Default: https://pavics.ouranos.ca/geoserver/.

    Returns
    -------
    str
        URL to the GeoJSON-encoded WFS response.
    """
    layer_name = "public:routing_{}Lakes_{}".format(lakes, str(level).zfill(2))
    return _get_feature_attributes_wfs(
        attribute=attribute, layer=layer_name, geoserver=geoserver
    )
def filter_hydro_routing_attributes_wfs(
    attribute: str = None,
    value: Union[str, float, int] = None,
    level: int = 12,
    lakes: str = "1km",
    geoserver: str = GEO_URL,
) -> str:
    """Return a URL that formats and returns a remote GetFeatures request from hydro routing dataset.

    For geographic rasters, subsetting is based on WGS84 (Long, Lat) boundaries. If not geographic, subsetting based
    on projected coordinate system (Easting, Northing) boundaries.

    Parameters
    ----------
    attribute : list
        Attributes/fields to be queried.
    value: str or int or float
        The requested value for the attribute.
    level : int
        Level of granularity requested for the lakes vector (range(7,13)). Default: 12.
    lakes : {"1km", "all"}
        Query the version of dataset with lakes under 1km in width removed ("1km") or return all lakes ("all").
    geoserver: str
        The address of the geoserver housing the layer to be queried. Default: https://pavics.ouranos.ca/geoserver/.

    Returns
    -------
    str
        URL to the GeoJSON-encoded WFS response.
    """
    layer_name = "public:routing_{}Lakes_{}".format(lakes, str(level).zfill(2))
    return _filter_feature_attributes_wfs(
        attribute=attribute, value=value, layer=layer_name, geoserver=geoserver
    )
def get_hydro_routing_location_wfs(
    coordinates: Tuple[
        Union[int, float, str],
        Union[str, float, int],
    ],
    lakes: str,
    level: int = 12,
    geoserver: str = GEO_URL,
) -> str:
    """Return features from the hydro routing data set using bounding box coordinates.

    For geographic rasters, subsetting is based on WGS84 (Long, Lat) boundaries. If not geographic, subsetting based
    on projected coordinate system (Easting, Northing) boundaries.

    Parameters
    ----------
    coordinates : Tuple[Union[str, float, int], Union[str, float, int]]
        Geographic coordinates of the bounding box (left, down, right, up).
    lakes : {"1km", "all"}
        Query the version of dataset with lakes under 1km in width removed ("1km") or return all lakes ("all").
    level : int
        Level of granularity requested for the lakes vector (range(7,13)). Default: 12.
    geoserver: str
        The address of the geoserver housing the layer to be queried. Default: https://pavics.ouranos.ca/geoserver/.

    Returns
    -------
    str
        A GML-encoded vector feature.
    """
    layer_name = "public:routing_{}Lakes_{}".format(lakes, str(level).zfill(2))
    if wfs_Point or Intersects:
        return _get_location_wfs(point=coordinates, layer=layer_name, geoserver=geoserver)
    # Fallback for OWSLib<=0.24.1: query with a degenerate bbox built from the point.
    return _get_location_wfs(bbox=coordinates * 2, layer=layer_name, geoserver=geoserver)
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing AutoContrast op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision.transforms as vision
from mindspore import log as logger
from util import visualize_list, visualize_one_channel_dataset, diff_mse, save_and_check_md5
# Small ImageNet-style image folder used as test input.
DATA_DIR = "../data/dataset/testImageNetData/train/"
# MNIST test data (single-channel images).
MNIST_DATA_DIR = "../data/dataset/testMnistData"
# When True, regenerate the golden files instead of comparing against them.
GENERATE_GOLDEN = False
def test_auto_contrast_py(plot=False):
    """
    Feature: AutoContrast
    Description: Run the Python AutoContrast op on an image folder, log the MSE against
        the unmodified images, and compare the pipeline output md5 to the golden file
    Expectation: md5 of the transformed dataset matches the stored golden file
    """
    logger.info("Test AutoContrast Python implementation")

    # Original Images
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_original = mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
                                                                           vision.Resize((224, 224)),
                                                                           vision.ToTensor()])

    ds_original = data_set.map(operations=transforms_original, input_columns="image")

    ds_original = ds_original.batch(512)

    # Accumulate every batch into one NHWC array (ToTensor yields NCHW, hence the transpose).
    for idx, (image, _) in enumerate(ds_original):
        if idx == 0:
            images_original = np.transpose(image.asnumpy(), (0, 2, 3, 1))
        else:
            images_original = np.append(images_original,
                                        np.transpose(image.asnumpy(), (0, 2, 3, 1)),
                                        axis=0)

    # AutoContrast Images
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)

    transforms_auto_contrast = \
        mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
                                                         vision.Resize((224, 224)),
                                                         vision.AutoContrast(cutoff=10.0, ignore=[10, 20]),
                                                         vision.ToTensor()])

    ds_auto_contrast = data_set.map(operations=transforms_auto_contrast, input_columns="image")

    ds_auto_contrast = ds_auto_contrast.batch(512)

    for idx, (image, _) in enumerate(ds_auto_contrast):
        if idx == 0:
            images_auto_contrast = np.transpose(image.asnumpy(), (0, 2, 3, 1))
        else:
            images_auto_contrast = np.append(images_auto_contrast,
                                             np.transpose(image.asnumpy(), (0, 2, 3, 1)),
                                             axis=0)

    # MSE against the originals is logged for inspection only, not asserted.
    num_samples = images_original.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_auto_contrast[i], images_original[i])
    logger.info("MSE= {}".format(str(np.mean(mse))))

    # Compare with expected md5 from images
    filename = "autocontrast_01_result_py.npz"
    save_and_check_md5(ds_auto_contrast, filename, generate_golden=GENERATE_GOLDEN)

    if plot:
        visualize_list(images_original, images_auto_contrast)
def test_auto_contrast_c(plot=False):
    """
    Feature: AutoContrast
    Description: Run the C and Python AutoContrast implementations with identical
        parameters on the same images and compare their outputs
    Expectation: Pixel-wise MSE between the two outputs is exactly 0 and the C
        pipeline's md5 matches the golden file
    """
    logger.info("Test AutoContrast C implementation")

    # AutoContrast Images
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224))], input_columns=["image"])

    python_op = vision.AutoContrast(cutoff=10.0, ignore=[10, 20])
    c_op = vision.AutoContrast(cutoff=10.0, ignore=[10, 20])

    # Route through PIL so the Python implementation is exercised.
    transforms_op = mindspore.dataset.transforms.transforms.Compose([lambda img: vision.ToPIL()(img.astype(np.uint8)),
                                                                     python_op,
                                                                     np.array])

    ds_auto_contrast_py = data_set.map(operations=transforms_op, input_columns="image")

    ds_auto_contrast_py = ds_auto_contrast_py.batch(512)

    for idx, (image, _) in enumerate(ds_auto_contrast_py):
        if idx == 0:
            images_auto_contrast_py = image.asnumpy()
        else:
            images_auto_contrast_py = np.append(images_auto_contrast_py,
                                                image.asnumpy(),
                                                axis=0)

    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224))], input_columns=["image"])

    ds_auto_contrast_c = data_set.map(operations=c_op, input_columns="image")

    ds_auto_contrast_c = ds_auto_contrast_c.batch(512)

    for idx, (image, _) in enumerate(ds_auto_contrast_c):
        if idx == 0:
            images_auto_contrast_c = image.asnumpy()
        else:
            images_auto_contrast_c = np.append(images_auto_contrast_c,
                                               image.asnumpy(),
                                               axis=0)

    num_samples = images_auto_contrast_c.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        mse[i] = diff_mse(images_auto_contrast_c[i], images_auto_contrast_py[i])
    logger.info("MSE= {}".format(str(np.mean(mse))))
    # Both implementations must produce identical pixels.
    np.testing.assert_equal(np.mean(mse), 0.0)

    # Compare with expected md5 from images
    filename = "autocontrast_01_result_c.npz"
    save_and_check_md5(ds_auto_contrast_c, filename, generate_golden=GENERATE_GOLDEN)

    if plot:
        visualize_list(images_auto_contrast_c, images_auto_contrast_py, visualize_mode=2)
def test_auto_contrast_one_channel_c(plot=False):
    """
    Feature: AutoContrast
    Description: Run the C and Python AutoContrast implementations on single-channel
        images (first channel extracted from RGB) and compare their outputs
    Expectation: Pixel-wise MSE between the two outputs is exactly 0
    """
    logger.info("Test AutoContrast C implementation With One Channel Images")

    # AutoContrast Images
    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224))], input_columns=["image"])

    python_op = vision.AutoContrast()
    c_op = vision.AutoContrast()

    # not using vision.ToTensor() since it converts to floats
    transforms_op = mindspore.dataset.transforms.transforms.Compose(
        [lambda img: (np.array(img)[:, :, 0]).astype(np.uint8),
         vision.ToPIL(),
         python_op,
         np.array])

    ds_auto_contrast_py = data_set.map(operations=transforms_op, input_columns="image")

    ds_auto_contrast_py = ds_auto_contrast_py.batch(512)

    for idx, (image, _) in enumerate(ds_auto_contrast_py):
        if idx == 0:
            images_auto_contrast_py = image.asnumpy()
        else:
            images_auto_contrast_py = np.append(images_auto_contrast_py,
                                                image.asnumpy(),
                                                axis=0)

    data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
    # The channel extraction happens in the pipeline for the C path.
    data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224)), lambda img: np.array(img[:, :, 0])],
                            input_columns=["image"])

    ds_auto_contrast_c = data_set.map(operations=c_op, input_columns="image")

    ds_auto_contrast_c = ds_auto_contrast_c.batch(512)

    for idx, (image, _) in enumerate(ds_auto_contrast_c):
        if idx == 0:
            images_auto_contrast_c = image.asnumpy()
        else:
            images_auto_contrast_c = np.append(images_auto_contrast_c,
                                               image.asnumpy(),
                                               axis=0)

    num_samples = images_auto_contrast_c.shape[0]
    mse = np.zeros(num_samples)
    for i in range(num_samples):
        # squeeze drops the trailing channel axis the C path may carry.
        mse[i] = diff_mse(np.squeeze(images_auto_contrast_c[i]), images_auto_contrast_py[i])
    logger.info("MSE= {}".format(str(np.mean(mse))))
    np.testing.assert_equal(np.mean(mse), 0.0)

    if plot:
        visualize_list(images_auto_contrast_c, images_auto_contrast_py, visualize_mode=2)
def test_auto_contrast_mnist_c(plot=False):
    """
    Feature: AutoContrast
    Description: Apply the C AutoContrast op to two MNIST (grayscale) samples and
        compare the pipeline output md5 to the golden file
    Expectation: md5 of the transformed dataset matches the stored golden file
    """
    logger.info("Test AutoContrast C implementation With MNIST Images")
    data_set = ds.MnistDataset(dataset_dir=MNIST_DATA_DIR, num_samples=2, shuffle=False)
    ds_auto_contrast_c = data_set.map(operations=vision.AutoContrast(cutoff=1, ignore=(0, 255)), input_columns="image")
    ds_orig = ds.MnistDataset(dataset_dir=MNIST_DATA_DIR, num_samples=2, shuffle=False)

    images = []
    images_trans = []
    labels = []
    # Walk both datasets in lockstep to pair originals with transformed images.
    for _, (data_orig, data_trans) in enumerate(zip(ds_orig, ds_auto_contrast_c)):
        image_orig, label_orig = data_orig
        image_trans, _ = data_trans
        images.append(image_orig.asnumpy())
        labels.append(label_orig.asnumpy())
        images_trans.append(image_trans.asnumpy())

    # Compare with expected md5 from images
    filename = "autocontrast_mnist_result_c.npz"
    save_and_check_md5(ds_auto_contrast_c, filename, generate_golden=GENERATE_GOLDEN)

    if plot:
        visualize_one_channel_dataset(images, images_trans, labels)
def test_auto_contrast_invalid_ignore_param_c():
    """
    Feature: AutoContrast
    Description: Test AutoContrast C implementation with invalid ignore parameter
    Expectation: TypeError is raised with a descriptive message
    """
    logger.info("Test AutoContrast C implementation with invalid ignore parameter")
    try:
        data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data_set = data_set.map(operations=[vision.Decode(),
                                            vision.Resize((224, 224)),
                                            lambda img: np.array(img[:, :, 0])], input_columns=["image"])
        # invalid ignore
        data_set = data_set.map(operations=vision.AutoContrast(ignore=255.5), input_columns="image")
        # Previously the test passed silently if no exception was raised.
        raise AssertionError("TypeError expected for ignore=255.5 but none was raised.")
    except TypeError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Argument ignore with value 255.5 is not of type" in str(error)
    try:
        data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data_set = data_set.map(operations=[vision.Decode(), vision.Resize((224, 224)),
                                            lambda img: np.array(img[:, :, 0])], input_columns=["image"])
        # invalid ignore
        data_set = data_set.map(operations=vision.AutoContrast(ignore=(10, 100)), input_columns="image")
        raise AssertionError("TypeError expected for ignore=(10, 100) but none was raised.")
    except TypeError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Argument ignore with value (10,100) is not of type" in str(error)
def test_auto_contrast_invalid_cutoff_param_c():
    """
    Feature: AutoContrast
    Description: Test AutoContrast C implementation with invalid cutoff parameter
    Expectation: ValueError is raised with a descriptive message
    """
    logger.info("Test AutoContrast C implementation with invalid cutoff parameter")
    try:
        data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data_set = data_set.map(operations=[vision.Decode(),
                                            vision.Resize((224, 224)),
                                            lambda img: np.array(img[:, :, 0])], input_columns=["image"])
        # invalid cutoff
        data_set = data_set.map(operations=vision.AutoContrast(cutoff=-10.0), input_columns="image")
        # Previously the test passed silently if no exception was raised.
        raise AssertionError("ValueError expected for cutoff=-10.0 but none was raised.")
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input cutoff is not within the required interval of [0, 50)." in str(error)
    try:
        data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data_set = data_set.map(operations=[vision.Decode(),
                                            vision.Resize((224, 224)),
                                            lambda img: np.array(img[:, :, 0])], input_columns=["image"])
        # invalid cutoff
        data_set = data_set.map(operations=vision.AutoContrast(cutoff=120.0), input_columns="image")
        raise AssertionError("ValueError expected for cutoff=120.0 but none was raised.")
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input cutoff is not within the required interval of [0, 50)." in str(error)
def test_auto_contrast_invalid_ignore_param_py():
    """
    Feature: AutoContrast
    Description: Test AutoContrast Python implementation with invalid ignore parameter
    Expectation: TypeError is raised with a descriptive message
    """
    logger.info("Test AutoContrast Python implementation with invalid ignore parameter")
    try:
        data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data_set = data_set.map(operations=[mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
                                                                                             vision.Resize((224, 224)),
                                                                                             vision.AutoContrast(
                                                                                                 ignore=255.5),
                                                                                             vision.ToTensor()])],
                                input_columns=["image"])
        # Previously the test passed silently if no exception was raised.
        raise AssertionError("TypeError expected for ignore=255.5 but none was raised.")
    except TypeError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Argument ignore with value 255.5 is not of type" in str(error)
    try:
        data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data_set = data_set.map(operations=[mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
                                                                                             vision.Resize((224, 224)),
                                                                                             vision.AutoContrast(
                                                                                                 ignore=(10, 100)),
                                                                                             vision.ToTensor()])],
                                input_columns=["image"])
        raise AssertionError("TypeError expected for ignore=(10, 100) but none was raised.")
    except TypeError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Argument ignore with value (10,100) is not of type" in str(error)
def test_auto_contrast_invalid_cutoff_param_py():
    """
    Feature: AutoContrast
    Description: Test AutoContrast Python implementation with invalid cutoff parameter
    Expectation: ValueError is raised with a descriptive message
    """
    logger.info("Test AutoContrast Python implementation with invalid cutoff parameter")
    try:
        data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data_set = data_set.map(operations=[mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
                                                                                             vision.Resize((224, 224)),
                                                                                             vision.AutoContrast(
                                                                                                 cutoff=-10.0),
                                                                                             vision.ToTensor()])],
                                input_columns=["image"])
        # Previously the test passed silently if no exception was raised.
        raise AssertionError("ValueError expected for cutoff=-10.0 but none was raised.")
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input cutoff is not within the required interval of [0, 50)." in str(error)
    try:
        data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
        data_set = data_set.map(
            operations=[mindspore.dataset.transforms.transforms.Compose([vision.Decode(True),
                                                                         vision.Resize((224, 224)),
                                                                         vision.AutoContrast(cutoff=120.0),
                                                                         vision.ToTensor()])],
            input_columns=["image"])
        raise AssertionError("ValueError expected for cutoff=120.0 but none was raised.")
    except ValueError as error:
        logger.info("Got an exception in DE: {}".format(str(error)))
        assert "Input cutoff is not within the required interval of [0, 50)." in str(error)
if __name__ == "__main__":
    # Run the visual comparison tests with plotting enabled, then the
    # parameter-validation tests.
    test_auto_contrast_py(plot=True)
    test_auto_contrast_c(plot=True)
    test_auto_contrast_one_channel_c(plot=True)
    test_auto_contrast_mnist_c(plot=True)
    test_auto_contrast_invalid_ignore_param_c()
    test_auto_contrast_invalid_ignore_param_py()
    test_auto_contrast_invalid_cutoff_param_c()
    test_auto_contrast_invalid_cutoff_param_py()
|
from setting import *
from msg_box_class import *
from input_Number import *
from PyQt5.QtCore import pyqtSignal as pys
import sys
class settingForm(QtWidgets.QMainWindow,Ui_setting):
    # Settings window for the ventilator UI.
    # Boolean signals re-emitted to the owner window when a toggle changes.
    fill_sig=pys(bool)
    grid_sig=pys(bool)
    fw_sig=pys(int)
    embbed_sig=pys(bool)
    eg_sig=pys(bool)
    p_sig=pys(bool)
    tv_sig=pys(bool)
    fr_sig=pys(bool)
    # (widget name, new value) pairs emitted when a dial or slider value changes.
    pdial_sig=pys(str,int)
    peepdial_sig=pys(str,int)
    tvdial_sig=pys(str,int)
    brSlider_sig=pys(str,int)
    frSlider_sig=pys(str,int)
    def __init__(self,parent=None):
        """Build the settings window, initialize state, and wire widget signals."""
        super(settingForm,self).__init__(parent)
        self.setupUi(self)
        # NOTE(review): `tgState` is a module-level global shared outside this class —
        # confirm whether an instance attribute would suffice.
        global tgState
        self.maxOpacity=1
        self.minOpacity=0.2
        tgState=True
        self.debug=False
        # Last recorded (x, y) screen position used to place the numeric-input popup.
        self.mouse_location=(0,0)
        self.grid=True
        # Object name of the widget currently awaiting a value from the numeric input.
        self.valueRequesting_obj=""
        # Paint both toggle switches in their initial "on" state.
        if tgState:
            self.tg.move(40,0)
            self.tg.setText(str(1))
            self.tg.setStyleSheet("background-color: rgb(248, 248, 248);border-radius:10px;")
            self.toggleSwitch.setStyleSheet("background-color:green;border-radius:10px;")
        if self.grid:
            self.tg_g.move(20,0)
            self.tg_g.setText(str(1))
            self.tg_g.setStyleSheet("background-color: rgb(248, 248, 248);border-radius:10px;")
            self.toggleSwitch_grid.setStyleSheet("background-color:green;border-radius:10px;")
        # Apply the slider's current value before forcing the 20-100 range below.
        self.setTransparency(self.trans_slider.value())
        self.eg.toggled[bool].connect(self.hide_ecg)
        # Slider range 20-100; presumably maps to 0.2-1.0 window opacity — see
        # minOpacity/maxOpacity above (setTransparency is defined elsewhere).
        self.trans_slider.setMinimum(20)
        self.trans_slider.setMaximum(100)
        self.trans_slider.setValue(60)
        # Show/hide handlers for the optional graph panels.
        self.pg.toggled[bool].connect(self.hide_p)
        self.frg.toggled[bool].connect(self.hide_fr)
        self.tvg.toggled[bool].connect(self.hide_tv)
        self.fwSlider.valueChanged[int].connect(self.fillLevelUpdater)
        self.embbed.clicked.connect(self.embbed_rd)
        self.separate.clicked.connect(self.isolate_rd)
        # All three toggle widgets share the same handler.
        self.tg.clicked.connect(self.toggle)
        self.tg_btn.clicked.connect(self.toggle)
        self.tg_g.clicked.connect(self.toggle)
        self.trans_slider.valueChanged[int].connect(self.setTransparency)
        # Dials/sliders forward their values through sendValues.
        self.pdial.valueChanged.connect(self.sendValues)
        self.tvdial.valueChanged.connect(self.sendValues)
        self.peepdial.valueChanged.connect(self.sendValues)
        self.frSlider.valueChanged.connect(self.sendValues)
        self.brSlider.valueChanged.connect(self.sendValues)
        # Clicking a value label opens the numeric-input popup.
        self.pdValue.clicked.connect(self.openNI_window)
        self.peepValue.clicked.connect(self.openNI_window)
        self.tvValue.clicked.connect(self.openNI_window)
        self.frValue.clicked.connect(self.openNI_window)
        self.brValue.clicked.connect(self.openNI_window)
def openNI_window(self):
self.valueRequesting_obj=self.sender().objectName()
self.mouse_location=(self.pos().x(),self.sender().pos().y()+self.pos().y())
self.numericInput_window=numberInput(loc=self.mouse_location)
self.numericInput_window.input_num.connect(self.receivedData)
self.numericInput_window.show()
def receivedData(self,data):
self.requesting_obj=self.findChild(QtWidgets.QLabel,self.valueRequesting_obj)
if self.valueRequesting_obj=="pdValue":
if(int(data)>self.pdial.maximum()):
error="Error","The value of pressure can not exceed <b>{}</b> ".format(str(self.pdial.maximum()))+"<br>please repeat the process "
self.alert=msg_box(msg=error)
self.alert.show()
else:
self.requesting_obj.setText(data)
self.pdial_sig.emit("pdial",int(data))
self.pdial.setValue(int(data))
self.valueRequesting_obj=""
elif self.valueRequesting_obj=="peepValue":
if(int(data)>self.peepdial.maximum()):
error="Error","The value of PEEP can not exceed <b>{}</b> ".format(str(self.peepdial.maximum()))+"<br>please repeat the process "
self.alert=msg_box(msg=error)
self.alert.show()
else:
self.requesting_obj.setText(data)
self.peepdial_sig.emit("peepdial",int(data))
self.peepdial.setValue(int(data))
self.valueRequesting_obj=""
elif self.valueRequesting_obj=="tvValue":
if(int(data)>self.tvdial.maximum()):
error="Error","The value of Tidal Volume can not exceed <b>{}</b> ".format(str(self.tvdial.maximum()))+"<br>please repeat the process "
self.alert=msg_box(msg=error)
self.alert.show()
else:
self.requesting_obj.setText(data)
self.tvdial_sig.emit("tvdial",int(data))
self.tvdial.setValue(int(data))
self.valueRequesting_obj=""
elif self.valueRequesting_obj=="frValue":
if(int(data)>self.frSlider.maximum()):
error="Error","The value of flow Rate can not exceed <b>{}</b> ".format(str(self.frSlider.maximum()))+"<br>please repeat the process "
self.alert=msg_box(msg=error)
self.alert.show()
else:
self.requesting_obj.setText(data)
self.frSlider_sig.emit("frSlider",int(data))
self.frSlider.setValue(int(data))
self.valueRequesting_obj=""
elif self.valueRequesting_obj=="brValue":
if(int(data)>self.brSlider.maximum()):
error="Error","The value of Breath Rate can not exceed <b>{}</b> ".format(str(self.brSlider.maximum()))+"<br>please repeat the process "
self.alert=msg_box(msg=error)
self.alert.show()
else:
self.requesting_obj.setText(data)
self.brSlider_sig.emit("brSlider",int(data))
self.brSlider.setValue(int(data))
self.valueRequesting_obj=""
## def mousePressEvent(self,QMouseEvent):
## cursor=QtGui.QCursor()
## self.mouse_location=cursor.pos()
## print(self.mouse_location)
## def mousePressEvent(self,QMouseEvent):
## print(QMouseEvent.pos())
def sendValues(self,val):
self.widget=self.sender().objectName()
if self.widget=="pdial":
self.pdial_sig.emit(self.widget,val)
elif self.widget=="peepdial":
self.peepdial_sig.emit(self.widget,val)
elif self.widget=="tvdial":
self.tvdial_sig.emit(self.widget,val)
elif self.widget=="frSlider":
self.frSlider_sig.emit(self.widget,val)
elif self.widget=="brSlider":
self.brSlider_sig.emit(self.widget,val)
def setTransparency(self,pos):
trans=pos*(self.maxOpacity-self.minOpacity)/(self.trans_slider.maximum()-self.trans_slider.minimum())
self.setWindowOpacity(trans)
    def hide_ecg(self, val):
        """Relay the ECG-graph visibility toggle to listeners."""
        self.eg_sig.emit(val)
    def hide_tv(self, val):
        """Relay the tidal-volume-graph visibility toggle."""
        self.tv_sig.emit(val)
    def hide_fr(self, val):
        """Relay the flow-rate-graph visibility toggle."""
        self.fr_sig.emit(val)
    def hide_p(self, val):
        """Relay the pressure-graph visibility toggle."""
        self.p_sig.emit(val)
    def fill_rd(self):
        """Emit fill mode ON (radio-button handler)."""
        self.fill_sig.emit(True)
    def unfill_rd(self):
        """Emit fill mode OFF (radio-button handler)."""
        self.fill_sig.emit(False)
    def embbed_rd(self):
        """Emit embedded-layout mode ON (button handler)."""
        self.embbed_sig.emit(True)
    def isolate_rd(self):
        """Emit embedded-layout mode OFF, i.e. separate windows."""
        self.embbed_sig.emit(False)
    def fillLevelUpdater(self, value):
        """Forward the fill-level slider value to listeners."""
        self.fw_sig.emit(value)
def toggle(self):
self.sent=self.sender().objectName()
if self.sent=="tg":
global tgState
tgState=not tgState;
self.fill_sig.emit(tgState)
if tgState:
self.tg.move(40,0)
self.tg.setText(str(1))
self.tg.setStyleSheet("background-color: rgb(248, 248, 248);border-radius:10px;")
self.toggleSwitch.setStyleSheet("background-color:green;border-radius:10px;")
else:
self.tg.move(0,0)
self.tg.setText(str(0))
self.tg.setStyleSheet("background-color: rgb(248, 248, 248);border-radius:10px;color:red;")
self.toggleSwitch.setStyleSheet("background-color:red;border-radius:10px;")
if self.sent=="tg_btn":
self.debug= not self.debug
if self.debug:
self.tg_btn.move(40,0)
self.tg_btn.setText(str(1))
self.tg_btn.setStyleSheet("background-color: rgb(248, 248, 248);border-radius:10px;")
self.debug_tg.setStyleSheet("background-color:green;border-radius:10px;")
else:
self.tg_btn.move(0,0)
self.tg_btn.setText(str(0))
self.tg_btn.setStyleSheet("background-color: rgb(248, 248, 248);border-radius:10px;color:red;")
self.debug_tg.setStyleSheet("background-color:red;border-radius:10px;")
if self.sent=="tg_g":
self.grid= not self.grid
self.grid_sig.emit(self.grid)
if self.grid:
self.tg_g.move(20,0)
self.tg_g.setText(str(1))
self.tg_g.setStyleSheet("background-color: rgb(248, 248, 248);border-radius:10px;")
self.toggleSwitch_grid.setStyleSheet("background-color:green;border-radius:10px;")
else:
self.tg_g.move(0,0)
self.tg_g.setText(str(0))
self.tg_g.setStyleSheet("background-color: rgb(248, 248, 248);border-radius:10px;color:red;")
self.toggleSwitch_grid.setStyleSheet("background-color:red;border-radius:10px;")
def main():
    """Launch the settings window as a standalone application."""
    application = QtWidgets.QApplication(sys.argv)
    window = settingForm()
    window.show()
    sys.exit(application.exec_())


if __name__ == "__main__":
    main()
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=redefined-outer-name, no-self-use
from typing import Union
import allure
import pytest
from adcm_client.base import ObjectNotFound
from adcm_client.objects import Cluster, Provider, Host, Service, Component
from adcm_pytest_plugin.steps.actions import (
run_host_action_and_assert_result,
run_cluster_action_and_assert_result,
run_service_action_and_assert_result,
run_component_action_and_assert_result,
)
from adcm_pytest_plugin.utils import get_data_dir
# Names of host actions declared in the bundle fixtures.
ACTION_ON_HOST = "action_on_host"
ACTION_ON_HOST_MULTIJOB = "action_on_host_multijob"
# Host action that only becomes available once the owning object
# reaches the "installed" state.
ACTION_ON_HOST_STATE_REQUIRED = "action_on_host_state_installed"
# Service and component names as defined in the bundle configs.
FIRST_SERVICE = "Dummy service"
SECOND_SERVICE = "Second service"
FIRST_COMPONENT = "first"
SECOND_COMPONENT = "second"
# Actions that switch the state of the corresponding object.
SWITCH_SERVICE_STATE = "switch_service_state"
SWITCH_CLUSTER_STATE = "switch_cluster_state"
SWITCH_HOST_STATE = "switch_host_state"
SWITCH_COMPONENT_STATE = "switch_component_state"
@allure.title("Create cluster")
@pytest.fixture()
def cluster(sdk_client_fs) -> Cluster:
    """Upload the plain cluster bundle and create a cluster from it."""
    uploaded = sdk_client_fs.upload_from_fs(get_data_dir(__file__, "cluster"))
    created = uploaded.cluster_prototype().cluster_create(name="Cluster")
    return created
@allure.title("Create a cluster with service")
@pytest.fixture()
def cluster_with_service(sdk_client_fs) -> Cluster:
    """Upload the bundle with a service and create a cluster from it."""
    bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, "cluster_with_service"))
    return bundle.cluster_prototype().cluster_create(name="Cluster with services")
@allure.title("Create a cluster with service and components")
@pytest.fixture()
def cluster_with_components(sdk_client_fs) -> Cluster:
    """Upload the bundle with components and create a cluster from it."""
    bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, "cluster_with_components"))
    return bundle.cluster_prototype().cluster_create(name="Cluster with components")
@allure.title("Create a cluster with target group action")
@pytest.fixture()
def cluster_with_target_group_action(sdk_client_fs) -> Cluster:
    """Upload the target-group bundle and create a cluster from it."""
    bundle = sdk_client_fs.upload_from_fs(get_data_dir(__file__, "cluster_target_group"))
    return bundle.cluster_prototype().cluster_create(name="Target group test")
@allure.title("Create provider")
@pytest.fixture()
def provider(sdk_client_fs) -> Provider:
    """Upload the provider bundle and create a provider from it."""
    uploaded = sdk_client_fs.upload_from_fs(get_data_dir(__file__, "provider"))
    return uploaded.provider_prototype().provider_create("Some provider")
class TestClusterActionsOnHost:
    """Host actions declared on the cluster prototype."""

    @pytest.mark.parametrize("action_name", [ACTION_ON_HOST, ACTION_ON_HOST_MULTIJOB])
    def test_availability(self, cluster: Cluster, provider: Provider, action_name):
        """
        Test that cluster host action is available on cluster host and is absent on cluster
        """
        host1 = provider.host_create("host_in_cluster")
        host2 = provider.host_create("host_not_in_cluster")
        cluster.host_add(host1)
        action_in_object_is_present(action_name, host1)
        action_in_object_is_absent(action_name, host2)
        action_in_object_is_absent(action_name, cluster)
        run_host_action_and_assert_result(host1, action_name, status="success")

    def test_availability_at_state(self, cluster: Cluster, provider: Provider):
        """
        Test that cluster host action is available on specify cluster state
        """
        host = provider.host_create("host_in_cluster")
        cluster.host_add(host)
        # Action must stay hidden until the cluster reaches the required state.
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_cluster_action_and_assert_result(cluster, SWITCH_CLUSTER_STATE)
        action_in_object_is_present(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_host_action_and_assert_result(host, ACTION_ON_HOST_STATE_REQUIRED)

    def test_availability_at_host_state(self, cluster: Cluster, provider: Provider):
        """
        Test that cluster host action isn't available on specify host state
        """
        host = provider.host_create("host_in_cluster")
        cluster.host_add(host)
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        # Switching only the host state must NOT unlock the action...
        run_host_action_and_assert_result(host, SWITCH_HOST_STATE)
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        # ...but switching the cluster state must.
        run_cluster_action_and_assert_result(cluster, SWITCH_CLUSTER_STATE)
        action_in_object_is_present(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_host_action_and_assert_result(host, ACTION_ON_HOST_STATE_REQUIRED)

    @allure.issue("https://arenadata.atlassian.net/browse/ADCM-1799")
    @pytest.mark.parametrize("action_name", [ACTION_ON_HOST, ACTION_ON_HOST_MULTIJOB])
    def test_two_clusters(self, action_name, cluster: Cluster, provider: Provider):
        """
        Test that cluster actions on host works fine on two clusters
        """
        second_cluster = cluster.bundle().cluster_prototype().cluster_create(name="Second cluster")
        first_host = provider.host_create("host_in_first_cluster")
        second_host = provider.host_create("host_in_second_cluster")
        cluster.host_add(first_host)
        second_cluster.host_add(second_host)
        action_in_object_is_present(action_name, first_host)
        action_in_object_is_present(action_name, second_host)
        run_host_action_and_assert_result(first_host, action_name, status="success")
        run_host_action_and_assert_result(second_host, action_name, status="success")
class TestServiceActionOnHost:
    """Host actions declared on a service prototype."""

    @pytest.mark.parametrize("action_name", [ACTION_ON_HOST, ACTION_ON_HOST_MULTIJOB])
    def test_availability(self, cluster_with_service: Cluster, provider: Provider, action_name):
        """
        Test that service host action is available on a service host and is absent on cluster
        """
        service = cluster_with_service.service_add(name=FIRST_SERVICE)
        second_service = cluster_with_service.service_add(name=SECOND_SERVICE)
        host_with_two_components = provider.host_create("host_with_two_components")
        host_with_one_component = provider.host_create("host_with_one_component")
        host_without_component = provider.host_create("host_without_component")
        host_with_different_services = provider.host_create("host_with_different_services")
        host_outside_cluster = provider.host_create("host_outside_cluster")
        for host in [
            host_with_two_components,
            host_with_one_component,
            host_without_component,
            host_with_different_services,
        ]:
            cluster_with_service.host_add(host)
        cluster_with_service.hostcomponent_set(
            (host_with_two_components, service.component(name=FIRST_COMPONENT)),
            (host_with_two_components, service.component(name=SECOND_COMPONENT)),
            (host_with_one_component, service.component(name=FIRST_COMPONENT)),
            (host_with_different_services, service.component(name=SECOND_COMPONENT)),
            (host_with_different_services, second_service.component(name=FIRST_COMPONENT)),
        )
        # The action must be visible only on hosts that carry a component
        # of the owning service.
        action_in_object_is_present(action_name, host_with_one_component)
        action_in_object_is_present(action_name, host_with_two_components)
        action_in_object_is_present(action_name, host_with_different_services)
        action_in_object_is_absent(action_name, host_without_component)
        action_in_object_is_absent(action_name, host_outside_cluster)
        action_in_object_is_absent(action_name, cluster_with_service)
        action_in_object_is_absent(action_name, service)
        run_host_action_and_assert_result(host_with_one_component, action_name)
        run_host_action_and_assert_result(host_with_two_components, action_name)
        run_host_action_and_assert_result(host_with_different_services, action_name)

    def test_availability_at_state(self, cluster_with_service: Cluster, provider: Provider):
        """
        Test that service host action is available on specify service state
        """
        service = cluster_with_service.service_add(name=FIRST_SERVICE)
        host = provider.host_create("host_in_cluster")
        cluster_with_service.host_add(host)
        cluster_with_service.hostcomponent_set((host, service.component(name=FIRST_COMPONENT)))
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        # The cluster state alone must not unlock a service-owned action.
        run_cluster_action_and_assert_result(cluster_with_service, SWITCH_CLUSTER_STATE)
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_service_action_and_assert_result(service, SWITCH_SERVICE_STATE)
        action_in_object_is_present(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_host_action_and_assert_result(host, ACTION_ON_HOST_STATE_REQUIRED)

    def test_availability_at_host_state(self, cluster_with_service: Cluster, provider: Provider):
        """
        Test that service host action isn't available on specify host state
        """
        service = cluster_with_service.service_add(name=FIRST_SERVICE)
        host = provider.host_create("host_in_cluster")
        cluster_with_service.host_add(host)
        cluster_with_service.hostcomponent_set((host, service.component(name=FIRST_COMPONENT)))
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        # The host state alone must not unlock a service-owned action.
        run_host_action_and_assert_result(host, SWITCH_HOST_STATE)
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_service_action_and_assert_result(service, SWITCH_SERVICE_STATE)
        action_in_object_is_present(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_host_action_and_assert_result(host, ACTION_ON_HOST_STATE_REQUIRED)

    @allure.issue("https://arenadata.atlassian.net/browse/ADCM-1799")
    @pytest.mark.parametrize("action_name", [ACTION_ON_HOST, ACTION_ON_HOST_MULTIJOB])
    def test_two_clusters(self, action_name, cluster_with_service: Cluster, provider: Provider):
        """
        Test that service actions on host works fine on two clusters
        """
        second_cluster = cluster_with_service.bundle().cluster_prototype().cluster_create(name="Second cluster")
        service_on_first_cluster = cluster_with_service.service_add(name=FIRST_SERVICE)
        service_on_second_cluster = second_cluster.service_add(name=FIRST_SERVICE)
        first_host = provider.host_create("host_in_first_cluster")
        second_host = provider.host_create("host_in_second_cluster")
        cluster_with_service.host_add(first_host)
        second_cluster.host_add(second_host)
        cluster_with_service.hostcomponent_set((first_host, service_on_first_cluster.component(name=FIRST_COMPONENT)))
        second_cluster.hostcomponent_set((second_host, service_on_second_cluster.component(name=FIRST_COMPONENT)))
        action_in_object_is_present(action_name, first_host)
        action_in_object_is_present(action_name, second_host)
        run_host_action_and_assert_result(first_host, action_name, status="success")
        run_host_action_and_assert_result(second_host, action_name, status="success")
class TestComponentActionOnHost:
    """Host actions declared on a component prototype."""

    @allure.issue(
        url="https://arenadata.atlassian.net/browse/ADCM-1948", name="Infinite host action on ADCM with pre-filled data"
    )
    @pytest.mark.parametrize("action_name", [ACTION_ON_HOST, ACTION_ON_HOST_MULTIJOB])
    def test_availability(self, cluster_with_components: Cluster, provider: Provider, action_name):
        """
        Test that component host action is available on a component host
        """
        service = cluster_with_components.service_add(name=FIRST_SERVICE)
        component_with_action = service.component(name=FIRST_COMPONENT)
        component_without_action = service.component(name=SECOND_COMPONENT)
        host_single_component = provider.host_create("host_with_single_component")
        host_two_components = provider.host_create("host_with_two_components")
        host_component_without_action = provider.host_create("host_component_without_action")
        host_without_components = provider.host_create("host_without_components")
        host_outside_cluster = provider.host_create("host_outside_cluster")
        for host in [
            host_single_component,
            host_two_components,
            host_component_without_action,
            host_without_components,
        ]:
            cluster_with_components.host_add(host)
        cluster_with_components.hostcomponent_set(
            (host_single_component, component_with_action),
            (host_two_components, component_with_action),
            (host_two_components, component_without_action),
            (host_component_without_action, component_without_action),
        )
        # Visible only on hosts that carry the component owning the action.
        action_in_object_is_present(action_name, host_single_component)
        action_in_object_is_present(action_name, host_two_components)
        action_in_object_is_absent(action_name, host_component_without_action)
        action_in_object_is_absent(action_name, host_without_components)
        action_in_object_is_absent(action_name, host_outside_cluster)
        action_in_object_is_absent(action_name, cluster_with_components)
        action_in_object_is_absent(action_name, service)
        action_in_object_is_absent(action_name, component_with_action)
        action_in_object_is_absent(action_name, component_without_action)
        run_host_action_and_assert_result(host_single_component, action_name)
        run_host_action_and_assert_result(host_two_components, action_name)

    def test_availability_at_state(self, cluster_with_components: Cluster, provider: Provider):
        """
        Test that component host action is available on specify service state
        """
        service = cluster_with_components.service_add(name=FIRST_SERVICE)
        component = service.component(name=FIRST_COMPONENT)
        adjacent_component = service.component(name=SECOND_COMPONENT)
        host = provider.host_create("host_in_cluster")
        cluster_with_components.host_add(host)
        cluster_with_components.hostcomponent_set((host, component))
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        # None of cluster, service, host or a sibling component state
        # switches may unlock the action -- only the owning component's.
        run_cluster_action_and_assert_result(cluster_with_components, SWITCH_CLUSTER_STATE)
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_service_action_and_assert_result(service, SWITCH_SERVICE_STATE)
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_host_action_and_assert_result(host, SWITCH_HOST_STATE)
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_component_action_and_assert_result(adjacent_component, SWITCH_COMPONENT_STATE)
        action_in_object_is_absent(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_component_action_and_assert_result(component, SWITCH_COMPONENT_STATE)
        action_in_object_is_present(ACTION_ON_HOST_STATE_REQUIRED, host)
        run_host_action_and_assert_result(host, ACTION_ON_HOST_STATE_REQUIRED)

    @allure.issue("https://arenadata.atlassian.net/browse/ADCM-1799")
    @pytest.mark.parametrize("action_name", [ACTION_ON_HOST, ACTION_ON_HOST_MULTIJOB])
    def test_two_clusters(self, action_name, cluster_with_components: Cluster, provider: Provider):
        """
        Test that component actions on host works fine on two clusters
        """
        second_cluster = cluster_with_components.bundle().cluster_prototype().cluster_create(name="Second cluster")
        service_on_first_cluster = cluster_with_components.service_add(name=FIRST_SERVICE)
        component_on_first_cluster = service_on_first_cluster.component(name=FIRST_COMPONENT)
        service_on_second_cluster = second_cluster.service_add(name=FIRST_SERVICE)
        component_on_second_cluster = service_on_second_cluster.component(name=FIRST_COMPONENT)
        first_host = provider.host_create("host_in_first_cluster")
        second_host = provider.host_create("host_in_second_cluster")
        cluster_with_components.host_add(first_host)
        second_cluster.host_add(second_host)
        cluster_with_components.hostcomponent_set((first_host, component_on_first_cluster))
        second_cluster.hostcomponent_set((second_host, component_on_second_cluster))
        action_in_object_is_present(action_name, first_host)
        action_in_object_is_present(action_name, second_host)
        run_host_action_and_assert_result(first_host, action_name, status="success")
        run_host_action_and_assert_result(second_host, action_name, status="success")
def test_target_group_in_inventory(cluster_with_target_group_action: Cluster, provider: Provider, sdk_client_fs):
    """
    Test that target group action has inventory_hostname info
    """
    fqdn = "host_in_cluster"
    new_host = provider.host_create(fqdn)
    cluster_with_target_group_action.host_add(new_host)
    action_in_object_is_present(ACTION_ON_HOST, new_host)
    run_host_action_and_assert_result(new_host, ACTION_ON_HOST)
    with allure.step("Assert that hostname in job log is present"):
        job_log = sdk_client_fs.job().log(type="stdout").content
        assert f"We are on host: {fqdn}" in job_log, "No hostname info in the job log"
# Objects on which an action's presence can be asserted.
ObjTypes = Union[Cluster, Host, Service, Component]


def action_in_object_is_present(action: str, obj: ObjTypes):
    """Assert that *action* can be found on *obj*."""
    step_title = f"Assert that action {action} is present in {_get_object_represent(obj)}"
    with allure.step(step_title):
        try:
            obj.action(name=action)
        except ObjectNotFound as not_found:
            raise AssertionError(f"Action {action} not found in object {obj}") from not_found
def action_in_object_is_absent(action: str, obj: ObjTypes):
    """Assert that *action* cannot be found on *obj*."""
    step_title = f"Assert that action {action} is absent in {_get_object_represent(obj)}"
    with allure.step(step_title):
        with pytest.raises(ObjectNotFound):
            obj.action(name=action)
def _get_object_represent(obj: ObjTypes) -> str:
    """Short human-readable identifier for *obj*, used in allure step titles."""
    if isinstance(obj, Host):
        return f"host {obj.fqdn}"
    # Services/components also expose .name and are labeled as "cluster"
    # here, matching the original behavior.
    return f"cluster {obj.name}"
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Email address type.
"""
__license__ = """
GoLismero 2.0 - The web knife - Copyright (C) 2011-2014
Golismero project site: https://github.com/golismero
Golismero project mail: <EMAIL>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
__all__ = ["Email"]
from . import Resource
from .domain import Domain
from .. import identity
from ...config import Config
from ...text.text_utils import to_utf8
#------------------------------------------------------------------------------
class Email(Resource):
    """
    Email address.
    """


    #--------------------------------------------------------------------------
    def __init__(self, address, name = None):
        """
        :param address: Email address.
        :type address: str

        :param name: Optional real life name associated with this email.
        :type name: str | None

        :raises TypeError: If the address or name is not a string.
        :raises ValueError: If the address fails rudimentary validation.
        """

        # Check the data types.
        address = to_utf8(address)
        name = to_utf8(name)
        if not isinstance(address, str):
            # FIX: interpolate the offending type with %; previously the type
            # was passed as a second TypeError argument and never formatted.
            raise TypeError("Expected string, got %r instead" % type(address))
        if name is not None and not isinstance(name, str):
            raise TypeError("Expected string, got %r instead" % type(name))

        # Do a very rudimentary validation of the email address.
        # This will at least keep users from confusing the order
        # of the arguments.
        if "@" not in address or not address[0].isalnum() or \
                not address[-1].isalnum():
            raise ValueError("Invalid email address: %s" % address)

        # Email address.
        self.__address = address

        # Real name.
        self.__name = name

        # Parent constructor.
        super(Email, self).__init__()


    #--------------------------------------------------------------------------
    def __str__(self):
        return self.address


    #--------------------------------------------------------------------------
    def __repr__(self):
        return "<Email address=%r name=%r>" % (self.address, self.name)


    #--------------------------------------------------------------------------
    @property
    def display_name(self):
        # Human readable type name shown in reports.
        return "E-Mail Address"


    #--------------------------------------------------------------------------
    def is_in_scope(self, scope = None):
        # Default to the current audit scope from the global configuration.
        if scope is None:
            scope = Config.audit_scope
        return self.hostname in scope


    #--------------------------------------------------------------------------
    @identity
    def address(self):
        """
        :return: Email address.
        :rtype: str
        """
        return self.__address


    #--------------------------------------------------------------------------
    @property
    def name(self):
        """
        :return: Real name.
        :rtype: str | None
        """
        return self.__name


    #--------------------------------------------------------------------------
    @property
    def url(self):
        """
        :return: mailto:// URL for this email address.
        :rtype: str
        """
        return "mailto://" + self.__address


    #--------------------------------------------------------------------------
    @property
    def username(self):
        """
        :return: Username for this email address.
        :rtype: str
        """
        return self.__address.split("@", 1)[0].strip().lower()


    #--------------------------------------------------------------------------
    @property
    def hostname(self):
        """
        :return: Hostname for this email address.
        :rtype: str
        """
        return self.__address.split("@", 1)[1].strip().lower()


    #--------------------------------------------------------------------------
    @property
    def discovered(self):
        # When the email's domain is in scope, derive a Domain resource.
        if self.is_in_scope():
            return [Domain(self.hostname)]
        return []
|
<filename>src/pymordemos/output_error_estimation.py
#!/usr/bin/env python
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import matplotlib.pyplot as plt
from typer import Argument, run
from pymor.basic import *
def main(
    fom_number: int = Argument(..., help='Selects FOMs [0, 1, 2] for elliptic problems and [3, 4] for '
                               + 'parabolic problems with scalar and vector valued outputs '),
    grid_intervals: int = Argument(..., help='Grid interval count.'),
    training_samples: int = Argument(..., help='Number of samples used for training the reduced basis.'),
    modes: int = Argument(..., help='Number of basis functions for the RB space (generated with POD)'),
    reductor_count: int = Argument(..., help='Reductor type for elliptic problems: \
                                   0: SimpleCoerciveReductor \
                                   1: CoerciveRBReductor. \
                                   For parabolic FOMs [4, 5] ParabolicRBReductor is used.')):
    """Example script for using output error estimation"""
    # FIX: the docstring above previously appeared *after* set_log_levels and
    # therefore was a no-op string statement, not the function's docstring.
    set_log_levels({'pymor': 'WARN'})
    assert fom_number in [0, 1, 2, 3, 4], f'No FOM available for fom_number {fom_number}'

    # elliptic case
    if fom_number == 0:
        # real valued output
        fom = create_fom(grid_intervals, vector_valued_output=False)
    elif fom_number == 1:
        # vector valued output (with BlockColumnOperator)
        fom = create_fom(grid_intervals, vector_valued_output=True)
    elif fom_number == 2:
        # an output which is actually a lincomb operator
        fom = create_fom(grid_intervals, vector_valued_output=True)
        dim_source = fom.output_functional.source.dim
        np.random.seed(1)
        random_matrix_1 = np.random.rand(2, dim_source)
        random_matrix_2 = np.random.rand(2, dim_source)
        op1 = NumpyMatrixOperator(random_matrix_1, source_id='STATE')
        op2 = NumpyMatrixOperator(random_matrix_2, source_id='STATE')
        ops = [op1, op2]
        lincomb_op = LincombOperator(ops, [1., 0.5])
        fom = fom.with_(output_functional=lincomb_op)
    # parabolic case
    elif fom_number in [3, 4]:
        from pymordemos.parabolic_mor import discretize_pymor
        fom = discretize_pymor()
        if fom_number == 3:
            fom = fom.with_(output_functional=fom.rhs.operators[0].H)
        else:
            random_matrix_1 = np.random.rand(2, fom.solution_space.dim)
            op = NumpyMatrixOperator(random_matrix_1, source_id='STATE')
            fom = fom.with_(output_functional=op)

    if reductor_count == 0:
        reductor = SimpleCoerciveRBReductor
    elif reductor_count == 1:
        reductor = CoerciveRBReductor
    # For parabolic FOMs the reductor choice is overridden unconditionally.
    if fom_number in [3, 4]:
        reductor = ParabolicRBReductor

    # Parameter space and operator are equal for all elliptic and parabolic foms
    if fom_number in [0, 1, 2]:
        parameter_space = fom.parameters.space(0.1, 1)
        coercivity_estimator = ExpressionParameterFunctional('min(diffusion)', fom.parameters)
    else:
        parameter_space = fom.parameters.space(1, 100)
        coercivity_estimator = ExpressionParameterFunctional('1.', fom.parameters)
    training_set = parameter_space.sample_uniformly(training_samples)

    # generate solution snapshots
    primal_snapshots = fom.solution_space.empty()
    fom_outputs = []

    # construct training data
    for mu in training_set:
        comp_data = fom.compute(output=True, solution=True, mu=mu)
        primal_snapshots.append(comp_data['solution'])
        fom_outputs.append(comp_data['output'])

    # apply POD on bases
    product = fom.h1_0_semi_product
    primal_reduced_basis, _ = pod(primal_snapshots, modes=modes, product=product)
    RB_reductor = reductor(fom, RB=primal_reduced_basis, product=product,
                           coercivity_estimator=coercivity_estimator)

    # rom
    rom = RB_reductor.reduce()

    results_full = {'fom': [], 'rom': [], 'err': [], 'est': []}
    for i, mu in enumerate(training_set):
        s_fom = fom_outputs[i]
        s_rom, s_est = rom.output(return_error_estimate=True, mu=mu,
                                  return_error_estimate_vector=False)
        results_full['fom'].append(s_fom)
        results_full['rom'].append(s_rom)
        results_full['err'].append(np.linalg.norm(np.abs(s_fom[-1]-s_rom[-1])))
        results_full['est'].append(s_est)

        # just for testing purpose
        assert np.linalg.norm(np.abs(s_rom-s_fom)) <= s_est + 1e-8

        # also test return_estimate_vector and return_error_sequence functionality but do not use it
        s_rom_, s_est_ = rom.output(return_error_estimate=True, mu=mu,
                                    return_error_estimate_vector=True)
        assert np.allclose(s_rom, s_rom_)
        assert np.allclose(s_est, np.linalg.norm(s_est_))
        if fom_number in [3, 4]:
            s_rom__, s_est__ = rom.output(return_error_estimate=True, mu=mu, return_error_sequence=True)
            s_rom___, s_est___ = rom.output(return_error_estimate=True, mu=mu,
                                            return_error_estimate_vector=True,
                                            return_error_sequence=True)
            # s_rom always stays the same
            assert np.allclose(s_rom, s_rom__, s_rom___)
            assert s_est__[-1] == s_est
            assert np.allclose(s_est__, np.linalg.norm(s_est___, axis=1))

    # plot result
    plt.figure()
    plt.semilogy(np.arange(len(training_set)), results_full['err'], 'k', label=f'output error basis size {modes}')
    plt.semilogy(np.arange(len(training_set)), results_full['est'], 'k--', label=f'output estimate basis size {modes}')
    plt.title(f'Error and estimate for {modes} basis functions for parameters in training set')
    plt.legend()

    # estimator study for smaller number of basis functions
    modes_set = np.arange(1, rom.solution_space.dim+1)
    max_errs, max_ests, min_errs, min_ests = [], [], [], []
    for mode in modes_set:
        max_err, max_est, min_err, min_est = 0, 0, 1000, 1000
        rom = RB_reductor.reduce(mode)
        for i, mu in enumerate(training_set):
            s_fom = fom_outputs[i]
            s_rom, s_est = rom.output(return_error_estimate=True, mu=mu)
            max_err = max(max_err, np.linalg.norm(np.abs(s_fom-s_rom)))
            max_est = max(max_est, s_est)
            min_err = min(min_err, np.linalg.norm(np.abs(s_fom-s_rom)))
            min_est = min(min_est, s_est)
        max_errs.append(max_err)
        max_ests.append(max_est)
        min_errs.append(min_err)
        min_ests.append(min_est)

    plt.figure()
    plt.semilogy(modes_set, max_errs, 'k', label='max error')
    plt.semilogy(modes_set, max_ests, 'k--', label='max estimate')
    plt.semilogy(modes_set, min_errs, 'g', label='min error')
    plt.semilogy(modes_set, min_ests, 'g--', label='min estimate')
    plt.legend()
    plt.title('Evolution of maximum error and estimate for different RB sizes')
    plt.show()
def create_fom(grid_intervals, vector_valued_output=False):
    """Build a 2x2 thermal-block full-order model discretized with continuous FEM.

    params:
        grid_intervals: number of grid intervals per dimension (mesh diameter
            is 1/grid_intervals).
        vector_valued_output: if True, attach two L2 output functionals
            (the second scaled by 0.5) instead of one.
    returns:
        The discretized stationary model.
    """
    problem = thermal_block_problem([2, 2])
    output_functional = ConstantFunction(1, dim_domain=2)
    if vector_valued_output:
        outputs = [('l2', output_functional), ('l2', output_functional * 0.5)]
    else:
        outputs = [('l2', output_functional)]
    problem = problem.with_(outputs=outputs)
    fom, _ = discretize_stationary_cg(problem, diameter=1./grid_intervals)
    return fom
# Script entry point: delegate to the CLI runner (presumably a typer/pyMOR
# `run(main)` wrapper defined earlier in the file — TODO confirm).
if __name__ == '__main__':
    run(main)
|
<filename>parsers/sourcecode/graph2subtokengraph.py
"""
Usage:
graph2otherformat.py [options] INPUTS_FILE REWRITTEN_OUTPUTS_FILE
Options:
-h --help Show this screen.
--debug Enable debug routines. [default: False]
"""
from docopt import docopt
import pdb
import sys
import traceback
from typing import Iterable, List, Dict
import re
from collections import defaultdict
from data.utils import iteratate_jsonl_gz, save_jsonl_gz
from parsers.sourcecode.utils import subtokenizer
IDENTIFIER_REGEX = re.compile(r"[a-zA-Z_][a-zA-Z_0-9]*")
def graph_transformer(initial_graph: Dict) -> Dict:
    """Rewrite a source-code graph so multi-subtoken identifiers are split.

    Backbone identifier tokens that split into more than one subtoken get one
    new node per subtoken, connected to the original token node by "Subtoken"
    edges; the backbone sequence and its "NextToken" chain are rebuilt over
    the subtoken nodes.

    params:
        initial_graph: dict with "edges" (iterable of (type, from, to)
            triples), "node_labels" (list of node label strings) and
            "backbone_sequence" (list of node indices for the token stream).
    returns:
        A dict with the same three keys describing the rewritten graph.
    """
    # edge_type -> from_idx -> {to_idx}; sets deduplicate parallel edges.
    all_edges = defaultdict(lambda: defaultdict(set))
    for edge_type, from_idx, to_idx in initial_graph["edges"]:
        all_edges[edge_type][from_idx].add(to_idx)

    backbone_seq = initial_graph["backbone_sequence"]  # type: List[int]
    nodes = initial_graph["node_labels"]  # type: List[str]

    # Find backbone identifier tokens that split into two or more subtokens.
    nodes_to_subtokenize = {}  # type: Dict[int, List[str]]
    for node_idx in backbone_seq:
        if not IDENTIFIER_REGEX.match(nodes[node_idx]):
            continue
        subtokens = subtokenizer(nodes[node_idx])
        if len(subtokens) > 1:
            nodes_to_subtokenize[node_idx] = subtokens

    # Add subtoken nodes and connect them to their original token node.
    token_node_ids_to_subtoken_ids = defaultdict(list)  # type: Dict[int, List[int]]

    def add_node(node_name: str) -> int:
        # Append a node label and return its (new) index.
        idx = len(nodes)
        nodes.append(node_name)
        return idx

    for token_node_idx, subtokens in nodes_to_subtokenize.items():
        for subtoken in subtokens:
            subtoken_node_idx = add_node(subtoken)
            token_node_ids_to_subtoken_ids[token_node_idx].append(subtoken_node_idx)
            all_edges["Subtoken"][token_node_idx].add(subtoken_node_idx)

    # Rebuild the backbone sequence, replacing split tokens by their subtokens.
    update_backbone_seq = []  # type: List[int]
    for node_idx in backbone_seq:
        if node_idx in token_node_ids_to_subtoken_ids:
            update_backbone_seq.extend(token_node_ids_to_subtoken_ids[node_idx])
        else:
            update_backbone_seq.append(node_idx)

    # Chain the updated backbone with NextToken edges.
    for i in range(1, len(update_backbone_seq)):
        all_edges["NextToken"][update_backbone_seq[i - 1]].add(update_backbone_seq[i])

    # Flatten the edge map back to (type, from, to) triples.
    flattened_edges = []
    for edge_type, edges_of_type in all_edges.items():
        for from_idx, to_idxes in edges_of_type.items():
            for to_idx in to_idxes:
                flattened_edges.append((edge_type, from_idx, to_idx))
    # BUG FIX: return the rebuilt sequence. The original returned the stale
    # pre-split `backbone_seq`, discarding the subtoken backbone (and leaving
    # the freshly added NextToken chain dangling off-sequence).
    return dict(edges=flattened_edges, node_labels=nodes,
                backbone_sequence=update_backbone_seq)
def transform_graphs(input_file: str) -> Iterable[Dict]:
    """Lazily read graphs from a gzipped-JSONL file and yield transformed copies."""
    return (graph_transformer(graph) for graph in iteratate_jsonl_gz(input_file))
def run(args):
    """Transform every graph in the input file and write the results out."""
    input_path = args["INPUTS_FILE"]
    output_path = args["REWRITTEN_OUTPUTS_FILE"]
    save_jsonl_gz(output_path, transform_graphs(input_path))
if __name__ == "__main__":
args = docopt(__doc__)
try:
run(args)
except:
if args.get("--debug", False):
_, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
else:
raise
|
<gh_stars>0
import math
import os
import torch
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import heat_map_utils
from config_file import config
# Check the availability of GPUs
# Module-level device used by all models/tensors below: first CUDA GPU when
# available, CPU otherwise.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
    print("\nUsing one GPU:", torch.cuda.get_device_name(0))
else:
    device = torch.device('cpu')
class SpatialSoftMax(torch.nn.Module):
    """Spatial soft-argmax layer.

    Converts each H x W heat map into the softmax-weighted expected (x, y)
    coordinate over a fixed grid of normalized [-1, 1] positions.
    """

    def __init__(self, height, width, channel, temperature=None):
        """
        Params:
            @ height/width: spatial size of the incoming heat maps
            @ channel: number of heat-map channels (kept for reference)
            @ temperature: if truthy, a learnable softmax temperature
              initialized to this value; otherwise fixed at 1.0
        """
        super(SpatialSoftMax, self).__init__()
        self.height = height
        self.width = width
        self.channel = channel
        if temperature:
            self.temperature = Parameter(torch.ones(1) * temperature)
        else:
            self.temperature = 1.
        # Normalized coordinate grids, flattened to (H*W,) and registered as
        # buffers so they move with the module across devices.
        pos_x, pos_y = np.meshgrid(
            np.linspace(-1., 1., self.height),
            np.linspace(-1., 1., self.width)
        )
        pos_x = torch.from_numpy(pos_x.reshape(self.height * self.width)).float()
        pos_y = torch.from_numpy(pos_y.reshape(self.height * self.width)).float()
        self.register_buffer('pos_x', pos_x)
        self.register_buffer('pos_y', pos_y)

    def forward(self, heat_maps):
        """
        Params:
            @ heat_maps: size B x K x W x H
        Output size: (B*K) x 2
        """
        heat_maps = heat_maps.view(-1, self.height * self.width)
        # BUG FIX: `self.temperature` was registered in __init__ but never
        # applied here, so a learnable temperature had no effect. Scaling the
        # logits is a no-op for the default temperature of 1.0.
        softmax_attention = F.softmax(heat_maps / self.temperature, dim=-1)
        expected_x = torch.sum(self.pos_x * softmax_attention, dim=1, keepdim=True)
        expected_y = torch.sum(self.pos_y * softmax_attention, dim=1, keepdim=True)
        expected_xy = torch.cat([expected_x, expected_y], 1)
        return expected_xy
def save_list(file_name, lst):
    """Write *lst* to *file_name* as space-separated values (no brackets or commas)."""
    serialized = str(lst)
    for unwanted in ('[', ']', ','):
        serialized = serialized.replace(unwanted, '')
    with open(file_name, 'w') as out:
        out.write(serialized)
def save_multilist(file_name, multilst):
    """Write each inner list of *multilst* on its own line, space-separated."""
    with open(file_name, 'w') as out:
        for inner in multilst:
            row = str(inner).replace('[', '').replace(']', '').replace(',', '')
            out.write(row + '\n')
class MetricsRecorder(object):
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
"""
__slots__ = "hm_loss_fn", "calc_kps_with_softmax", "pose2d_loss_fn", "num_train_iters", \
"num_val_iters", "writer", "val_hm_loss", "val_PCK_of_each_joint", "val_EPE_of_each_joint", \
"val_hm_loss_min", "val_EPE_min", "new_val_flag", "count", "accumulated_loss", \
"val_PCK_max", "train_hm_loss", "train_kps_loss", "train_total_loss", "val_avg_PCK", "val_avg_EPE"
def __init__(self):
self.hm_loss_fn = torch.nn.MSELoss(reduction='sum') # Mean-square error: sum((A-B).^2)
self.calc_kps_with_softmax = SpatialSoftMax(config.heat_map_size, config.heat_map_size, 21,
temperature=True).to(device)
self.pose2d_loss_fn = torch.nn.MSELoss(reduction='sum') # Mean-square error: sum((A-B).^2)
self.num_train_iters = 0
self.num_val_iters = 0
self.writer = SummaryWriter()
self.val_hm_loss = []
self.val_PCK_of_each_joint = [[] for _ in range(21)] # The percentages of correct key points
self.val_EPE_of_each_joint = [[] for _ in range(21)] # The end-point errors of correct key points
self.val_avg_PCK = []
self.val_avg_EPE = [] # unit: px
self.val_hm_loss_min = 1e6
self.val_EPE_min = 1e6
self.val_PCK_max = 0
self.train_hm_loss = []
self.train_kps_loss = []
self.train_total_loss = []
def process_training_output(self, hm_pred: torch.Tensor, hm_gn: torch.Tensor, kps_gn: torch.Tensor):
"""
This function computes all loss functions and record current performance.
params:
@ hm_pred: 21 predicted heat maps of size B x 21(kps) x 64(W) x 64(H)
@ hm_gn: The ground truth heat maps of size B x 21(kps) x 64(W) x 64(H)
@ kps_gn: The 2D key point annotations of size B x K x 2
retval:
@ hm_loss: heat map loss
"""
# 1. Heat map loss: the Frobenius norm sqrt(sum((A-B).^2))
hm_loss = self.hm_loss_fn(hm_pred, hm_gn.float()) # size: a scalar
# 2. 2D pose loss: the average distance between predicted key points and the ground truth
kps_pred = self.calc_kps_with_softmax(hm_pred) # size: (B*21) x 2
kps_gn = kps_gn.view(-1, 2).to(device) # change its size to (B*21) x 2
kps_loss = torch.tensor(0) #self.pose2d_loss_fn(kps_pred, kps_gn) # loss function: Euclidean distance, size:
total_loss = config.hm_loss_weight * hm_loss # + config.kps_loss_weight * kps_loss
self.num_train_iters += 1
if self.num_train_iters % config.record_period == 0:
print("{}/{} iterations have been finished".format(self.num_train_iters, config.num_iters))
self.train_hm_loss.append(hm_loss.item())
print("heat map loss ({:.2e}): {:.4f}".format(config.hm_loss_weight, hm_loss.item()))
self.train_kps_loss.append(kps_loss.item())
print("2D Pose Loss ({:.2e}): {:.4f}".format(config.kps_loss_weight, kps_loss.item()))
self.train_total_loss.append(total_loss.item())
print("Total Loss: {:.4f}\n".format(total_loss.item()))
self.writer.add_scalar('HM_Loss/Training', hm_loss.item(), self.num_train_iters)
self.writer.add_scalar('EPE_Loss/Training', kps_loss.item(), self.num_train_iters)
self.writer.add_scalar('Total_Loss/Training', total_loss.item(), self.num_train_iters)
return total_loss
def eval(self):
self.val_hm_loss.append(0)
self.val_avg_PCK.append(0)
self.val_avg_EPE.append(0)
for i in range(21):
self.val_PCK_of_each_joint[i].append(0)
self.val_EPE_of_each_joint[i].append(0)
def finish_eval(self):
is_best_HM_loss, is_best_EPE, is_best_PCK = False, False, False
self.val_hm_loss[-1] /= config.num_train_samples
for i in range(21):
self.val_PCK_of_each_joint[i][-1] /= config.num_train_samples
self.val_EPE_of_each_joint[i][-1] /= config.num_train_samples
self.val_avg_EPE[-1] /= config.num_train_samples
self.val_avg_PCK[-1] /= (config.num_train_samples * 21)
self.num_val_iters += 1
self.writer.add_scalar('HM_Loss/Validation', self.val_hm_loss[-1], self.num_val_iters)
self.writer.add_scalar('PCK/Validation', self.val_avg_PCK[-1], self.num_val_iters)
self.writer.add_scalar('EPE/Validation', self.val_avg_EPE[-1], self.num_val_iters)
if self.val_EPE_min > self.val_avg_EPE[-1]:
self.val_EPE_min = self.val_avg_EPE[-1]
is_best_EPE = True
if self.val_PCK_max < self.val_avg_PCK[-1]:
self.val_PCK_max = self.val_avg_PCK[-1]
is_best_PCK = True
if self.val_hm_loss_min > self.val_hm_loss[-1]:
self.val_hm_loss_min = self.val_hm_loss[-1]
iis_best_HM_loss = True
return is_best_HM_loss, is_best_EPE, is_best_PCK
def process_validation_output(self, hm_pred: torch.Tensor, hm_gn: torch.Tensor, kps_gn: torch.Tensor):
"""
This function computes all loss functions and record current performance.
params:
@ hm_pred: 21 predicted heat maps of size B x 21(kps) x 64(W) x 64(H)
@ hm_gn: The ground truth heat maps of size B x 21(kps) x 64(W) x 64(H)
@ kps_gn: The 2D key point annotations of size B x K x 2
retval:
@ flag: True means the current model is better.
"""
hm_loss = self.hm_loss_fn(hm_pred, hm_gn.float())
self.val_hm_loss[-1] += hm_loss.item()
# size: B x K
EPE_each_batch_each_joint, PCK_each_batch_each_joint = self.calc_PCK_EPE(hm_pred, kps_gn, 10)
EPE_each_joint = torch.sum(EPE_each_batch_each_joint, dim=0) # size: K
PCK_each_joint = torch.sum(PCK_each_batch_each_joint, dim=0) # size: K
for i in range(21):
self.val_PCK_of_each_joint[i][-1] += PCK_each_joint[i].item()
self.val_EPE_of_each_joint[i][-1] += EPE_each_joint[i].item()
self.val_avg_EPE[-1] += torch.mean(EPE_each_joint).item()
self.val_avg_PCK[-1] += torch.sum(PCK_each_joint).item()
# probability of correct points
def calc_PCK_EPE(self, hm_pred: torch.tensor, kps_gn: torch.tensor, th: int):
"""
This function calculates PCK and EPE for each joint but returns average PCK and EPE over all joints.
params:
@ hm_pred: 21 predicted heat maps of size B x K(21 kps) x 64(W) x 64(H)
@ kps_gn: The 2D key point annotations of size B x K x 2
@ th: The threshold within which a predicted key point is seen as correct
"""
# find the predicted key points with the most probability. Size: B x K x 3
pred_kp = heat_map_utils.compute_uv_from_heatmaps(hm=hm_pred, resize_dim=config.heat_map_size)
pred_kp = pred_kp.to(device)
# calculate end-point error
EPE_each_batch_each_joint = torch.linalg.norm(pred_kp[:, :, 0:2] - kps_gn, dim=2) # size: B x K
PCK_each_batch_each_joint = EPE_each_batch_each_joint < th # size: B x K
del pred_kp
return EPE_each_batch_each_joint, PCK_each_batch_each_joint
def close(self):
self.writer.flush()
self.writer.close()
save_list(os.path.join(config.history_dir, config.model_name + '_train_hm_loss.txt'), self.train_hm_loss)
save_list(os.path.join(config.history_dir, config.model_name + '_train_kps_loss.txt'), self.train_kps_loss)
save_list(os.path.join(config.history_dir, config.model_name + '_train_total_loss.txt'), self.train_total_loss)
save_list(os.path.join(config.history_dir, config.model_name + '_val_hm_loss.txt'), self.val_hm_loss)
save_list(os.path.join(config.history_dir, config.model_name + '_val_avg_EPE.txt'), self.val_avg_EPE)
save_list(os.path.join(config.history_dir, config.model_name + '_val_avg_PCK.txt'), self.val_avg_PCK)
save_multilist(os.path.join(config.history_dir, config.model_name + '_val_PCK_of_each_joint.txt'), self.val_PCK_of_each_joint)
save_multilist(os.path.join(config.history_dir, config.model_name + '_val_EPE_of_each_joint.txt'),
self.val_EPE_of_each_joint)
# data = torch.zeros([1, 3, 3, 3])
# data[0, 0, 0, 1] = 10
# data[0, 1, 1, 1] = 10
# data[0, 2, 1, 2] = 10
# layer = SpatialSoftMax(3, 3, 3, temperature=1)
# print(layer(data))
|
"""
One-off script to opt-out users for email from orgs.
Input: A CSV file with a user_id,org pair per line. For example:
1962921,FooX
5506350,BarX
5709986,FooX
Lines formatted with a double-quoted org also work fine, such as:
5506350,"BarX"
Opts-out every specified user/org combo row from email by setting the 'email-optin' tag to 'False'.
If the user/org combo does not currently exist in the table, a row will be created for it which
will have the 'email-optin' tag set to 'False'.
"""
import csv
import logging
import time
from textwrap import dedent
from django.core.management.base import BaseCommand, CommandError
from django.db import connections
from django.db.utils import DatabaseError
log = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    One-off script to opt-out users for email from orgs.
    Input: A CSV file with a user_id,org pair per line. For example:
    1962921,FooX
    5506350,BarX
    5709986,FooX
    Lines formatted with a double-quoted org also work fine, such as:
    5506350,"BarX"
    Opts-out every specified user/org combo row from email by setting the 'email-optin' tag to 'False'.
    If the user/org combo does not currently exist in the table, a row will be created for it which
    will be have the 'email-optin' tag set to 'False'.
    """
    help = dedent(__doc__).strip()

    # Default number of user/org opt-outs to perform in each DB transaction.
    DEFAULT_CHUNK_SIZE = 1000

    # Default number of seconds to sleep between chunked user/org email opt-outs.
    DEFAULT_SLEEP_BETWEEN_OPTOUTS = 0.0

    def add_arguments(self, parser):
        """Register the command-line options accepted by this command."""
        parser.add_argument(
            '--dry_run',
            action='store_true',
            help='Print proposed changes, but take no action.'
        )
        parser.add_argument(
            '--chunk_size',
            default=self.DEFAULT_CHUNK_SIZE,
            type=int,
            help='Maximum number of user/org opt-outs to perform in each DB transaction.'
        )
        parser.add_argument(
            '--sleep_between',
            default=self.DEFAULT_SLEEP_BETWEEN_OPTOUTS,
            type=float,
            help='Seconds to sleep between chunked opt-outs.'
        )
        parser.add_argument(
            '--optout_csv_path',
            required=True,
            help='Filepath to CSV file containing user/org email opt-outs.'
        )

    def handle(self, *args, **options):
        """
        Execute the command.

        Reads the opt-out CSV, then upserts rows in chunks of --chunk_size,
        sleeping --sleep_between seconds after each chunk. Raises CommandError
        on invalid options; re-raises DatabaseError after rolling back.
        """
        dry_run = options['dry_run']
        chunk_size = options.get('chunk_size', self.DEFAULT_CHUNK_SIZE)
        sleep_between = options.get('sleep_between', self.DEFAULT_SLEEP_BETWEEN_OPTOUTS)
        optout_path = options['optout_csv_path']
        if chunk_size <= 0:
            raise CommandError(f'Only positive chunk size is allowed ({chunk_size}).')
        if sleep_between < 0:
            raise CommandError(f'Only non-negative sleep between seconds is allowed ({sleep_between}).')
        # Read the CSV file. Log the number of user/org rows read.
        with open(optout_path) as csv_file:
            optout_reader = csv.reader(csv_file)
            optout_rows = list(optout_reader)
        log.info("Read %s opt-out rows from CSV file '%s'.", len(optout_rows), optout_path)
        cursor = connections['default'].cursor()
        # Update/insert the rows one chunk at a time.
        curr_row_idx = 0
        while curr_row_idx < len(optout_rows):
            start_idx = curr_row_idx
            end_idx = min(start_idx + chunk_size - 1, len(optout_rows) - 1)
            log.info("Attempting opt-out for rows (%s, %s) through (%s, %s)...",
                     optout_rows[start_idx][0], optout_rows[start_idx][1],
                     optout_rows[end_idx][0], optout_rows[end_idx][1])
            # SECURITY FIX: build the query with DB-API placeholders instead of
            # interpolating CSV values directly into the SQL string, so the
            # driver escapes the user-supplied user_id/org values.
            query = 'INSERT INTO user_api_userorgtag (`user_id`, `org`, `key`, `value`, `created`, `modified`) VALUES '
            query_values = []
            query_params = []
            for idx in range(start_idx, end_idx + 1):
                query_values.append('(%s,%s,"email-optin","False",NOW(),NOW())')
                query_params.extend((optout_rows[idx][0], optout_rows[idx][1]))
            query += ','.join(query_values)
            query += ' ON DUPLICATE KEY UPDATE value="False", modified=NOW();'
            # Execute the SQL query.
            if dry_run:
                log.info(query)
                log.info("Parameters: %s", query_params)
            else:
                try:
                    cursor.execute('START TRANSACTION;')
                    cursor.execute(query, query_params)
                except DatabaseError as err:
                    cursor.execute('ROLLBACK;')
                    log.error("Rolled-back opt-out for rows (%s, %s) through (%s, %s): %s",
                              optout_rows[start_idx][0], optout_rows[start_idx][1],
                              optout_rows[end_idx][0], optout_rows[end_idx][1],
                              str(err))
                    raise
                else:
                    cursor.execute('COMMIT;')
                    log.info("Committed opt-out for rows (%s, %s) through (%s, %s).",
                             optout_rows[start_idx][0], optout_rows[start_idx][1],
                             optout_rows[end_idx][0], optout_rows[end_idx][1])
            log.info("Sleeping %s seconds...", sleep_between)
            time.sleep(sleep_between)
            curr_row_idx += chunk_size
|
<reponame>prakHr/opencv-computer_vision
#Accessing the webcam
'''
import cv2
cap = cv2.VideoCapture(0)
# Check if the webcam is opened correctly
if not cap.isOpened():
raise IOError("Cannot open webcam")
while True:
ret, frame = cap.read()
frame = cv2.resize(frame, None, fx=0.5, fy=0.5,interpolation=cv2.INTER_AREA)
cv2.imshow('Input', frame)
c = cv2.waitKey(1)
if c == 27:
break
cap.release()
cv2.destroyAllWindows()
'''
#Keyboard Inputs
'''
import argparse
import cv2
def argument_parser():
parser = argparse.ArgumentParser(description="Change color space of the input video stream using keyboard controls. The control keys are: Grayscale - 'g', YUV - 'y', HSV - 'h'")
return parser
if __name__=='__main__':
args = argument_parser().parse_args()
cap = cv2.VideoCapture(0)
# Check if the webcam is opened correctly
if not cap.isOpened():
raise IOError("Cannot open webcam")
cur_char = -1
prev_char = -1
while True:
# Read the current frame from webcam
ret, frame = cap.read()
# Resize the captured image
frame = cv2.resize(frame, None, fx=0.5, fy=0.5,interpolation=cv2.INTER_AREA)
#Listen to the keyboard events
c = cv2.waitKey(1)#returns the ASCII value of the keyboard input
if c == 27:
break
if c > -1 and c != prev_char:
cur_char = c
prev_char = c
if cur_char == ord('g'):
output = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
elif cur_char == ord('y'):
output = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV)
elif cur_char == ord('h'):
output = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
else:
output = frame
cv2.imshow('Webcam', output)
cap.release()
cv2.destroyAllWindows()
'''
#Mouse inputs
'''
import cv2
import numpy as np
def detect_quadrant(event, x, y, flags, param):#x,y coordinate is obtained after mouse clicking
if event == cv2.EVENT_LBUTTONDOWN:
if x > width/2:
if y > height/2:
point_top_left = (int(width/2), int(height/2))
point_bottom_right = (width-1, height-1)
else:
point_top_left = (int(width/2), 0)
point_bottom_right = (width-1, int(height/2))
else:
if y > height/2:
point_top_left = (0, int(height/2))
point_bottom_right = (int(width/2), height-1)
else:
point_top_left = (0, 0)
point_bottom_right = (int(width/2), int(height/2))
cv2.rectangle(img, (0,0), (width-1,height-1), (255,255,255), -1)#white rectangle
cv2.rectangle(img, point_top_left, point_bottom_right, (0,100,0),-1)#green rectangle
if __name__=='__main__':
width, height = 640, 480
img = 255 * np.ones((height, width, 3), dtype=np.uint8)
cv2.namedWindow('Input window')
cv2.setMouseCallback('Input window', detect_quadrant)
while True:
cv2.imshow('Input window', img)
c = cv2.waitKey(10)
if c == 27:break
cv2.destroyAllWindows()
'''
#To see list of all mouse events
'''
import cv2
print([x for x in dir(cv2) if x.startswith('EVENT')])
events=['EVENT_FLAG_ALTKEY', 'EVENT_FLAG_CTRLKEY', 'EVENT_FLAG_LBUTTON', 'EVENT_FLAG_MBUTTON', 'EVENT_FLAG_RBUTTON', 'EVENT_FLAG_SHIFTKEY', 'EVENT_LBUTTONDBLCLK', 'EVENT_LBUTTONDOWN', 'EVENT_LBUTTONUP', 'EVENT_MBUTTONDBLCLK', 'EVENT_MBUTTONDOWN', 'EVENT_MBUTTONUP', 'EVENT_MOUSEHWHEEL', 'EVENT_MOUSEMOVE', 'EVENT_MOUSEWHEEL', 'EVENT_RBUTTONDBLCLK', 'EVENT_RBUTTONDOWN', 'EVENT_RBUTTONUP']
'''
#Interacting with a live video stream
'''
import cv2
import numpy as np
def draw_rectangle(event, x, y, flags, params):
"""
Whenever we draw a rectangle using the
mouse, we basically have to detect three types of mouse events: mouse click, mouse
movement, and mouse button release. This is exactly what we do in this function.
Whenever we detect a mouse click event, we initialize the top left point of the rectangle.
As we move the mouse, we select the region of interest by keeping the current position as
the bottom right point of the rectangle.
Once we have the region of interest, we just invert the pixels to apply the “negative film”
effect. We subtract the current pixel value from 255 and this gives us the desired effect.
When the mouse movement stops and button-up event is detected, we stop updating the
bottom right position of the rectangle. We just keep displaying this image until another
mouse click event is detected.
"""
global x_init, y_init, drawing, top_left_pt, bottom_right_pt
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
x_init, y_init = x, y
elif event == cv2.EVENT_MOUSEMOVE:
if drawing:
top_left_pt = (min(x_init, x), min(y_init, y))
bottom_right_pt = (max(x_init, x), max(y_init, y))
img[y_init:y, x_init:x] = 255 - img[y_init:y, x_init:x]
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
top_left_pt = (min(x_init, x), min(y_init, y))
bottom_right_pt = (max(x_init, x), max(y_init, y))
img[y_init:y, x_init:x] = 255 - img[y_init:y, x_init:x]
if __name__=='__main__':
drawing = False
top_left_pt, bottom_right_pt = (-1,-1), (-1,-1)
cap = cv2.VideoCapture(0)
# Check if the webcam is opened correctly
if not cap.isOpened():
raise IOError("Cannot open webcam")
cv2.namedWindow('Webcam')
cv2.setMouseCallback('Webcam', draw_rectangle)
while True:
ret, frame = cap.read()
img = cv2.resize(frame, None, fx=0.5, fy=0.5,interpolation=cv2.INTER_AREA)
(x0,y0), (x1,y1) = top_left_pt, bottom_right_pt
img[y0:y1, x0:x1] = 255 - img[y0:y1, x0:x1]
cv2.imshow('Webcam', img)
c = cv2.waitKey(1)
if c == 27:
break
cap.release()
cv2.destroyAllWindows()
'''
#Apply median filter to an image
'''
import cv2
import numpy as np
img = cv2.imread('input.png')
output = cv2.medianBlur(img, 7)#size of kernel related to neighborhood size
cv2.imshow('Input', img)
cv2.imshow('Median filter', output)
cv2.waitKey()
'''
#####################################
# #
# Cartoonizing an image ######
#####################################
import cv2
import numpy as np
def cartoonize_image(img, ds_factor=4, sketch_mode=False):
    """Return a cartoonized (or pencil-sketch) version of a BGR image.

    params:
        img: input BGR image (as returned by cv2.VideoCapture/imread)
        ds_factor: downscale factor used before the bilateral filtering pass
        sketch_mode: if True, return only the black-and-white edge sketch
    returns:
        BGR image: the sketch (sketch_mode=True) or the cartoonized image.
    """
    # Convert image to grayscale and remove salt-and-pepper noise.
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_gray = cv2.medianBlur(img_gray, 7)
    # Detect edges and threshold them; 'mask' is the line-art sketch.
    edges = cv2.Laplacian(img_gray, cv2.CV_8U, ksize=5)
    ret, mask = cv2.threshold(edges, 100, 255, cv2.THRESH_BINARY_INV)
    if sketch_mode:
        # Thicken the sketch lines with an erosion and smooth the result.
        img_sketch = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
        kernel = np.ones((3, 3), np.uint8)
        img_eroded = cv2.erode(img_sketch, kernel, iterations=1)
        return cv2.medianBlur(img_eroded, 5)
    # Resize the image to a smaller size for faster computation.
    img_small = cv2.resize(img, None, fx=1.0/ds_factor, fy=1.0/ds_factor,
                           interpolation=cv2.INTER_AREA)
    num_repetitions = 10
    sigma_color = 5
    sigma_space = 7
    size = 5
    # Apply the bilateral filter repeatedly to flatten colors while keeping edges.
    for i in range(num_repetitions):
        img_small = cv2.bilateralFilter(img_small, size, sigma_color, sigma_space)
    img_output = cv2.resize(img_small, None, fx=ds_factor, fy=ds_factor,
                            interpolation=cv2.INTER_LINEAR)
    # Add the thick boundary lines to the image using 'AND' operator.
    # (Removed a dead store: the original pre-allocated `dst = np.zeros(...)`
    # and immediately overwrote it.)
    dst = cv2.bitwise_and(img_output, img_output, mask=mask)
    return dst
# Interactive demo: stream the webcam and toggle rendering mode from the
# keyboard ('s' = sketch, 'c' = cartoon, anything else resets; Esc quits).
if __name__=='__main__':
    cap = cv2.VideoCapture(0)
    # Last committed mode key and last raw key seen (to ignore repeats).
    cur_char = -1
    prev_char = -1
    while True:
        ret, frame = cap.read()
        # Halve the frame size to keep the cartoon filter responsive.
        frame = cv2.resize(frame, None, fx=0.5, fy=0.5,interpolation=cv2.INTER_AREA)
        c = cv2.waitKey(1)
        # 27 is the ASCII code of the Esc key.
        if c == 27:break
        if c > -1 and c != prev_char:
            cur_char = c
            prev_char = c
        if cur_char == ord('s'):
            cv2.imshow('Cartoonize', cartoonize_image(frame,sketch_mode=True))
        elif cur_char == ord('c'):
            cv2.imshow('Cartoonize', cartoonize_image(frame,sketch_mode=False))
        else:
            cv2.imshow('Cartoonize', frame)
    cap.release()
    cv2.destroyAllWindows()
|
# Credits
# https://www.geeksforgeeks.org/create-an-empty-file-using-python/
# https://www.geeksforgeeks.org/create-a-directory-in-python/
# https://pythonguides.com/python-copy-file/
import subprocess
import re
import os
import shutil
def log_rotate_configure(user_name):
    """Install logrotate and rewrite /etc/logrotate.conf with a fixed policy.

    Backs the current configuration up to /home/<user_name>/config_backups/,
    truncates it, then ensures the directives ``daily``, ``minsize 100M``,
    ``rotate 4``, ``compress`` and ``create`` are present.

    params:
        user_name: home-directory owner that receives the config backup.
    """
    subprocess.call(['apt-get', 'install', '-y', 'logrotate'])
    # BUG FIX: back the file up BEFORE truncating it — the original emptied
    # /etc/logrotate.conf first and then "backed up" the empty file.
    os.mkdir(r'/home/' + user_name + r'/config_backups')
    shutil.copyfile(r'/etc/logrotate.conf', r'/home/' +
                    user_name + r'/config_backups/logrotate.conf.backup')
    with open('/etc/logrotate.conf', "w") as opened_file:
        pass

    def _ensure_directive(pattern, replacement):
        # Rewrite the config in place: substitute any line matching `pattern`
        # with `replacement`. If a line already equals `replacement` verbatim,
        # stop early (NOTE(review): this drops the remainder of the file —
        # behaviour preserved from the original implementation; confirm it is
        # intended). If no line equals it, append the directive at the end.
        with open('/etc/logrotate.conf', "r") as config_file:
            lines = config_file.readlines()
        with open('/etc/logrotate.conf', "w") as config_file:
            for line in lines:
                config_file.write(re.sub(pattern, replacement, line))
                if replacement == line.strip():
                    break
            else:
                config_file.write(replacement + '\n')

    # A (possibly commented-out) daily/weekly/monthly line becomes 'daily'.
    daily_pattern = ('(^\s*[#]*\s*daily\s*$)|(^\s*[#]*\s*weekly\s*$)'
                     '|(^\s*[#]*\s*monthly\s*$)')
    _ensure_directive(daily_pattern, 'daily')
    _ensure_directive(r'^\s*[#]*\s*minsize.*$', 'minsize 100M')
    _ensure_directive(r'^\s*[#]*\s*rotate\s*[0-9]*$', 'rotate 4')
    _ensure_directive(r'^\s*[#]*\s*compress\s*$', 'compress')
    _ensure_directive(r'^\s*[#]*\s*create\s*$', 'create')
|
# -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/Coverage
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import typing
from pydantic import Field
from . import backboneelement, domainresource, fhirtypes
class Coverage(domainresource.DomainResource):
    """Insurance or medical plan or a payment agreement.

    Financial instrument which may be used to reimburse or pay for health care
    products and services.

    Disclaimer: Any field name that ends with ``__ext`` is not part of the
    Resource StructureDefinition; it is used to enable the Extensibility
    feature for FHIR Primitive Data Types.
    """

    resource_type = Field("Coverage", const=True)

    beneficiary: fhirtypes.ReferenceType = Field(
        None,
        alias="beneficiary",
        title="Plan Beneficiary",
        description=(
            "The party who benefits from the insurance coverage., the patient when "
            "services are provided."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Patient"],
    )

    contract: typing.List[fhirtypes.ReferenceType] = Field(
        None,
        alias="contract",
        title="Contract details",
        description="The policy(s) which constitute this insurance coverage.",
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Contract"],
    )

    dependent: fhirtypes.String = Field(
        None,
        alias="dependent",
        title="Dependent number",
        description="A unique identifier for a dependent under the coverage.",
        # if property is element of this resource.
        element_property=True,
    )
    dependent__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_dependent", title="Extension field for ``dependent``."
    )

    grouping: fhirtypes.CoverageGroupingType = Field(
        None,
        alias="grouping",
        title="Additional coverage classifications",
        description=(
            "A suite of underwrite specific classifiers, for example may be used to"
            " identify a class of coverage or employer group, Policy, Plan."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    identifier: typing.List[fhirtypes.IdentifierType] = Field(
        None,
        alias="identifier",
        title="The primary coverage ID",
        description=(
            "The main (and possibly only) identifier for the coverage - often "
            "referred to as a Member Id, Certificate number, Personal Health Number"
            " or Case ID. May be constructed as the concatination of the "
            "Coverage.SubscriberID and the Coverage.dependant."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    network: fhirtypes.String = Field(
        None,
        alias="network",
        title="Insurer network",
        description=(
            "The insurer-specific identifier for the insurer-defined network of "
            "providers to which the beneficiary may seek treatment which will be "
            "covered at the 'in-network' rate, otherwise 'out of network' terms and"
            " conditions apply."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    network__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_network", title="Extension field for ``network``."
    )

    order: fhirtypes.PositiveInt = Field(
        None,
        alias="order",
        title="Relative order of the coverage",
        description=(
            "The order of applicability of this coverage relative to other "
            "coverages which are currently inforce. Note, there may be gaps in the "
            "numbering and this does not imply primary, secondard etc. as the "
            "specific positioning of coverages depends upon the episode of care."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    order__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_order", title="Extension field for ``order``."
    )

    payor: typing.List[fhirtypes.ReferenceType] = Field(
        None,
        alias="payor",
        title="Identifier for the plan or agreement issuer",
        description=(
            "The program or plan underwriter or payor including both insurance and "
            "non-insurance agreements, such as patient-pay agreements. May provide "
            "multiple identifiers such as insurance company identifier or business "
            "identifier (BIN number)."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Organization", "Patient", "RelatedPerson"],
    )

    period: fhirtypes.PeriodType = Field(
        None,
        alias="period",
        title="Coverage start and end dates",
        description=(
            "Time period during which the coverage is in force. A missing start "
            "date indicates the start date isn't known, a missing end date means "
            "the coverage is continuing to be in force."
        ),
        # if property is element of this resource.
        element_property=True,
    )

    policyHolder: fhirtypes.ReferenceType = Field(
        None,
        alias="policyHolder",
        title="Owner of the policy",
        description=(
            "The party who 'owns' the insurance policy, may be an individual, "
            "corporation or the subscriber's employer."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Patient", "RelatedPerson", "Organization"],
    )

    relationship: fhirtypes.CodeableConceptType = Field(
        None,
        alias="relationship",
        title="Beneficiary relationship to the Subscriber",
        description="The relationship of beneficiary (patient) to the subscriber.",
        # if property is element of this resource.
        element_property=True,
    )

    sequence: fhirtypes.String = Field(
        None,
        alias="sequence",
        title="The plan instance or sequence counter",
        description=(
            "An optional counter for a particular instance of the identified "
            "coverage which increments upon each renewal."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    sequence__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_sequence", title="Extension field for ``sequence``."
    )

    status: fhirtypes.Code = Field(
        None,
        alias="status",
        title="active | cancelled | draft | entered-in-error",
        description="The status of the resource instance.",
        # if property is element of this resource.
        element_property=True,
        # note: Enum values can be used in validation,
        # but use in your own responsibilities, read official FHIR documentation.
        enum_values=["active", "cancelled", "draft", "entered-in-error"],
    )
    status__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_status", title="Extension field for ``status``."
    )

    subscriber: fhirtypes.ReferenceType = Field(
        None,
        alias="subscriber",
        title="Subscriber to the policy",
        description=(
            "The party who has signed-up for or 'owns' the contractual relationship"
            " to the policy or to whom the benefit of the policy for services "
            "rendered to them or their family is due."
        ),
        # if property is element of this resource.
        element_property=True,
        # note: Listed Resource Type(s) should be allowed as Reference.
        enum_reference_types=["Patient", "RelatedPerson"],
    )

    subscriberId: fhirtypes.String = Field(
        None,
        alias="subscriberId",
        title="ID assigned to the Subscriber",
        description="The insurer assigned ID for the Subscriber.",
        # if property is element of this resource.
        element_property=True,
    )
    subscriberId__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_subscriberId", title="Extension field for ``subscriberId``."
    )

    type: fhirtypes.CodeableConceptType = Field(
        None,
        alias="type",
        title="Type of coverage such as medical or accident",
        description=(
            "The type of coverage: social program, medical plan, accident coverage "
            "(workers compensation, auto), group health or payment by an individual"
            " or organization."
        ),
        # if property is element of this resource.
        element_property=True,
    )
class CoverageGrouping(backboneelement.BackboneElement):
    """Additional coverage classifications.

    A suite of underwriter-specific classifiers; for example, may be used to
    identify a class of coverage or employer group, Policy, Plan.

    Disclaimer: any field name ending with ``__ext`` is not part of the
    resource StructureDefinition; it exists only to enable the Extensibility
    feature for FHIR Primitive Data Types.
    """
    resource_type = Field("CoverageGrouping", const=True)
    # Human-readable label for ``class_fhir``.
    classDisplay: fhirtypes.String = Field(
        None,
        alias="classDisplay",
        title="Display text for the class",
        description="A short description for the class.",
        # if property is element of this resource.
        element_property=True,
    )
    classDisplay__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_classDisplay", title="Extension field for ``classDisplay``."
    )
    # Named ``class_fhir`` because ``class`` is a Python keyword; the FHIR
    # wire name is restored through the alias.
    class_fhir: fhirtypes.String = Field(
        None,
        alias="class",
        title="An identifier for the class",
        description=(
            "Identifies a style or collective of coverage issues by the "
            "underwriter, for example may be used to identify a class of coverage "
            "such as a level of deductables or co-payment."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    class__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_class", title="Extension field for ``class_fhir``."
    )
    # Employer-group / policy identifier.
    group: fhirtypes.String = Field(
        None,
        alias="group",
        title="An identifier for the group",
        description=(
            "Identifies a style or collective of coverage issued by the "
            "underwriter, for example may be used to identify an employer group. "
            "May also be referred to as a Policy or Group ID."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    group__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_group", title="Extension field for ``group``."
    )
    # Human-readable label for ``group``.
    groupDisplay: fhirtypes.String = Field(
        None,
        alias="groupDisplay",
        title="Display text for an identifier for the group",
        description="A short description for the group.",
        # if property is element of this resource.
        element_property=True,
    )
    groupDisplay__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_groupDisplay", title="Extension field for ``groupDisplay``."
    )
    # Benefit-collection (section/division) identifier.
    plan: fhirtypes.String = Field(
        None,
        alias="plan",
        title="An identifier for the plan",
        description=(
            "Identifies a style or collective of coverage issued by the "
            "underwriter, for example may be used to identify a collection of "
            "benefits provided to employees. May be referred to as a Section or "
            "Division ID."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    plan__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_plan", title="Extension field for ``plan``."
    )
    # Human-readable label for ``plan``.
    planDisplay: fhirtypes.String = Field(
        None,
        alias="planDisplay",
        title="Display text for the plan",
        description="A short description for the plan.",
        # if property is element of this resource.
        element_property=True,
    )
    planDisplay__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_planDisplay", title="Extension field for ``planDisplay``."
    )
    # Finer-grained subdivision of ``class_fhir``.
    subClass: fhirtypes.String = Field(
        None,
        alias="subClass",
        title="An identifier for the subsection of the class",
        description=(
            "Identifies a sub-style or sub-collective of coverage issues by the "
            "underwriter, for example may be used to identify a subclass of "
            "coverage such as a sub-level of deductables or co-payment."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    subClass__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_subClass", title="Extension field for ``subClass``."
    )
    # Human-readable label for ``subClass``.
    subClassDisplay: fhirtypes.String = Field(
        None,
        alias="subClassDisplay",
        title="Display text for the subsection of the subclass",
        description="A short description for the subclass.",
        # if property is element of this resource.
        element_property=True,
    )
    subClassDisplay__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_subClassDisplay", title="Extension field for ``subClassDisplay``."
    )
    # Finer-grained subdivision of ``group``.
    subGroup: fhirtypes.String = Field(
        None,
        alias="subGroup",
        title="An identifier for the subsection of the group",
        description=(
            "Identifies a style or collective of coverage issued by the "
            "underwriter, for example may be used to identify a subset of an "
            "employer group."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    subGroup__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_subGroup", title="Extension field for ``subGroup``."
    )
    # Human-readable label for ``subGroup``.
    subGroupDisplay: fhirtypes.String = Field(
        None,
        alias="subGroupDisplay",
        title="Display text for the subsection of the group",
        description="A short description for the subgroup.",
        # if property is element of this resource.
        element_property=True,
    )
    subGroupDisplay__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_subGroupDisplay", title="Extension field for ``subGroupDisplay``."
    )
    # Finer-grained subdivision of ``plan``.
    subPlan: fhirtypes.String = Field(
        None,
        alias="subPlan",
        title="An identifier for the subsection of the plan",
        description=(
            "Identifies a sub-style or sub-collective of coverage issued by the "
            "underwriter, for example may be used to identify a subset of a "
            "collection of benefits provided to employees."
        ),
        # if property is element of this resource.
        element_property=True,
    )
    subPlan__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_subPlan", title="Extension field for ``subPlan``."
    )
    # Human-readable label for ``subPlan``.
    subPlanDisplay: fhirtypes.String = Field(
        None,
        alias="subPlanDisplay",
        title="Display text for the subsection of the plan",
        description="A short description for the subplan.",
        # if property is element of this resource.
        element_property=True,
    )
    subPlanDisplay__ext: fhirtypes.FHIRPrimitiveExtensionType = Field(
        None, alias="_subPlanDisplay", title="Extension field for ``subPlanDisplay``."
    )
|
<gh_stars>0
import constants
from encoder import EncoderRNN
from decoder import AttnDecoderRNN
from util import time_str
from logger import log, write_training_log, save_dataframe, plot_and_save_histories
import time
from collections import OrderedDict
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
class Seq2Seq(nn.Module):
    """Encoder / attention-decoder sequence-to-sequence model.

    Bundles an EncoderRNN and an AttnDecoderRNN together with their SGD
    optimizers and an NLLLoss criterion, and provides a per-pair training
    step, a full training loop, and greedy decoding.

    NOTE(review): ``train`` below shadows ``nn.Module.train`` (the
    train/eval mode switch). The name is kept for interface compatibility,
    but callers must not rely on the inherited mode-setting behaviour.
    """

    def __init__(self, input_size, output_size, hidden_size,
                 learning_rate, teacher_forcing_ratio, device):
        super(Seq2Seq, self).__init__()
        self.teacher_forcing_ratio = teacher_forcing_ratio
        self.device = device
        self.encoder = EncoderRNN(input_size, hidden_size)
        self.decoder = AttnDecoderRNN(hidden_size, output_size)
        self.encoder_optimizer = optim.SGD(self.encoder.parameters(), lr=learning_rate)
        self.decoder_optimizer = optim.SGD(self.decoder.parameters(), lr=learning_rate)
        self.criterion = nn.NLLLoss()

    def train(self, input_tensor, target_tensor, max_length=constants.MAX_LENGTH):
        """Run one optimization step on a single (input, target) pair.

        Returns the average NLL loss per target token.
        Assumes input_tensor has at most max_length + 1 positions — TODO confirm.
        """
        encoder_hidden = self.encoder.initHidden()
        self.encoder_optimizer.zero_grad()
        self.decoder_optimizer.zero_grad()
        input_length = input_tensor.size(0)
        target_length = target_tensor.size(0)
        encoder_outputs = torch.zeros(max_length + 1, self.encoder.hidden_size, device=self.device)
        loss = 0
        for ei in range(input_length):
            encoder_output, encoder_hidden = self.encoder(input_tensor[ei], encoder_hidden)
            encoder_outputs[ei] = encoder_output[0, 0]
        decoder_input = torch.tensor([[constants.SOS_TOKEN]], device=self.device)
        decoder_hidden = encoder_hidden
        # idiom fix: direct boolean expression instead of `True if ... else False`
        use_teacher_forcing = np.random.random() < self.teacher_forcing_ratio
        if use_teacher_forcing:
            # Teacher forcing: feed the target as the next input
            for di in range(target_length):
                decoder_output, decoder_hidden, decoder_attention = self.decoder(
                    decoder_input, decoder_hidden, encoder_outputs)
                loss += self.criterion(decoder_output, target_tensor[di])
                decoder_input = target_tensor[di]  # Teacher forcing
        else:
            # Without teacher forcing: use its own prediction as the next input
            for di in range(target_length):
                decoder_output, decoder_hidden, decoder_attention = self.decoder(
                    decoder_input, decoder_hidden, encoder_outputs)
                _, topi = decoder_output.topk(1)
                decoder_input = topi.squeeze().detach()  # detach from history as input
                loss += self.criterion(decoder_output, target_tensor[di])
                if decoder_input.item() == constants.EOS_TOKEN:
                    break
        loss.backward()
        self.encoder_optimizer.step()
        self.decoder_optimizer.step()
        return loss.item() / target_length

    def trainIters(self, env, evaluator):
        """Run the main training loop, resuming from env.iters_completed.

        Every constants.LOG_EVERY iterations: evaluate on the validation
        set, append metrics to env.history, write logs/plots, and
        checkpoint training state via env.save_train().
        """
        start_total_time = time.time() - env.total_training_time
        start_epoch_time = time.time()  # Reset every LOG_EVERY iterations
        start_train_time = time.time()  # Reset every LOG_EVERY iterations
        total_loss = 0  # Reset every LOG_EVERY iterations
        # renamed loop variable from `iter` to avoid shadowing the builtin
        for iteration in range(env.iters_completed + 1, constants.NUM_ITER + 1):
            row = env.train_methods.iloc[np.random.randint(len(env.train_methods))]
            input_tensor = row['source']
            target_tensor = row['name']
            loss = self.train(input_tensor, target_tensor)
            total_loss += loss
            if iteration % constants.LOG_EVERY == 0:
                log('Completed {} iterations'.format(iteration))
                train_time_elapsed = time.time() - start_train_time
                log('Evaluating on validation set')
                start_eval_time = time.time()
                names = evaluator.evaluate(self)
                log('Saving the calculated metrics')
                save_dataframe(names, constants.VALIDATION_NAMES_FILE.format(iteration))
                eval_time_elapsed = time.time() - start_eval_time
                # DataFrame.append was deprecated and removed in pandas 2.x;
                # pd.concat is the behaviour-equivalent replacement.
                env.history = pd.concat([env.history, pd.DataFrame([{
                    'Loss': total_loss / constants.LOG_EVERY,
                    'Precision': names['Precision'].mean(),
                    'Recall': names['Recall'].mean(),
                    'F1': names['F1'].mean(),
                    'ExactMatch': names['ExactMatch'].mean(),
                    'num_names': len(names['GeneratedName'].unique())
                }])], ignore_index=True)
                epoch_time_elapsed = time.time() - start_epoch_time
                total_time_elapsed = time.time() - start_total_time
                env.total_training_time = total_time_elapsed
                history_last_row = env.history.iloc[-1]
                log_dict = OrderedDict([
                    ("Iteration", '{}/{} ({:.1f}%)'.format(
                        iteration, constants.NUM_ITER, iteration / constants.NUM_ITER * 100)),
                    ("Average loss", history_last_row['Loss']),
                    ("Average precision", history_last_row['Precision']),
                    ("Average recall", history_last_row['Recall']),
                    ("Average F1", history_last_row['F1']),
                    ("Exact match", history_last_row['ExactMatch']),
                    ("Unique names", int(history_last_row['num_names'])),
                    ("Epoch time", time_str(epoch_time_elapsed)),
                    ("Training time", time_str(train_time_elapsed)),
                    ("Evaluation time", time_str(eval_time_elapsed)),
                    ("Total training time", time_str(total_time_elapsed))
                ])
                write_training_log(log_dict, constants.TRAIN_LOG_FILE)
                plot_and_save_histories(env.history)
                env.iters_completed = iteration
                env.save_train()
                # Reseting counters
                total_loss = 0
                start_epoch_time = time.time()
                start_train_time = time.time()

    def forward(self, input_tensor, max_length=constants.MAX_LENGTH, return_attention=False):
        """Greedy-decode input_tensor into a list of token ids.

        Decoding stops at EOS_TOKEN or after max_length steps. When
        return_attention is True, also returns one attention vector per
        emitted token.
        """
        encoder_hidden = self.encoder.initHidden()
        input_length = input_tensor.size(0)
        encoder_outputs = torch.zeros(max_length + 1, self.encoder.hidden_size, device=self.device)
        for ei in range(input_length):
            encoder_output, encoder_hidden = self.encoder(input_tensor[ei], encoder_hidden)
            encoder_outputs[ei] = encoder_output[0, 0]
        decoder_input = torch.tensor([[constants.SOS_TOKEN]], device=self.device)
        decoder_hidden = encoder_hidden
        decoded_words = []
        attention_vectors = []
        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = self.decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            _, topi = decoder_output.data.topk(1)
            decoded_words.append(topi.item())
            attention_vectors.append(decoder_attention.tolist()[0])
            if decoded_words[-1] == constants.EOS_TOKEN:
                break
            decoder_input = topi.squeeze().detach()
        if return_attention:
            return decoded_words, attention_vectors
        else:
            return decoded_words
|
<filename>tests/objects/server/test_runmode.py
import unittest
from pyiron_base.objects.server.runmode import Runmode
class TestRunmode(unittest.TestCase):
    """Verify that Runmode's four mode flags are mutually exclusive."""

    def setUp(self):
        """Create one Runmode per explicit mode plus an untouched default."""
        self.run_mode_default = Runmode()
        self.run_mode_modal = Runmode()
        self.run_mode_modal.mode = 'modal'
        self.run_mode_non_modal = Runmode()
        self.run_mode_non_modal.mode = 'non_modal'
        self.run_mode_queue = Runmode()
        self.run_mode_queue.mode = 'non_modal'.replace('non_modal', 'queue')
        self.run_mode_manual = Runmode()
        self.run_mode_manual.mode = 'manual'

    def _check_flag(self, flag, expected):
        """Assert `flag` over the five fixtures against the expected booleans."""
        fixtures = (self.run_mode_default, self.run_mode_modal,
                    self.run_mode_non_modal, self.run_mode_queue,
                    self.run_mode_manual)
        for run_mode, should_be_true in zip(fixtures, expected):
            if should_be_true:
                self.assertTrue(getattr(run_mode, flag))
            else:
                self.assertFalse(getattr(run_mode, flag))

    def _check_default_flags(self, active):
        """Assert that on the default instance only `active` is set."""
        for flag in ('modal', 'non_modal', 'queue', 'manual'):
            if flag == active:
                self.assertTrue(getattr(self.run_mode_default, flag))
            else:
                self.assertFalse(getattr(self.run_mode_default, flag))

    def test_modal(self):
        # the freshly constructed default instance reports modal as well
        self._check_flag('modal', (True, True, False, False, False))

    def test_non_modal(self):
        self._check_flag('non_modal', (False, False, True, False, False))

    def test_queue(self):
        self._check_flag('queue', (False, False, False, True, False))

    def test_manual(self):
        self._check_flag('manual', (False, False, False, False, True))

    def test_mode(self):
        # assigning .mode must flip exactly one flag on, all others off
        for active in ('non_modal', 'queue', 'manual', 'modal'):
            self.run_mode_default.mode = active
            self._check_default_flags(active)

    def test_setter(self):
        # setting a flag attribute to True must clear the other three
        for active in ('non_modal', 'queue', 'manual', 'modal'):
            setattr(self.run_mode_default, active, True)
            self._check_default_flags(active)
# Run the test suite when executed directly.
# (bug fix: removed a stray trailing "|" character that made the line a syntax error)
if __name__ == '__main__':
    unittest.main()
<reponame>kasev/textnet
### these should go easy
import sys
import pandas as pd
pd.options.mode.chained_assignment = None # default='warn'
pd.set_option('display.max_rows', 150)
import numpy as np
import os
import string
import collections
import math
import random
import statistics as stat
import re
import unicodedata
import json
# Natural Language Processing Toolkit - we use it especially for building bigrams
import nltk
from nltk.collocations import *
### Beautiful Soup and Urllib
### for scrapping of web data and parsing xml files
from urllib.request import urlopen
# urllib and requests do basically the same, but my preferences are changing all the time, so let's import both
from urllib.parse import quote
import requests
from bs4 import BeautifulSoup
### in some cases I prefer Element Tree
import xml.etree.cElementTree as ET
### for visualization
# in some cases I use matplotlib, which is much easier to configure, elsewhere I prefer Plotly, which is more "sexy"
import matplotlib.pyplot as plt
from PIL import Image
import seaborn as sns
### to generate wordcloud data
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator, get_single_color_func
# There is a lot of changes in Plotly nowadays. Perhaps some modifications of the code will be needed at some point
import plotly
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
plotly.offline.init_notebook_mode(connected=True)
### for network analysis
import networkx as nx
def network_formation_df(dataset, column, book_abbr, lexicon_size, threshold):
    '''Build a lemma co-occurrence network from one document of `dataset`.

    dataset[book_abbr][column] is expected to hold a list of lemmata.
    Only the `lexicon_size` most frequent lemmata become nodes, and only
    bigrams observed at least `threshold` times become edges.
    Returns a networkx.Graph whose edges carry integer "weight" plus the
    derived "norm_weight", "distance" and "norm_distance" attributes.
    '''
    lemmata_list = dataset[book_abbr][column]
    # drop the copulas ("to be" in Czech and Greek) before any counting;
    # this also makes the former per-bigram re-filtering redundant
    lemmata_list = [lemma for lemma in lemmata_list if lemma not in ("být", "εἰμί")]
    lexicon = [word_tuple[0] for word_tuple in nltk.FreqDist(lemmata_list).most_common(lexicon_size)]
    bigrams_list = []
    for bigram in nltk.bigrams(lemmata_list):
        # short-circuit `and` instead of bitwise `&`; self-loops are excluded
        if (bigram[0] in lexicon) and (bigram[1] in lexicon) and (bigram[0] != bigram[1]):
            bigrams_list.append(tuple(sorted(bigram)))
    bigrams_counts = sorted(collections.Counter(bigrams_list).items(), key=lambda x: x[1], reverse=True)
    ### create a NetworkX object
    G = nx.Graph()
    # bug fix: the previous np.array(...) wrapper coerced the (node, node,
    # count) triples to a string dtype, turning every weight into a string;
    # plain tuples keep the weights as integers
    G.add_weighted_edges_from(
        (pair[0], pair[1], int(count))
        for pair, count in bigrams_counts
        if count >= threshold
    )
    ### add derived edge attributes
    total_weight = sum(int(n) for n in nx.get_edge_attributes(G, "weight").values())
    for (u, v) in G.edges:
        G[u][v]["weight"] = int(G[u][v]["weight"])
        G[u][v]["norm_weight"] = round(G[u][v]["weight"] / total_weight, 5)
        G[u][v]["distance"] = round(1 / G[u][v]["weight"], 5)
        G[u][v]["norm_distance"] = round(1 / G[u][v]["norm_weight"], 5)
    return G
def network_from_lemmata_list(lemmata_list, lexicon_size, threshold):
    '''Build a lemma co-occurrence network from a flat list of lemmata.

    Only the `lexicon_size` most frequent lemmata become nodes, and only
    bigrams observed at least `threshold` times become edges.
    Returns a networkx.Graph whose edges carry integer "weight" plus the
    derived "norm_weight", "distance" and "norm_distance" attributes.
    '''
    # drop the copulas ("to be" in Czech and Greek) before any counting;
    # this also makes the former per-bigram re-filtering redundant
    lemmata_list = [lemma for lemma in lemmata_list if lemma not in ("být", "εἰμί")]
    lexicon = [word_tuple[0] for word_tuple in nltk.FreqDist(lemmata_list).most_common(lexicon_size)]
    bigrams_list = []
    for bigram in nltk.bigrams(lemmata_list):
        # short-circuit `and` instead of bitwise `&`; self-loops are excluded
        if (bigram[0] in lexicon) and (bigram[1] in lexicon) and (bigram[0] != bigram[1]):
            bigrams_list.append(tuple(sorted(bigram)))
    bigrams_counts = sorted(collections.Counter(bigrams_list).items(), key=lambda x: x[1], reverse=True)
    ### create a NetworkX object
    G = nx.Graph()
    # bug fix: the previous np.array(...) wrapper coerced the (node, node,
    # count) triples to strings; plain tuples keep the weights as integers
    G.add_weighted_edges_from(
        (pair[0], pair[1], int(count))
        for pair, count in bigrams_counts
        if count >= threshold
    )
    ### add derived edge attributes
    total_weight = sum(int(n) for n in nx.get_edge_attributes(G, "weight").values())
    for (u, v) in G.edges:
        G[u][v]["weight"] = int(G[u][v]["weight"])
        G[u][v]["norm_weight"] = round(G[u][v]["weight"] / total_weight, 5)
        G[u][v]["distance"] = round(1 / G[u][v]["weight"], 5)
        G[u][v]["norm_distance"] = round(1 / G[u][v]["norm_weight"], 5)
    return G
def network_by_author(dataset, column, book_abbr, lexicon_size, threshold):
    '''Build a lemma co-occurrence network from all works of one author.

    Rows of `dataset` are selected by dataset["author"] == book_abbr; the
    lexicon is computed over the merged works, while bigrams never cross
    work boundaries. Returns a networkx.Graph whose edges carry integer
    "weight" plus "norm_weight", "distance" and "norm_distance".
    '''
    works = dataset[dataset["author"]==book_abbr][column].tolist()
    works_merged = [item for sublist in works for item in sublist]
    lexicon = [word_tuple[0] for word_tuple in nltk.FreqDist(works_merged).most_common(lexicon_size)]
    bigrams_list = []
    for work in works:
        # the Greek copula is excluded per work before forming bigrams
        for bigram in nltk.bigrams([lemma for lemma in work if lemma != "εἰμί"]):
            # short-circuit `and` instead of bitwise `&`; self-loops excluded
            if (bigram[0] in lexicon) and (bigram[1] in lexicon) and (bigram[0] != bigram[1]):
                bigrams_list.append(tuple(sorted(bigram)))
    bigrams_counts = sorted(collections.Counter(bigrams_list).items(), key=lambda x: x[1], reverse=True)
    ### create a NetworkX object
    G = nx.Graph()
    # bug fix: the previous np.array(...) wrapper coerced the (node, node,
    # count) triples to strings; plain tuples keep the weights as integers
    G.add_weighted_edges_from(
        (pair[0], pair[1], int(count))
        for pair, count in bigrams_counts
        if count >= threshold
    )
    ### add derived edge attributes
    total_weight = sum(int(n) for n in nx.get_edge_attributes(G, "weight").values())
    for (u, v) in G.edges:
        G[u][v]["weight"] = int(G[u][v]["weight"])
        G[u][v]["norm_weight"] = round(G[u][v]["weight"] / total_weight, 5)
        G[u][v]["distance"] = round(1 / G[u][v]["weight"], 5)
        G[u][v]["norm_distance"] = round(1 / G[u][v]["norm_weight"], 5)
    return G
def draw_2d_network(networkx_object, file_name, mode):
    '''Draw a networkX graph as a 2D Plotly figure.

    mode: "offline" -> iplot to an html path, "online" -> iplot under
    file_name, "file" -> plot() a .png into the figures directory.
    NOTE(review): the "offline" and "file" branches use a global
    `gdrive_root` that is not defined in this module - confirm the caller
    provides it.
    '''
    # NOTE(review): edges elsewhere carry "norm_weight", not "weight_norm";
    # confirm which attribute name was intended for the layout.
    pos_2d=nx.kamada_kawai_layout(networkx_object, weight="weight_norm")
    nx.set_node_attributes(networkx_object, pos_2d, "pos_2d")
    dmin=1
    ncenter=0
    Edges = list(networkx_object.edges)
    L=len(Edges)
    labels= list(networkx_object.nodes)
    N = len(labels)
    distance_list = [float(distance[2]) for distance in list(networkx_object.edges.data("distance"))]
    weight_list = [int(float(weight[2])) for weight in list(networkx_object.edges.data("weight"))]
    # find the node whose layout position is closest to the centre (0.5, 0.5)
    for n in pos_2d:
        x,y=pos_2d[n]
        d=(x-0.5)**2+(y-0.5)**2
        if d<dmin:
            ncenter=n
            dmin=d
    p =nx.single_source_shortest_path_length(networkx_object, ncenter)
    # NOTE(review): generate_adjlist yields strings, so len() counts
    # characters, not neighbours - these values are only a rough proxy
    # for node degree; confirm intent.
    adjc= [len(one_adjc) for one_adjc in list((nx.generate_adjlist(networkx_object)))]
    # invisible markers at edge midpoints carry the weight hover text
    middle_node_trace = go.Scatter(
        x=[],
        y=[],
        opacity=0,
        text=weight_list,
        mode='markers',
        hoverinfo='text',
        marker=dict(
            opacity=0
        )
    )
    for Edge in Edges:
        x0,y0 = networkx_object.nodes[Edge[0]]["pos_2d"]
        x1,y1 = networkx_object.nodes[Edge[1]]["pos_2d"]
        middle_node_trace['x'] += tuple([(x0+x1)/2])
        middle_node_trace['y'] += tuple([(y0+y1)/2])
    # three edge traces: strongest 5% (black), next 20% (dark grey), rest (light grey)
    edge_trace1 = go.Scatter(
        x=[], y=[],
        #hoverinfo='none',
        mode='lines',
        line=dict(width=1,color="#000000"),
    )
    edge_trace2 = go.Scatter(
        x=[],y=[],
        #hoverinfo='none',
        mode='lines',
        line=dict(width=0.7,color="#404040"),
    )
    edge_trace3 = go.Scatter(
        x=[], y=[],
        #hoverinfo='none',
        mode='lines',
        line=dict(width=0.5,color="#C0C0C0"),
    )
    # norm_weight values delimiting the strongest 5% and 20% of edges
    best_5percent_norm_weight = sorted(list(networkx_object.edges.data("norm_weight")), key=lambda x: x[2], reverse=True)[int((len(networkx_object.edges.data("norm_weight")) / 100) * 5)][2]
    best_20percent_norm_weight = sorted(list(networkx_object.edges.data("norm_weight")), key=lambda x: x[2], reverse=True)[int((len(networkx_object.edges.data("norm_weight")) / 100) * 20)][2]
    for edge in networkx_object.edges.data():
        if edge[2]["norm_weight"] >= best_5percent_norm_weight:
            x0, y0 = networkx_object.nodes[edge[0]]['pos_2d']
            x1, y1 = networkx_object.nodes[edge[1]]['pos_2d']
            edge_trace1['x'] += tuple([x0, x1, None])
            edge_trace1['y'] += tuple([y0, y1, None])
        else:
            if edge[2]["norm_weight"] >= best_20percent_norm_weight:
                x0, y0 = networkx_object.nodes[edge[0]]['pos_2d']
                x1, y1 = networkx_object.nodes[edge[1]]['pos_2d']
                edge_trace2['x'] += tuple([x0, x1, None])
                edge_trace2['y'] += tuple([y0, y1, None])
            else:
                x0, y0 = networkx_object.nodes[edge[0]]['pos_2d']
                x1, y1 = networkx_object.nodes[edge[1]]['pos_2d']
                edge_trace3['x'] += tuple([x0, x1, None])
                edge_trace3['y'] += tuple([y0, y1, None])
    node_trace = go.Scatter(
        x=[],
        y=[],
        #name=[],
        text=[],
        textposition='bottom center',
        mode='markers+text',
        hovertext=adjc,
        hoverinfo='text',
        marker=dict(
            ###showscale=True,
            showscale=False, ### change to see scale
            colorscale='Greys',
            reversescale=True,
            color=[],
            size=7,
            colorbar=dict(
                thickness=15,
                title='degree',
                xanchor='left',
                titleside='right'
            ),
            line=dict(width=1)
        )
    )
    for node in networkx_object.nodes():
        x, y = networkx_object.nodes[node]['pos_2d']
        node_trace['x'] += tuple([x])
        node_trace['y'] += tuple([y])
        node_trace["text"] += tuple([node])
    ### original version: node_trace["text"] += tuple([node])
    ### Color Node Points
    # NOTE(review): `adjacencies` is an adjacency-list *string*, so the
    # colour is its character count, not the degree - confirm intent.
    for node, adjacencies in enumerate(nx.generate_adjlist(networkx_object)):
        node_trace['marker']['color'] += tuple([len(adjacencies)])
        ###node_info = ' of connections: '+str(len(adjacencies))
        ###node_trace['something'].append(node_info)
    fig = go.Figure(data=[edge_trace1, edge_trace2, edge_trace3, node_trace, middle_node_trace],
                layout=go.Layout(
                    plot_bgcolor='rgba(0,0,0,0)',
                    autosize=False,
                    width=500,
                    height=500,
                    #title=file_name,
                    titlefont=dict(size=16),
                    showlegend=False,
                    hovermode='closest',
                    margin=dict(b=10,l=10,r=10, t=10),
                    xaxis=dict(showgrid=False, zeroline=False, showticklabels=False),
                    yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)
                ))
    if mode=="offline":
        # NOTE(review): unlike the "file" branch below, this path has no "/"
        # after "nt_cep_networks" - confirm the intended target path.
        return iplot(fig, filename=gdrive_root + "figures/nt_cep_networks" + file_name +".html")
    if mode=="online":
        return iplot(fig, filename=file_name)
    if mode=="file":
        return plot(fig, filename=gdrive_root + "figures/nt_cep_networks/" + file_name + ".png" , auto_open=False)
def draw_3d_network(networkx_object, file_name, mode):
    '''Draw a networkX graph as a 3D Plotly figure.

    mode: "offline" -> iplot inline, "online" -> iplot under file_name,
    "eps" -> write an .eps image.
    NOTE(review): the "eps" branch uses `pio` (plotly.io), which is never
    imported in this module - confirm before relying on that mode.
    '''
    Edges = list(networkx_object.edges)
    L=len(Edges)
    distance_list = [distance[2] for distance in list(networkx_object.edges.data("distance"))]
    weight_list = [int(float(weight[2])) for weight in list(networkx_object.edges.data("weight"))]
    labels= list(networkx_object.nodes)
    N = len(labels)
    # NOTE(review): generate_adjlist yields strings, so len() counts
    # characters, not neighbours - colour values are only a proxy for degree.
    adjc= [len(one_adjc) for one_adjc in list((nx.generate_adjlist(networkx_object)))] ### instead of "group"
    # force-directed 3D layout, stored on each node as "pos_3d"
    pos_3d=nx.spring_layout(networkx_object, weight="weight", dim=3)
    nx.set_node_attributes(networkx_object, pos_3d, "pos_3d")
    layt = [list(array) for array in pos_3d.values()]
    N= len(networkx_object.nodes)
    Xn=[layt[k][0] for k in range(N)]# x-coordinates of nodes
    Yn=[layt[k][1] for k in range(N)]# y-coordinates
    Zn=[layt[k][2] for k in range(N)]# z-coordinates
    Xe=[]
    Ye=[]
    Ze=[]
    for Edge in Edges:
        Xe+=[networkx_object.nodes[Edge[0]]["pos_3d"][0],networkx_object.nodes[Edge[1]]["pos_3d"][0], None]# x-coordinates of edge ends
        Ye+=[networkx_object.nodes[Edge[0]]["pos_3d"][1],networkx_object.nodes[Edge[1]]["pos_3d"][1], None]
        Ze+=[networkx_object.nodes[Edge[0]]["pos_3d"][2],networkx_object.nodes[Edge[1]]["pos_3d"][2], None]
    ### to get the hover into the middle of the line
    ### we have to produce a node in the middle of the line
    ### based on https://stackoverflow.com/questions/46037897/line-hover-text-in-plotly
    middle_node_trace = go.Scatter3d(
        x=[],
        y=[],
        z=[],
        opacity=0,
        text=weight_list,
        mode='markers',
        hoverinfo='text',
        marker=dict(
            opacity=0
        )
    )
    for Edge in Edges:
        x0,y0,z0 = networkx_object.nodes[Edge[0]]["pos_3d"]
        x1,y1,z1 = networkx_object.nodes[Edge[1]]["pos_3d"]
        ###trace3['x'] += [x0, x1, None]
        ###trace3['y'] += [y0, y1, None]
        ###trace3['z'] += [z0, z1, None]
        ###trace3_list.append(trace3)
        middle_node_trace['x'] += tuple([(x0+x1)/2])
        middle_node_trace['y'] += tuple([(y0+y1)/2])#.append((y0+y1)/2)
        middle_node_trace['z'] += tuple([(z0+z1)/2])#.append((z0+z1)/2)
    ### edge trace
    trace1=go.Scatter3d(x=Xe,
               y=Ye,
               z=Ze,
               mode='lines',
               line=dict(color='rgb(125,125,125)', width=1),
               text=distance_list,
               hoverinfo='text',
               textposition="top right"
               )
    ### node trace
    trace2=go.Scatter3d(x=Xn,
               y=Yn,
               z=Zn,
               mode='markers+text',
               ###name=labels,
               marker=dict(symbol='circle',
                             size=6,
                             color=adjc,
                             colorscale='Earth',
                             reversescale=True,
                             line=dict(color='rgb(50,50,50)', width=0.5)
                             ),
               text=[],
               #textposition='bottom center',
               #hovertext=adjc,
               #hoverinfo='text'
               )
    for node in networkx_object.nodes():
        trace2["text"] += tuple([node])
    # shared invisible-axis settings reused for all three scene axes
    axis=dict(showbackground=False,
              showline=False,
              zeroline=False,
              showgrid=False,
              showticklabels=False,
              title=''
              )
    layout = go.Layout(
        plot_bgcolor='rgba(0,0,0,0)',
        title="",
        width=900,
        height=700,
        showlegend=False,
        scene=dict(
            xaxis=dict(axis),
            yaxis=dict(axis),
            zaxis=dict(axis),
        ),
        margin=dict(
            t=100
        ),
        hovermode='closest',
        annotations=[
            dict(
                showarrow=False,
                text="",
                xref='paper',
                yref='paper',
                x=0,
                y=0.1,
                xanchor='left',
                yanchor='bottom',
                font=dict(
                    size=14
                )
            )
        ], )
    data=[trace1, trace2, middle_node_trace]
    fig=go.Figure(data=data, layout=layout)
    if mode=="offline":
        return iplot(fig) ###, filename=file_name+".html")
    if mode=="online":
        return iplot(fig, filename=file_name)
    if mode=="eps":
        return pio.write_image(fig, "images/" + file_name + "_3D.eps" , scale=1)
def ego_network_drawing_reduced(network, term, num_of_neighbours, title, mode, dimensions):
    '''Derive an ego network from a preexisting network and draw it.

    Keeps only the num_of_neighbours nodes closest to `term` (by the
    "distance" edge attribute) and only the edges lying on the shortest
    paths from `term`, then hands the pruned copy to the 2D or 3D drawer.
    '''
    length, path = nx.single_source_dijkstra(network, term, target=None, weight="distance")
    # nodes come back ordered by increasing distance from `term`
    shortest_nodes = list(length.keys())[0:num_of_neighbours + 1]
    # order the shortest paths the same way as the distance dict
    node_order = list(length.keys())
    ordered_paths = [pair[1] for pair in sorted(path.items(), key=lambda pair: node_order.index(pair[0]))]
    path_edges = []
    for single_path in ordered_paths[1:num_of_neighbours + 1]:
        path_edges.extend(tuple(sorted(step)) for step in nltk.bigrams(single_path))
    shortest_edges = set(path_edges)
    ego_network = network.copy(as_view=False)
    # prune every node outside the ego neighbourhood (incident edges go too)
    stale_nodes = [node for node in ego_network.nodes if node not in shortest_nodes]
    for node in stale_nodes:
        ego_network.remove_node(node)
    # then prune surviving edges that are not on any shortest path
    stale_edges = [edge for edge in ego_network.edges
                   if edge not in shortest_edges and (edge[1], edge[0]) not in shortest_edges]
    for u, v in stale_edges:
        ego_network.remove_edge(u, v)
    if dimensions == "2D":
        return draw_2d_network(ego_network, title, mode)
    if dimensions == "3D":
        return draw_3d_network(ego_network, title, mode)
def ego_network_standard(dataset, column, book_abbr, term, mode, dimensions):
    '''Build a 500-word network (threshold 1) for `book_abbr` and draw the
    30-neighbour ego network of `term`.'''
    # dataframe datasets are aggregated per author, others per document
    builder = network_by_author if isinstance(dataset, pd.DataFrame) else network_formation_df
    network = builder(dataset, column, book_abbr, 500, 1)
    ego_network_drawing_reduced(network, term, 30, book_abbr + " - " + term, mode, dimensions)
def ego_network_closest(dataset, column, book_abbr, term, num_of_neighbours):
    """Return the num_of_neighbours terms closest to `term`.

    Builds a 500-word network (threshold 1) and returns a list of
    (translated lemma, rounded distance) tuples ordered by distance.
    """
    if isinstance(dataset, pd.DataFrame):
        network = network_by_author(dataset, column, book_abbr, 500, 1)
    else:
        # bug fix: the `column` argument was missing here, so this branch
        # raised TypeError (network_formation_df takes five arguments)
        network = network_formation_df(dataset, column, book_abbr, 500, 1)
    length, path = nx.single_source_dijkstra(network, term, target=None, weight="distance")
    # skip index 0: the closest "neighbour" is `term` itself at distance 0
    length_sorted = sorted(length.items(), key=lambda x: x[1])[1:num_of_neighbours + 1]
    # translator_short is expected to be defined elsewhere in the project - TODO confirm
    length_sorted_trans = [(translator_short(tup[0]), round(tup[1], 3)) for tup in length_sorted]
    return length_sorted_trans
def ego_network_list_from_list(lemmata_list, term, num_of_neighbours):
    """Return the num_of_neighbours terms closest to `term` in a network
    built from `lemmata_list`, as (lemma, meanings, distance) tuples.

    Returns [] when `term` is absent from the network or the shortest-path
    computation fails for any other reason.
    """
    network = network_from_lemmata_list(lemmata_list, 500, 1)
    try:
        length, path = nx.single_source_dijkstra(network, term, target=None, weight="distance")
        # skip index 0: the closest "neighbour" is `term` itself at distance 0
        length_sorted = sorted(length.items(), key=lambda x: x[1])[1:num_of_neighbours + 1]
        # list_of_meanings is expected to be defined elsewhere in the project - TODO confirm
        return [(tup[0], list_of_meanings(tup[0]), round(tup[1], 3)) for tup in length_sorted]
    except Exception:
        # bug fix: the former bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; the deliberate best-effort fallback is kept
        return []
def ego_network_data(dataset, column, book_abbr, term, num_of_neighbours):
    '''Create a network and extract the ego network of `term` as tabular data.

    Keeps only the `num_of_neighbours` closest nodes and the edges lying on
    shortest paths from the source term (computed with Dijkstra over the
    "distance" edge attribute).

    Returns a list of [source, target, weight, 1/weight] rows sorted by
    descending edge weight; rows touching `term` list `term` first.
    '''
    if isinstance(dataset, pd.DataFrame) == True:
        network = network_by_author(dataset, column, book_abbr, 500, 1)
    else:
        network = network_formation_df(dataset, column, book_abbr, 500, 1)
    # `length` maps node -> shortest distance, `path` maps node -> node list.
    length, path = nx.single_source_dijkstra(network, term, target=None, weight="distance")
    # dict preserves insertion order; Dijkstra inserts nodes by increasing
    # distance, so the first N+1 keys are the source plus its N closest nodes.
    shortest_nodes = list(length.keys())[0:num_of_neighbours+1]
    # Re-order the paths to match the distance ordering of `length`.
    path_values_sorted = [dict_pair[1] for dict_pair in sorted(path.items(), key=lambda pair: list(length.keys()).index(pair[0]))]
    path_edges = []
    # Collect every edge used by a shortest path; sort each bigram so the
    # undirected edge (a, b) and (b, a) collapse to one tuple.
    for path_to_term in path_values_sorted[1:num_of_neighbours+1]:
        path_edges.extend([tuple(sorted(bigram)) for bigram in nltk.bigrams(path_to_term)])
    shortest_edges = list(set(path_edges))
    # Deep copy: node/edge removal below must not mutate the full network.
    ego_network = network.copy(as_view=False)
    nodes_to_remove = []
    for node in ego_network.nodes:
        if node not in shortest_nodes:
            nodes_to_remove.append(node)
    for element in nodes_to_remove:
        ego_network.remove_node(element)
    edges_to_remove = []
    for edge in ego_network.edges:
        # Keep an edge if either orientation lies on a shortest path.
        if edge not in shortest_edges:
            if (edge[1],edge[0]) not in shortest_edges:
                edges_to_remove.append(edge)
    for element in edges_to_remove:
        ego_network.remove_edge(element[0], element[1])
    # Sort remaining edges by descending co-occurrence weight.
    ego_network_data_prec = sorted(list(ego_network.edges.data("weight")), key=lambda tup: int(tup[2]), reverse=True)
    ego_network_data_complete = []
    for tup in ego_network_data_prec:
        # Normalize row orientation so the source term appears first when present.
        if tup[1] == term:
            ego_network_data_complete.append([tup[1], tup[0], int(tup[2]), round(1 / int(tup[2]), 5)])
        else:
            ego_network_data_complete.append([tup[0], tup[1], int(tup[2]), round(1 / int(tup[2]), 5)])
    return ego_network_data_complete
# to work with plotly in google colab environment
def configure_plotly_browser_state():
    """Inject require.js configuration so Plotly figures render in Google Colab.

    Colab output cells do not load plotly.js by default; this registers the
    CDN bundle with require.js for the current cell. Call it before each
    Plotly rendering inside Colab.
    """
    import IPython
    # NOTE(review): `display` is assumed to be injected by the notebook
    # environment (IPython.display.display) -- confirm outside Colab.
    display(IPython.core.display.HTML('''
        <script src="/static/components/requirejs/require.js"></script>
        <script>
          requirejs.config({
            paths: {
              base: '/static/base',
              plotly: 'https://cdn.plot.ly/plotly-latest.min.js?noext',
            },
          });
        </script>
        '''))
|
# File: stats.py
# Creation: Saturday December 5th 2020
# Author: <NAME>
# Contact: <EMAIL>
# <EMAIL>
# --------
# Copyright (c) 2020 <NAME>
import json
from collections import defaultdict
def get_user_stats(posts):
    """Get per user statistics.
    * :attr:`POST-COUNT` : The cumulative sum of posts.
    * :attr:`POST-REACTION-COUNT` : The cumulative sum of post reactions.
    * :attr:`BEST-POST-REACTION` : The posts with the highest reactions.
    * :attr:`COMMENT-COUNT` : The cumulative sum of comments.
    * :attr:`COMMENT-REACTION-COUNT` : The cumulative sum of comments reactions.
    * :attr:`BEST-COMMENT-REACTION` : The comments with the highest reactions.
    * :attr:`REPLY-COUNT` : The cumulative sum of replies.
    * :attr:`REPLY-REACTION-COUNT` : The cumulative sum of replies reactions.
    * :attr:`BEST-REPLY-REACTION` : The replies with the highest reactions.
    * :attr:`COMMENT-REPLY-COUNT` : The cumulative sum of replies and comments.
    * :attr:`REACTION-COUNT` : The cumulative sum of reactions.
    * :attr:`REACTION-AHAH` : The cumulative sum of "AHAH" reactions.
    * :attr:`REACTION-LOVE` : The cumulative sum of "LOVE" reactions.
    * :attr:`REACTION-CARE` : The cumulative sum of "CARE" reactions.
    * :attr:`REACTION-WOW` : The cumulative sum of "WOW" reactions.
    * :attr:`REACTION-SAD` : The cumulative sum of "SAD" reactions.
    * :attr:`REACTION-ANGER` : The cumulative sum of "ANGER" reactions.
    * :attr:`REACTION-LIKE` : The cumulative sum of "LIKE" reactions.
    Args:
        posts (list): List of posts, retrieved from the API.
    Returns:
        dict
    """
    # Nested defaultdicts: stats[user][counter] starts at 0 on first touch.
    stats = defaultdict(lambda: defaultdict(int))
    for post in posts:
        post_author = post["user"]
        stats[post_author]["POST-COUNT"] += 1
        for post_reaction in post["reactions"]:
            # People who reacted to the post
            stats[post_author]["POST-REACTION-COUNT"] += 1
            # People who reacted
            reaction_author = post_reaction["user"]
            reaction_type = post_reaction["reaction"]
            stats[reaction_author]["REACTION-COUNT"] += 1
            stats[reaction_author][f"REACTION-{reaction_type.upper()}"] += 1
        # Update best stats
        stats[post_author]["BEST-POST-REACTION"] = max(stats[post_author]["BEST-POST-REACTION"], len(post["reactions"]))
        # look for comments
        for comment in post["comments"]:
            comment_author = comment["user"]
            stats[comment_author]["COMMENT-COUNT"] += 1
            stats[comment_author]["COMMENT-REPLY-COUNT"] += 1
            for comment_reaction in comment["reactions"]:
                # People who reacted to his comment
                stats[comment_author]["COMMENT-REACTION-COUNT"] += 1
                # People who reacted
                reaction_author = comment_reaction["user"]
                reaction_type = comment_reaction["reaction"]
                stats[reaction_author]["REACTION-COUNT"] += 1
                stats[reaction_author][f"REACTION-{reaction_type.upper()}"] += 1
            # Look for replies
            for reply in comment["replies"]:
                # BUG FIX: replies were credited to the comment author
                # (`comment["user"]`); the author of the reply itself is
                # `reply["user"]`.
                reply_author = reply["user"]
                stats[reply_author]["REPLY-COUNT"] += 1
                stats[reply_author]["COMMENT-REPLY-COUNT"] += 1
                for reply_reaction in reply["reactions"]:
                    # People who reacted to his comment
                    stats[reply_author]["REPLY-REACTION-COUNT"] += 1
                    # People who reacted
                    reaction_author = reply_reaction["user"]
                    reaction_type = reply_reaction["reaction"]
                    stats[reaction_author]["REACTION-COUNT"] += 1
                    stats[reaction_author][f"REACTION-{reaction_type.upper()}"] += 1
                # Update best stats
                stats[reply_author]["BEST-REPLY-REACTION"] = max(stats[reply_author]["BEST-REPLY-REACTION"], len(reply["reactions"]))
            stats[comment_author]["BEST-COMMENT-REACTION"] = max(stats[comment_author]["BEST-COMMENT-REACTION"], len(comment["reactions"]))
    # JSON round-trip converts the nested defaultdicts into plain dicts.
    return json.loads(json.dumps(stats))
def get_top_stats(posts):
    """Get the sorted top statistics.
    * :attr:`POST-COUNT` : The cumulative sum of posts.
    * :attr:`POST-REACTION-COUNT` : The cumulative sum of post reactions.
    * :attr:`BEST-POST-REACTION` : The posts with the highest reactions.
    * :attr:`COMMENT-COUNT` : The cumulative sum of comments.
    * :attr:`COMMENT-REACTION-COUNT` : The cumulative sum of comments reactions.
    * :attr:`BEST-COMMENT-REACTION` : The comments with the highest reactions.
    * :attr:`REPLY-COUNT` : The cumulative sum of replies.
    * :attr:`REPLY-REACTION-COUNT` : The cumulative sum of replies reactions.
    * :attr:`BEST-REPLY-REACTION` : The replies with the highest reactions.
    * :attr:`COMMENT-REPLY-COUNT` : The cumulative sum of replies and comments.
    * :attr:`REACTION-COUNT` : The cumulative sum of reactions.
    * :attr:`REACTION-AHAH` : The cumulative sum of "AHAH" reactions.
    * :attr:`REACTION-LOVE` : The cumulative sum of "LOVE" reactions.
    * :attr:`REACTION-CARE` : The cumulative sum of "CARE" reactions.
    * :attr:`REACTION-WOW` : The cumulative sum of "WOW" reactions.
    * :attr:`REACTION-SAD` : The cumulative sum of "SAD" reactions.
    * :attr:`REACTION-ANGER` : The cumulative sum of "ANGER" reactions.
    * :attr:`REACTION-LIKE` : The cumulative sum of "LIKE" reactions.
    Args:
        posts (list): List of posts, retrieved from the API.
    Returns:
        dict: Mapping stat key -> list of {"user", "count"} entries sorted by
        descending count. Users without a given stat are omitted from that list.
    """
    stats = get_user_stats(posts)
    top_stats = {
        "POST-COUNT": [],
        "POST-REACTION-COUNT": [],
        "BEST-POST-REACTION": [],
        "COMMENT-COUNT": [],
        "COMMENT-REACTION-COUNT": [],
        "BEST-COMMENT-REACTION": [],
        "REPLY-COUNT": [],
        "REPLY-REACTION-COUNT": [],
        "BEST-REPLY-REACTION": [],
        "COMMENT-REPLY-COUNT": [],
        "REACTION-COUNT": [],
        "REACTION-AHAH": [],
        "REACTION-LOVE": [],
        "REACTION-CARE": [],
        "REACTION-WOW": [],
        "REACTION-SAD": [],
        "REACTION-ANGER": [],
        "REACTION-LIKE": [],
    }
    for user, user_stat in stats.items():
        for key in top_stats:
            # `user_stat` is a plain dict (JSON round-trip in get_user_stats),
            # so a missing stat is simply skipped. This replaces a broad
            # `try/except Exception: continue` that could only ever catch
            # KeyError but would also have hidden real bugs.
            if key in user_stat:
                top_stats[key].append({
                    "user": user,
                    "count": user_stat[key]
                })
    for key, entries in top_stats.items():
        # Lambda parameter renamed: it previously shadowed the loop variable `key`.
        top_stats[key] = sorted(entries, key=lambda entry: entry["count"], reverse=True)
    return top_stats
|
<gh_stars>0
import os
import requests
import textwrap
import time
from urllib.parse import urlparse
from sphinxcontrib.needs.api import add_need_type
from sphinxcontrib.needs.api.exceptions import NeedsApiConfigException
from sphinxcontrib.needs.services.base import BaseService
# Additional needed options, which are not defined by default
EXTRA_DATA_OPTIONS = ['user', 'created_at', 'updated_at', 'closed_at', 'service']
EXTRA_LINK_OPTIONS = ['url']
EXTRA_IMAGE_OPTIONS = ['avatar']
# Additional options, which are used to configure the service and shall not be part of the later needs
CONFIG_OPTIONS = ['type', 'query', 'specific', 'max_amount', 'max_content_lines', 'id_prefix']
# All Github related data options
GITHUB_DATA = ['status', 'tags'] + EXTRA_DATA_OPTIONS + EXTRA_LINK_OPTIONS + EXTRA_IMAGE_OPTIONS
# Needed for layout. Example: "user","avatar",...
GITHUB_DATA_STR = '"' + '","'.join(EXTRA_DATA_OPTIONS + EXTRA_LINK_OPTIONS + EXTRA_IMAGE_OPTIONS) + '"'
CONFIG_DATA_STR = '"' + '","'.join(CONFIG_OPTIONS) + '"'
# Need layout registered under the name "github": header with id, title and
# avatar, collapsible meta section with the GitHub-specific fields, and a
# footer showing layout/service/style. Uses the sphinx-needs layout grammar
# (<<meta(...)>>, <<link(...)>>, <<image(...)>> dynamic functions).
GITHUB_LAYOUT = {
    'grid': 'complex',
    'layout': {
        'head_left': [
            '<<meta_id()>>',
            '<<collapse_button("meta,footer", collapsed="icon:arrow-down-circle", '
            'visible="icon:arrow-right-circle", initial=True)>>'
        ],
        'head': ['**<<meta("title")>>** ('
                 + ", ".join(['<<link("{value}", text="{value}", is_dynamic=True)>>'.format(value=x)
                              for x in EXTRA_LINK_OPTIONS]) + ')'],
        'head_right': [
            '<<image("field:avatar", width="40px", align="middle")>>',
            '<<meta("user")>>'
        ],
        'meta_left': ['<<meta("{value}", prefix="{value}: ")>>'.format(value=x) for x in EXTRA_DATA_OPTIONS] +
                     [
                         '<<link("{value}", text="Link", prefix="{value}: ", is_dynamic=True)>>'.format(value=x)
                         for x in EXTRA_LINK_OPTIONS],
        'meta_right': [
            '<<meta("type_name", prefix="type: ")>>',
            '<<meta_all(no_links=True, exclude=["layout","style",{}, {}])>>'.format(GITHUB_DATA_STR, CONFIG_DATA_STR),
            '<<meta_links_all()>>'
        ],
        'footer_left': [
            'layout: <<meta("layout")>>',
        ],
        'footer': [
            'service: <<meta("service")>>',
        ],
        'footer_right': [
            'style: <<meta("style")>>'
        ]
    }
}
class GithubService(BaseService):
    """Sphinx-Needs data service that imports GitHub issues, pull requests or
    commits as need objects via the GitHub REST API.

    The object kind is selected with the ``gh_type`` kwarg ('issue', 'pr' or
    'commit'); one service instance handles exactly one kind.
    """
    options = CONFIG_OPTIONS + EXTRA_DATA_OPTIONS + EXTRA_LINK_OPTIONS + EXTRA_IMAGE_OPTIONS
    def __init__(self, app, name, config, **kwargs):
        """Read service configuration and register need types and the layout.

        :param app: the Sphinx application object.
        :param name: configured service name.
        :param config: dict of service options from conf.py.
        :param kwargs: must contain ``gh_type`` ('issue', 'pr' or 'commit').
        """
        self.app = app
        self.name = name
        self.config = config
        # Normalize the API base URL so later string concatenation is safe.
        self.url = self.config.get('url', 'https://api.github.com/')
        if self.url[len(self.url)-1] != "/":
            self.url = self.url + "/"
        self.max_amount = self.config.get('max_amount', 5)
        self.max_content_lines = self.config.get('max_content_lines', -1)
        self.id_prefix = self.config.get('id_prefix', 'GITHUB_')
        self.layout = self.config.get('layout', 'github')
        self.download_avatars = self.config.get('download_avatars', True)
        self.download_folder = self.config.get('download_folder', 'github_images')
        self.username = self.config.get('username', None)
        self.token = self.config.get('token', None)
        # Register the bundled 'github' layout once per build.
        if 'github' not in self.app.config.needs_layouts.keys():
            self.app.config.needs_layouts['github'] = GITHUB_LAYOUT
        # Per-kind API endpoint, base search query and default need type.
        self.gh_type_config = {
            'issue': {
                'url': 'search/issues',
                'query': 'is:issue',
                'need_type': 'issue'
            },
            'pr': {
                'url': 'search/issues',
                'query': 'is:pr',
                'need_type': 'pr'
            },
            'commit': {
                'url': 'search/commits',
                'query': '',
                'need_type': 'commit'
            },
        }
        # Register our need types; "already exists" errors are expected when
        # several GithubService instances are configured.
        try:
            add_need_type(self.app, 'issue', 'Issue', 'IS_', '#cccccc', 'card')
        except NeedsApiConfigException:
            pass  # Issue already exists, so we are fine
        try:
            add_need_type(self.app, 'pr', 'PullRequest', 'PR_', '#aaaaaa', 'card')
        except NeedsApiConfigException:
            pass  # PR already exists, so we are fine
        try:
            add_need_type(self.app, 'commit', 'Commit', 'C_', '#888888', 'card')
        except NeedsApiConfigException:
            pass  # Commit already exists, so we are fine
        if 'gh_type' in kwargs:
            self.gh_type = kwargs['gh_type']
        # NOTE(review): if 'gh_type' is absent, self.gh_type is never set and
        # the next line raises AttributeError -- presumably callers always
        # pass it; confirm with the service registration code.
        if self.gh_type not in self.gh_type_config.keys():
            raise KeyError('github type "{}" not supported. Use: {}'.format(
                self.gh_type, ", ".join(self.gh_type_config.keys())))
        # Set need_type to use by default
        self.need_type = self.config.get('need_type', self.gh_type_config[self.gh_type]['need_type'])
        super(GithubService, self).__init__()
    def _send(self, query, options, specific=False):
        """Perform the GitHub API request.

        :param query: search query string, or "owner/repo/number" when
            ``specific`` is True.
        :param options: directive options (may override ``max_amount``).
        :param specific: if True, request one single element instead of a search.
        :return: dict with an 'items' list (normalized for both modes).
        :raises NeedGithubServiceException: on malformed "specific" queries or
            API errors (including a twice-exceeded rate limit).
        """
        headers = {}
        if self.gh_type == 'commit':
            # Commit search requires this preview media type on the REST API.
            headers['Accept'] = "application/vnd.github.cloak-preview+json"
        if not specific:
            url = self.url + self.gh_type_config[self.gh_type]['url']
            query = '{} {}'.format(query, self.gh_type_config[self.gh_type]["query"])
            params = {
                'q': query,
                'per_page': options.get('max_amount', self.max_amount)
            }
        else:
            try:
                specific_elements = query.split('/')
                owner = specific_elements[0]
                repo = specific_elements[1]
                number = specific_elements[2]
                if self.gh_type == 'issue':
                    single_type = 'issues'
                elif self.gh_type == 'pr':
                    single_type = 'pulls'
                else:
                    single_type = 'commits'
                url = self.url + 'repos/{owner}/{repo}/{single_type}/{number}'.format(
                    owner=owner, repo=repo, single_type=single_type, number=number)
            except IndexError:
                # NOTE(review): message typo ("ot" -> "not") left unchanged to
                # keep runtime behavior byte-identical.
                raise NeedGithubServiceException('Single option ot valid, must follow "owner/repo/number"')
            params = {}
        self.log.info('Service {} requesting data for query: {}'.format(self.name, query))
        if self.username:
            # NOTE(review): token may be None here; requests would then send a
            # malformed basic-auth pair -- confirm token is always configured
            # together with username.
            auth = (self.username, self.token)
        else:
            auth = None
        resp = requests.get(url, params=params, auth=auth, headers=headers)
        if resp.status_code > 299:
            extra_info = ""
            # Lets try to get information about the rate limit, as this is mostly the main problem.
            if 'rate limit' in resp.json()['message']:
                resp_limit = requests.get(self.url + 'rate_limit', auth=auth)
                extra_info = resp_limit.json()
                self.log.info('GitHub: API rate limit exceeded. We need to wait 60 secs...')
                self.log.info(extra_info)
                # Search rate limit resets within a minute; retry exactly once.
                time.sleep(61)
                resp = requests.get(url, params=params, auth=auth, headers=headers)
                if resp.status_code > 299:
                    if 'rate limit' in resp.json()['message']:
                        raise NeedGithubServiceException("GitHub: API rate limit exceeded (twice). Stop here.")
                    else:
                        raise NeedGithubServiceException('Github service error during request.\n'
                                                         'Status code: {}\n'
                                                         'Error: {}\n'
                                                         '{}'.format(resp.status_code, resp.text, extra_info))
            else:
                raise NeedGithubServiceException('Github service error during request.\n'
                                                 'Status code: {}\n'
                                                 'Error: {}\n'
                                                 '{}'.format(resp.status_code, resp.text, extra_info))
        if specific:
            # Wrap the single element so callers can treat both modes alike.
            return {'items': [resp.json()]}
        return resp.json()
    def request(self, options=None):
        """Entry point called by sphinx-needs for each directive instance.

        Validates that exactly one of the 'query'/'specific' options is given,
        performs the request and converts the response into need dicts.

        :param options: directive options dict.
        :return: list of need data dictionaries.
        :raises NeedGithubServiceException: on invalid options or API errors.
        """
        if options is None:
            options = {}
        self.log.debug('Requesting data for service {}'.format(self.name))
        if 'query' not in options and 'specific' not in options:
            raise NeedGithubServiceException('"query" or "specific" missing as option for github service.')
        elif 'query' in options and 'specific' in options:
            raise NeedGithubServiceException('Only "query" or "specific" allowed for github service. Not both!')
        elif 'query' in options:
            query = options['query']
            specific = False
        else:
            query = options['specific']
            specific = True
        response = self._send(query, options, specific=specific)
        if 'items' not in response.keys():
            if 'errors' in response.keys():
                raise NeedGithubServiceException('GitHub service query error: {}\n'
                                                 'Used query: {}'.format(response["errors"][0]["message"], query))
            else:
                raise NeedGithubServiceException('Github service: Unknown error.')
        if self.gh_type == 'issue' or self.gh_type == 'pr':
            data = self.prepare_issue_data(response['items'], options)
        elif self.gh_type == 'commit':
            data = self.prepare_commit_data(response['items'], options)
        else:
            raise NeedGithubServiceException('Github service failed. Wrong gh_type...')
        return data
    def prepare_issue_data(self, items, options):
        """Convert issue/PR API items into need data dictionaries.

        Wraps and truncates body text, merges GitHub labels with user-given
        tags and downloads the author's avatar.
        """
        data = []
        for item in items:
            # wraps content lines, if they are too long. Respects already existing newlines.
            content_lines = ['\n   '.join(textwrap.wrap(line, 60, break_long_words=True, replace_whitespace=False))
                             for line in item["body"].splitlines() if line.strip() != '']
            content = '\n\n   '.join(content_lines)
            # Reduce content length, if requested by config
            if self.max_content_lines > 0:
                max_lines = int(options.get('max_content_lines', self.max_content_lines))
                content_lines = content.splitlines()
                if len(content_lines) > max_lines:
                    content_lines = content_lines[0:max_lines]
                    content_lines.append('\n   [...]\n')  # Mark, if content got cut
                content = '\n'.join(content_lines)
            # Be sure the content gets not interpreted as rst or html code, so we put
            # everything in a safe code-block
            content = '.. code-block:: text\n\n   ' + content
            prefix = options.get('id_prefix', self.id_prefix)
            need_id = prefix + str(item["number"])
            given_tags = options.get('tags', False)
            github_tags = ",".join([x['name'] for x in item["labels"]])
            if given_tags:
                tags = str(given_tags) + ', ' + str(github_tags)
            else:
                tags = github_tags
            avatar_file_path = self._get_avatar(item["user"]['avatar_url'])
            element_data = {
                'service': self.name,
                'type': options.get('type', self.need_type),
                'layout': options.get('layout', self.layout),
                'id': need_id,
                'title': item["title"],
                'content': content,
                'status': item["state"],
                'tags': tags,
                'user': item["user"]['login'],
                'url': item['html_url'],
                'avatar': avatar_file_path,
                'created_at': item['created_at'],
                'updated_at': item['updated_at'],
                'closed_at': item['closed_at']
            }
            self._add_given_options(options, element_data)
            data.append(element_data)
        return data
    def prepare_commit_data(self, items, options):
        """Convert commit API items into need data dictionaries.

        Uses the first line of the commit message (max 60 chars) as title and
        a short SHA prefix for the need id.
        """
        data = []
        for item in items:
            avatar_file_path = self._get_avatar(item["author"]['avatar_url'])
            element_data = {
                'service': self.name,
                'type': options.get('type', self.need_type),
                'layout': options.get('layout', self.layout),
                'id': self.id_prefix + item['sha'][:6],
                'title': item['commit']['message'].split('\n')[0][:60],  # 1. line, max length 60 chars
                'content': item['commit']['message'],
                'user': item['author']['login'],
                'url': item['html_url'],
                'avatar': avatar_file_path,
                'created_at': item['commit']['author']['date']
            }
            self._add_given_options(options, element_data)
            data.append(element_data)
        return data
    def _get_avatar(self, avatar_url):
        """
        Download and store avatar image

        Falls back to a bundled placeholder image when downloads are disabled
        or the request fails (e.g. auth redirects on GitHub Enterprise).

        :param avatar_url: URL of the avatar image to fetch.
        :return: filesystem path of the stored (or placeholder) image.
        """
        url_parsed = urlparse(avatar_url)
        filename = os.path.basename(url_parsed.path) + '.png'
        path = os.path.join(self.app.srcdir, self.download_folder)
        avatar_file_path = os.path.join(path, filename)
        # Placeholder avatar, if things go wrong or avatar download is deactivated
        default_avatar_file_path = os.path.join(os.path.dirname(__file__), '../images/avatar.png')
        if self.download_avatars:
            # Download only, if file not downloaded yet
            if not os.path.exists(avatar_file_path):
                try:
                    os.mkdir(path)
                except FileExistsError:
                    pass
                if self.username and self.token:
                    auth = (self.username, self.token)
                else:
                    auth = ()
                response = requests.get(avatar_url, auth=auth, allow_redirects=False)
                if response.status_code == 200:
                    with open(avatar_file_path, 'wb') as f:
                        f.write(response.content)
                elif response.status_code == 302:
                    self.log.warning('GitHub service {} could not download avatar image '
                                     'from {}.\n'
                                     '    Status code: {}\n'
                                     '    Reason: Looks like the authentication provider tries to redirect you.'
                                     ' This is not supported and is a common problem, '
                                     'if you use GitHub Enterprise.'.format(self.name, avatar_url,
                                                                            response.status_code))
                    avatar_file_path = default_avatar_file_path
                else:
                    self.log.warning('GitHub service {} could not download avatar image '
                                     'from {}.\n'
                                     '    Status code: {}'.format(self.name, avatar_url,
                                                                  response.status_code
                                                                  ))
                    avatar_file_path = default_avatar_file_path
        else:
            avatar_file_path = default_avatar_file_path
        return avatar_file_path
    def _add_given_options(self, options, element_data):
        """
        Add data from options, which was defined by user but is not set by this service

        :param options: directive options dict.
        :param element_data: need data dict to extend in place.
        :return: None
        """
        for key, value in options.items():
            # Check if given option is not already handled and is not part of the service internal options
            if key not in element_data.keys() and key not in GITHUB_DATA:
                element_data[key] = value
class NeedGithubServiceException(Exception):
    """Raised for any GitHub service failure (bad options, API errors, rate limits).

    Derives from ``Exception`` instead of ``BaseException``: subclassing
    ``BaseException`` directly makes the error invisible to generic
    ``except Exception`` handlers (e.g. Sphinx's top-level error reporting),
    putting it in the same category as KeyboardInterrupt/SystemExit.
    """
|
# -*- coding: utf-8 -*-
"""
fresh_tomatillos.movie_args
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides the glue between fresh_tomatillos.get_config and
fresh_tomatillos.media.Movie. The `generate_movie_args()` function
defined here uses data from a config object to yield the arguments
required by the Movie class's constructor.
"""
from __future__ import unicode_literals
import string
try:
# Python 3
from urllib.parse import urlsplit, parse_qs
except ImportError:
# Python 2
from urlparse import urlsplit, parse_qs
from fresh_tomatillos.exceptions import InvalidVideoID
# Characters permitted in a YouTube video ID: letters, digits, '-' and '_'.
YOUTUBE_ID_CHARACTERS = frozenset(string.ascii_letters + string.digits + '-_')
# Hostnames of full watch-page URLs (the video ID lives in the 'v' query param).
LONG_HOSTNAMES = ('www.youtube.com', 'youtube.com', 'm.youtube.com')
# Hostnames of shortened URLs (the video ID is the URL path).
SHORT_HOSTNAMES = ('youtu.be',)
def _is_potential_youtube_id(id_string):
    """Determine whether a string might be a valid YouTube video ID.

    A True result is not a guarantee of validity, only that the basic
    requirements are met: the string is non-empty and every character is
    one allowed in video IDs. No fixed length (e.g. 11) is enforced,
    since YouTube could change the ID length.

    Args:
        id_string (str): The string to test.

    Returns:
        bool: True if `id_string` might be a valid video ID, False otherwise.
    """
    if not id_string:
        return False
    return all(ch in YOUTUBE_ID_CHARACTERS for ch in id_string)
def _parse_youtube_url(url):
"""Split a URL into its component parts, correcting for a missing scheme.
Args:
url (str): The string assumed to be a URL.
Returns:
SplitResult: An object returned by `urllib.parse.urlsplit()`.
"""
split_url = urlsplit(url)
# Add a URL scheme if `url` doesn't already include one
# (without a scheme, urlsplit incorrectly includes the hostname in 'path')
if (not split_url.scheme):
split_url = urlsplit('https://' + url)
return split_url
def _get_youtube_id_from_url(url):
    """Extract a YouTube video ID from a URL.

    Args:
        url (str): The URL to search for a YouTube video ID. It should point
            at a video (not, say, a channel page) and have one of these
            hostnames: www.youtube.com, m.youtube.com, youtube.com, youtu.be.

    Returns:
        Optional[str]: The video ID if one was found, otherwise None.
    """
    split_url = _parse_youtube_url(url)
    candidate = None
    if split_url.hostname in LONG_HOSTNAMES:
        # Watch-page form: https://www.youtube.com/watch?v=<id>
        values = parse_qs(split_url.query).get('v', [])
        if values:
            candidate = values[0]
    elif split_url.hostname in SHORT_HOSTNAMES:
        # Short form: https://youtu.be/<id> -- drop the leading '/'.
        candidate = split_url.path[1:]
    if candidate is not None and _is_potential_youtube_id(candidate):
        return candidate
    return None
def _get_youtube_id(youtube_source, title):
    """Resolve a YouTube video ID from either a bare ID or a video URL.

    Args:
        youtube_source (str): A YouTube video URL or a YouTube video ID.
        title (str): The movie title associated with `youtube_source`,
            used for error reporting.

    Returns:
        str: A YouTube video ID.

    Raises:
        InvalidVideoID: If `youtube_source` is neither a plausible video ID
            nor a URL containing one.
    """
    # A bare ID passes through untouched.
    if _is_potential_youtube_id(youtube_source):
        return youtube_source
    from_url = _get_youtube_id_from_url(youtube_source)
    if from_url:
        return from_url
    raise InvalidVideoID(
        title=title,
        video_source=youtube_source,
        source_type='ID or URL')
def generate_movie_args(config):
    """Yield argument tuples for the Movie constructor, one per config section.

    Args:
        config (ConfigParser): Every section is assumed to define string
            values for the keys 'summary', 'poster' and 'youtube'.

    Yields:
        tuple[str, str, str, str]: (<title>, <summary>, <poster_url>,
        <video_id>) where:
            title: the movie title (section name),
            summary: a summary of the movie's plot,
            poster_url: URL of an image of the movie's poster,
            video_id: a YouTube video ID for the movie's trailer.

    Raises:
        InvalidVideoID: If any section's 'youtube' value is not valid as a
            YouTube video ID or URL (propagated from _get_youtube_id()).
    """
    for title in config.sections():
        summary = config.get(title, 'summary')
        poster = config.get(title, 'poster')
        # Exceptions raised by _get_youtube_id() are deliberately not caught.
        video_id = _get_youtube_id(config.get(title, 'youtube'), title)
        yield (title, summary, poster, video_id)
|
<reponame>yairkit/flowstep3d<gh_stars>10-100
import torch
from pytorch_lightning.metrics import TensorMetric
from typing import Any, Optional
from losses.supervised_losses import *
from losses.unsupervised_losses import *
from losses.common_losses import *
class EPE3D(TensorMetric):
    """End-point error: mean L2 distance between predicted and ground-truth flow."""
    def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
        # Per-point L2 error along the last feature dim, averaged over all points.
        return (pred_flow - gt_flow).norm(dim=2).mean()
class Acc3DR(TensorMetric):
    """Relaxed accuracy: fraction of points with EPE < 0.1 or relative error < 10%."""
    def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
        err = torch.norm(pred_flow - gt_flow, dim=2)
        # Relative error w.r.t. ground-truth magnitude; epsilon avoids div-by-zero.
        rel_err = err / (torch.norm(gt_flow, dim=2) + 1e-4)
        return torch.logical_or(err < 0.1, rel_err < 0.1).float().mean()
class Acc3DS(TensorMetric):
    """Strict accuracy: fraction of points with EPE < 0.05 or relative error < 5%."""
    def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
        err = torch.norm(pred_flow - gt_flow, dim=2)
        # Relative error w.r.t. ground-truth magnitude; epsilon avoids div-by-zero.
        rel_err = err / (torch.norm(gt_flow, dim=2) + 1e-4)
        return torch.logical_or(err < 0.05, rel_err < 0.05).float().mean()
class EPE3DOutliers(TensorMetric):
    """Outlier ratio: fraction of points with EPE > 0.3 or relative error > 10%."""
    def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
        err = torch.norm(pred_flow - gt_flow, dim=2)
        # Relative error w.r.t. ground-truth magnitude; epsilon avoids div-by-zero.
        rel_err = err / (torch.norm(gt_flow, dim=2) + 1e-4)
        return torch.logical_or(err > 0.3, rel_err > 0.1).float().mean()
class SupervisedL1LossMetric(TensorMetric):
    """Exposes SupervisedL1Loss as a loggable metric."""
    def __init__(self, name: str, reduce_op: Optional[Any] = None):
        super().__init__(name=name, reduce_op=reduce_op)
        self.loss = SupervisedL1Loss()
    def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
        return self.loss(pc_source, pc_target, pred_flow, gt_flow)
class SmoothnessLossMetric(TensorMetric):
    """Exposes SmoothnessLoss as a loggable metric (only source cloud + prediction used)."""
    def __init__(self, smoothness_loss_params, name: str, reduce_op: Optional[Any] = None):
        super().__init__(name=name, reduce_op=reduce_op)
        self.loss = SmoothnessLoss(**smoothness_loss_params)
    def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
        # Target cloud and ground truth are accepted for a uniform signature but unused.
        return self.loss(pc_source, pred_flow)
class ChamferLossMetric(TensorMetric):
    """Exposes ChamferLoss as a loggable metric (ground-truth flow unused)."""
    def __init__(self, chamfer_loss_params, name: str, reduce_op: Optional[Any] = None):
        super().__init__(name=name, reduce_op=reduce_op)
        self.loss = ChamferLoss(**chamfer_loss_params)
    def forward(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flow: torch.Tensor, gt_flow: torch.Tensor) -> torch.Tensor:
        # Ground truth is accepted for a uniform signature but unused.
        return self.loss(pc_source, pc_target, pred_flow)
class SceneFlowMetrics():
    """
    Bundle of scene-flow evaluation metrics, keyed by '<split>_<metric>'.
    """
    def __init__(self, split: str, loss_params: dict, reduce_op: Optional[Any] = None):
        """
        Build the metric dictionary for one data split.
        Reduction is kept as 'none' so metrics can be computed per sample.

        Arguments:
            split : split name string, used to prefix metric keys so the same
                metrics can be logged for different splits
            loss_params: loss configuration dictionary; 'loss_type' decides
                which extra loss metrics are tracked
            reduce_op: reduction operation within DDP (only needed for DDP
                training). Defaults to sum.
        """
        self.metrics = {f'{split}_epe3d': EPE3D(name='epe3d', reduce_op=reduce_op)}
        loss_type = loss_params['loss_type']
        if loss_type == 'sv_l1_reg':
            self.metrics[f'{split}_data_loss'] = SupervisedL1LossMetric(name=f'{split}_data_loss', reduce_op=reduce_op)
            self.metrics[f'{split}_smoothness_loss'] = SmoothnessLossMetric(loss_params['smoothness_loss_params'], name=f'{split}_smoothness_loss', reduce_op=reduce_op)
        if loss_type == 'unsup_l1':
            self.metrics[f'{split}_chamfer_loss'] = ChamferLossMetric(loss_params['chamfer_loss_params'], name=f'{split}_chamfer_loss', reduce_op=reduce_op)
            self.metrics[f'{split}_smoothness_loss'] = SmoothnessLossMetric(loss_params['smoothness_loss_params'], name=f'{split}_smoothness_loss', reduce_op=reduce_op)
        # Accuracy/outlier metrics are only reported for evaluation splits.
        if split in ('test', 'val'):
            self.metrics[f'{split}_acc3dr'] = Acc3DR(name='acc3dr', reduce_op=reduce_op)
            self.metrics[f'{split}_acc3ds'] = Acc3DS(name='acc3ds', reduce_op=reduce_op)
            self.metrics[f'{split}_epe3d_outliers'] = EPE3DOutliers(name='epe3d_outliers', reduce_op=reduce_op)
    def __call__(self, pc_source: torch.Tensor, pc_target: torch.Tensor, pred_flows: list, gt_flow: torch.Tensor) -> dict:
        """
        Evaluate every metric on every predicted flow iteration.

        Arguments:
            pc_source : source point cloud tensor
            pc_target : target point cloud tensor
            pred_flows : list of prediction tensors (one per model iteration)
            gt_flow : ground-truth flow tensor
        Return:
            dict mapping '<metric_key>_i#<iteration>' to the metric value
        """
        return {
            f'{key}_i#{i}': metric(pc_source, pc_target, flow, gt_flow)
            for key, metric in self.metrics.items()
            for i, flow in enumerate(pred_flows)
        }
|
import torch
import torch.nn as nn
import numpy as np
from abc import ABC, abstractmethod
import os
import logging
from .util import metric
class CommonLayer(nn.Module, ABC):
    """Abstract single-layer building block: a wrapped layer plus an activation.

    Subclasses provide the concrete layer constructor via `layer()`
    (e.g. returning nn.Linear).
    """
    def __init__(self, ip_size, op_size, act):
        """Store sizes and activation; the wrapped layer is built lazily.

        Args:
            ip_size: input size (negative means "resolve later in init_layer").
            op_size: output size (negative means "resolve later in init_layer").
            act: activation callable applied after the wrapped layer.
        """
        super(CommonLayer, self).__init__()
        self._ip_size = ip_size
        self._op_size = op_size
        self.act = act
        # Built by init_layer() once the sizes are known.
        self.model = None
    def init_layer(self, **kwargs):
        """Resolve any negative sizes from kwargs and instantiate the layer."""
        if self._ip_size < 0:
            self._ip_size = kwargs['ip_size']
        if self._op_size < 0:
            self._op_size = kwargs['op_size']
        assert self._ip_size > 0 and self._op_size > 0, "sizes are not valid"
        # layer() yields the constructor (e.g. nn.Linear); call it with the sizes.
        self.model = self.layer()(self._ip_size, self._op_size)
    def forward(self, x):
        """Run x through the wrapped layer, then the activation."""
        return self.act(self.model(x))
    def get_shape(self):
        """Return the (input_size, output_size) pair."""
        return (self._ip_size, self._op_size)
    def weights_init(self):
        """Initialize the wrapped layer's weights (Xavier uniform)."""
        nn.init.xavier_uniform_(self.model.weight)
    @abstractmethod
    def layer(self):
        """Return the layer constructor to wrap (e.g. nn.Linear)."""
class LinearLayer(CommonLayer):
    """A fully connected layer with a configurable activation (ReLU by default)."""
    def __init__(self, ip_size, op_size, act=None):
        # Default activation is created per instance, never shared via the signature.
        super(LinearLayer, self).__init__(ip_size, op_size, nn.ReLU() if act is None else act)
        self.init_layer()
    def layer(self):
        """The wrapped layer type: a plain linear transform."""
        return nn.Linear
class BaseNNModule(nn.Module):
    """Stack of LinearLayers built from a list of layer-spec dicts, plus a
    minimal train/predict/score loop. Subclasses define forward(x, base)."""
    def __init__(self, lst_structure):
        # lst_structure: list of dicts with 'input_size', 'output_size' and
        # 'act' (a string evaluated to an activation, e.g. "nn.ReLU()").
        super(BaseNNModule, self).__init__()
        self.layrs = nn.ModuleList()
        for lyr in lst_structure:
            # SECURITY NOTE(review): eval() on the 'act' config string executes
            # arbitrary code -- only feed trusted configuration files here.
            self.layrs.append(LinearLayer(lyr['input_size'], lyr['output_size'], act=eval(lyr['act'])))
    def weights_init(self):
        # Initialize the weights of the model
        for lyr in self.layrs:
            lyr.weights_init()
    def fit(self, data_iterator, optimizer, criterion): # Optimizes the model using the input data. It passes over the data only once
        # Parameters: data_iterator: iterator that returns batches of x and y; optimizer: pytorch optimizer to be used to optimize the network criterion: loss function
        # Returns the last batch's predictions and the summed loss over all batches.
        self.train()  # set model to training mode
        running_loss = 0
        while True:  # compute sum of losses over all batches
            try:
                # Each batch also carries `base_batch`: predictions of the
                # individual base models, consumed by the subclass forward().
                x_batch, y_batch, base_batch = next(data_iterator)
                # compute ypred (forward pass)
                y_pred = self.forward(x_batch, base_batch)
                loss = criterion(y_pred, y_batch)
                optimizer.zero_grad()  # init gradients to zeros
                loss.backward()  # backpropagation (backward pass)
                optimizer.step()  # update the weights
                running_loss += loss.data  # detach().numpy()
            except StopIteration:
                # if StopIteration is raised, break from loop
                break
        logging.debug("    running loss is %.4f" % (running_loss))
        return y_pred, running_loss
    def predict(self, data_loader):  # Predicts the test data
        # Parameters: data_loader: loader exposing full_data_iterator()
        self.eval()  # set model to evaluation mode
        x_data, _, base_pred = data_loader.full_data_iterator()
        y_pred = self.forward(x_data, base_pred)
        return y_pred
    def score(self, data_loader):  # Returns the prediction score (accuracy) of the model on the data_iterator
        # Parameters: data_loader: loader exposing full_data_iterator()
        self.eval()  # set model to evaluation mode
        summary = []
        _, y, _ = data_loader.full_data_iterator()
        y_pred = self.predict(data_loader)
        # metric() comes from .util and compares predictions with labels.
        metrics_mean = metric(y_pred.detach().numpy(), y.detach().numpy())
        logging.debug("- Eval metrics : " + str(metrics_mean))
        return metrics_mean
class GateEnsemble(BaseNNModule):
    """Gating network: weights the base models' predictions and sums them."""

    def __init__(self, lst_structure):
        super(GateEnsemble, self).__init__(lst_structure)

    def forward(self, x, base):
        """x: original input data; base: per-model predictions (one column each)."""
        gate = x
        for lyr in self.layrs:
            gate = lyr(gate)  # the last layer should be softmax
        weighted = gate * base                 # element-wise weighting
        summed = torch.sum(weighted, dim=1)    # one scalar per row
        return summed.view(-1,)
class FFN(BaseNNModule):
    """Plain feed-forward network over the original input features."""

    def __init__(self, lst_structure):
        super(FFN, self).__init__(lst_structure)

    def forward(self, x, base):
        """x: original input data; base: unused here (kept for interface parity)."""
        out = x
        for lyr in self.layrs:
            out = lyr(out)  # the last layer should be softmax
        return out.view(-1,)
from mrcp.panel import *
from mrcp.points import *
from mrcp.track import *
class Curve(BaseElement):
    """A curve rendered as three connected track segments with dots at the
    two interior joints."""

    def __init__(self, pos=(0, 0), color=COLOR_TRACK_DEFAULT, radius=2, left=True, up=True) -> None:
        super().__init__(pos=pos, color=color)
        self._radius = radius
        self._left = left
        self._up = up
        self._tracks = [Track(color=color), Track(color=color), Track(color=color)]

    def attach(self, panel):
        """Attach the curve and each of its segments to the panel."""
        for segment in self._tracks:
            segment.attach(panel)
        return super().attach(panel)

    def paint(self):
        """Compute the segment endpoints from radius/direction and draw them."""
        outer = 1 + 2 * self._radius
        inner = 2 * (self._radius - 1)
        dx, dy = 1, 1
        dsx, dsy = 0, 0
        if self._left:
            dx = -1
            dsx = 1
        if self._up:
            dy = -1
            dsy = 1
        corner_a = self._pos.move((dx * outer, dy * inner))
        corner_a._pos = 'c'
        start_a = self._pos.move((dx * outer, dsy))
        start_a._pos = 't'
        corner_b = self._pos.move((dx * inner, dy * outer))
        corner_b._pos = 'c'
        start_b = self._pos.move((dsx, dy * outer))
        start_b._pos = 'l'
        # Three segments: entry -> corner A -> corner B -> exit.
        segments = [(start_a, corner_a), (corner_a, corner_b), (corner_b, start_b)]
        for track, (begin, end) in zip(self._tracks, segments):
            track._pos = begin
            track._end = end
            track.paint()
        # Dots hide the seams at the two interior joints.
        for joint in (corner_a, corner_b):
            dot = self._panel._dwg.circle(center=joint.toCoords(place='c'),
                                          r=TRACK_SIZE / 2, stroke="none",
                                          fill=self._color)
            self._panel._tLayer.add(dot)
class OutCurve(BaseElement):
    """A shallow exit curve of two track segments with a dot at the bend."""

    def __init__(self, pos=(0, 0), color=COLOR_TRACK_DEFAULT, right=True, up=True, vertical=False) -> None:
        super().__init__(pos=pos, color=color)
        self._right = right
        self._up = up
        self._vertical = vertical
        self._tracks = [Track(color=color), Track(color=color)]

    def attach(self, panel):
        """Attach the curve and each of its segments to the panel."""
        for segment in self._tracks:
            segment.attach(panel)
        return super().attach(panel)

    def paint(self):
        """Pick the three anchor points by orientation, then draw."""
        super().paint()
        dy = -1 if self._up else 1
        origin = self._pos
        if self._vertical:
            if self._right:
                half_point = origin.move(delta=(0, 0))
                bend_point = origin.move(delta=(1, 2 * dy))
                end_point = origin.move(delta=(1, 4 * dy))
            else:
                half_point = origin.move(delta=(0, 0))
                bend_point = origin.move(delta=(-1, 2 * dy))
                end_point = origin.move(delta=(-1, 4 * dy))
            anchor = 't'
        else:
            if self._right:
                half_point = origin.move(delta=(0, 0))
                bend_point = origin.move(delta=(2, dy))
                end_point = origin.move(delta=(4, dy))
            else:
                half_point = origin.move(delta=(4, 0))
                bend_point = origin.move(delta=(2, dy))
                end_point = origin.move(delta=(0, dy))
            anchor = 'l'
        for point in (end_point, bend_point, half_point):
            point._pos = anchor
        self._tracks[0]._pos = bend_point
        self._tracks[0]._end = end_point
        self._tracks[1]._pos = half_point
        self._tracks[1]._end = bend_point
        # Dot hides the seam where the two segments meet.
        dot = self._panel._dwg.circle(center=bend_point.toCoords(),
                                      r=TRACK_SIZE / 2, fill=self._color)
        self._panel._tLayer.add(dot)
        for segment in self._tracks:
            segment.paint()
|
<filename>scrape_reports.py
import requests, json, re, urllib.parse, os
from string import Template
from bs4 import BeautifulSoup
import new_email
def read_value(soup, id):
    """Return the 'value' attribute of the element with the given id,
    mapping the sentinel "n/a" (any case) to an empty string."""
    value = soup.find(id=id).get('value')
    return "" if value.lower() == "n/a" else value
def get_email(email):
    """Normalize an email field: the sentinel "n/a" (any case) becomes ""."""
    return "" if email.lower() == "n/a" else email
def read_template(filename):
    """
    Load *filename* (relative to this script's folder) and return its
    contents wrapped in a string.Template.
    """
    folder = os.path.dirname(os.path.abspath(__file__))
    path = os.path.join(folder, filename)
    with open(path, 'r', encoding='utf-8') as template_file:
        return Template(template_file.read())
def red_wrap_and_make_editable(your_name, element_id):
    """Wrap *your_name* in red inside an editable <span> keyed by *element_id*."""
    return ('<font color="red"><span id="' + element_id +
            '_display_value" class="makeEditable">' + your_name + "</span></font>")
def red_wrap(your_name):
    """Wrap text in a red <font> tag."""
    return '<font color="red">' + your_name + "</font>"
def create_message(your_name, best_school_contact_full_name, principal_full_name,
                   best_school_contact_email, principal_email, parent_email, child_first_name, child_last_name,
                   school_name, child_pronoun, new_addressee=""):
    """Build the plain and red-marked email bodies from message.txt.

    Returns a 12-tuple echoing the contact fields plus (message,
    child_name_for_subject, marked_message).
    """
    template = read_template('message.txt')
    # Use the override if given, otherwise derive the greeting name(s).
    addressee = new_addressee or get_addressee(best_school_contact_email,
                                               best_school_contact_full_name,
                                               principal_email,
                                               principal_full_name)
    child_full_name = ' '.join([child_first_name, child_last_name])
    message = template.substitute(YOUR_NAME=your_name, ADDRESSEE=addressee,
                                  CHILD_FULL_NAME=child_full_name,
                                  CHILD_FIRST_NAME=child_first_name,
                                  SCHOOL_NAME=school_name, CHILD_PRONOUN=child_pronoun)
    # Same message, but every substituted value highlighted in red (and the
    # addressee made editable) for review in the UI.
    marked_message = template.substitute(YOUR_NAME=red_wrap(your_name),
                                         ADDRESSEE=red_wrap_and_make_editable(addressee, "addressee"),
                                         CHILD_FULL_NAME=red_wrap(child_full_name),
                                         CHILD_FIRST_NAME=red_wrap(child_first_name),
                                         SCHOOL_NAME=red_wrap(school_name),
                                         CHILD_PRONOUN=red_wrap(child_pronoun))
    child_name_for_subject = get_child_name_for_subject(child_first_name, child_last_name)
    return (principal_email, principal_full_name, best_school_contact_email,
            best_school_contact_full_name, parent_email, child_first_name,
            child_last_name, school_name, child_pronoun, message,
            child_name_for_subject, marked_message)
def get_addressee(best_school_contact_email, best_school_contact_full_name, principal_email, principal_full_name):
    """Pick the greeting name(s) based on which contact has a usable email.

    Falls back to the other contact when one email is missing/"n/a"; when
    both are usable, greets both. Raises if neither name is present.
    """
    if not (best_school_contact_full_name or principal_full_name):
        raise Exception('No best school contact name or principal name found, got ' +
                        ' and '.join([best_school_contact_full_name, principal_full_name]))
    if not get_email(best_school_contact_email):
        addressee = principal_full_name
    elif not get_email(principal_email):
        addressee = best_school_contact_full_name
    else:
        # FIX: dict.fromkeys dedupes while preserving order; the original
        # built a set, so "A and B" vs "B and A" was arbitrary.
        names = dict.fromkeys([best_school_contact_full_name, principal_full_name])
        addressee = ' and '.join(n for n in names if n)
    return addressee
def get_email_address(best_school_contact_email, principal_email):
    """Return the usable (non-"n/a", non-empty) emails, deduplicated.

    FIX: preserves contact-then-principal order; the original built a set,
    whose iteration order is arbitrary.
    """
    emails = [get_email(best_school_contact_email), get_email(principal_email)]
    return [e for e in dict.fromkeys(emails) if e]
def get_child_name_for_subject(child_first_name, child_last_name):
    """Return "First L." for the email subject line.

    FIX: an empty last name no longer raises IndexError; the first name
    alone is returned instead.
    """
    if not child_last_name:
        return child_first_name
    return ' '.join([child_first_name, child_last_name[0] + '.'])
def generate_email_params(username, apricot_username, apricot_password):
    """Log into Apricot, fetch the first child record awaiting an intro
    email, scrape the child/school/contact details and build the email.

    Returns (email_tup, session) where email_tup is create_message()'s
    12-tuple.  NOTE(review): the `return` sits inside the for loop, so only
    the FIRST document id is ever processed — confirm that is intended.
    """
    # Create a session, holds all the cookies across API calls
    session = requests.Session()
    session = authenticate(apricot_password, apricot_username, session)
    report_json, session = get_report_json(session, username)
    # Record each of the report ids from the JSON
    for ids in report_json['dataset']['groups']['All Rows']['document_ids']:
        response = get_child_report(ids, session)
        # Create a lovely soup of the report response
        soup = BeautifulSoup(response.text, 'html.parser')
        child_first_name, child_last_name, child_pronoun = save_child_info(soup)
        school_info_form_id, school_info_url = new_email.get_url_for_school_info(report_json)
        parent_email = save_parent_email(soup)
        # Second fetch: the school-info form for this child's school.
        response = session.get(school_info_url)
        soup = BeautifulSoup(response.text, 'html.parser')
        school_name = read_value(soup, "field_541")
        best_school_contact_email, best_school_contact_full_name = save_best_school_contact_info(soup)
        principal_email, principal_full_name = save_principal_info(soup)
        email_tup = create_message(username, best_school_contact_full_name, principal_full_name,
                                   best_school_contact_email,
                                   principal_email, parent_email, child_first_name, child_last_name, school_name, child_pronoun)
        return email_tup, session
def save_parent_email(soup):
    """Extract the parent's email (form field_122) from the report page."""
    parent_email = read_value(soup, "field_122")
    return parent_email
def save_principal_info(soup):
    """Extract the principal's full name and email from the school-info form."""
    first = read_value(soup, "field_551_first")
    middle = read_value(soup, "field_551_middle")
    last = read_value(soup, "field_551_last")
    # Join only the non-empty name parts.
    principal_full_name = ' '.join(filter(None, [first, middle, last]))
    principal_email = read_value(soup, "field_555")
    return principal_email, principal_full_name
def save_child_info(soup):
    """Extract the child's first/last name and a subject pronoun.

    Pronoun is "she" only when field_8 is exactly "Female", otherwise "he".
    (The middle/full name were read but never used; those dead locals were
    removed — read_value has no side effects.)
    """
    child_first_name = read_value(soup, "field_2_first")
    child_last_name = read_value(soup, "field_2_last")
    child_sex = read_value(soup, "field_8")
    child_pronoun = "she" if child_sex == "Female" else "he"
    return child_first_name, child_last_name, child_pronoun
def get_child_report(ids, session):
    """GET the full document-edit page for the first record id in *ids*.

    ids: mapping of record keys to report ids (only the first is used).
    Returns the raw requests.Response.
    """
    # Report ID for the first record in the row
    report_id = ids[next(iter(ids.keys()))]
    # Setup for full report request
    url = "https://apricot.socialsolutions.com/document/edit/id/" + report_id
    # Apricot checks the Referer header on this endpoint.
    session.headers.update({'Referer': "https://apricot.socialsolutions.com/auth/approved"})
    response = session.get(url)
    return response
def save_best_school_contact_info(soup):
    """Extract the 'best school contact' full name and email from the form."""
    first = read_value(soup, "field_573_first")
    middle = read_value(soup, "field_573_middle")
    last = read_value(soup, "field_573_last")
    # Join only the non-empty name parts.
    best_school_contact_full_name = ' '.join(filter(None, [first, middle, last]))
    best_school_contact_email = read_value(soup, "field_579")
    return best_school_contact_email, best_school_contact_full_name
def get_report_list(session):
    """GET the Apricot page that lists all reports."""
    return session.get("https://apricot.socialsolutions.com/report/list")
def get_report_json(session, username):
    """Locate the user's report, refresh its 'Awaiting School Intro Email'
    section and return (report_json, session).

    The JSON payload is embedded in the refreshed page inside an element
    whose id is "section_<id>_json".
    """
    response = get_all_reports_list(session)
    user_report_path = get_users_report(response, username)
    section_id, state_id = get_awaiting_intro_email_section_id(session, user_report_path)
    response = get_awaiting_into_email_report(response, section_id, session, state_id)
    # Create a lovely soup of the report response
    soup = BeautifulSoup(response.text, 'html.parser')
    # Grab the JSON from the report response
    report_json = json.loads(soup.find(id="section_" + section_id + "_json").get('data-json'))
    return report_json, session
def get_awaiting_into_email_report(response, section_id, session, state_id):
    """POST the report-refresh request for the given section and return it."""
    # Setup for the report post
    url = "https://apricot.socialsolutions.com/report/refresh/reloading/false"
    session.headers.update({'Referer': "https://apricot.socialsolutions.com/bulletins/list"})
    # FIX: the payload previously contained "\u00a7ion_id=" — an HTML-entity
    # mangling of "&section_id=" (&sect -> \u00a7) — so the section id was
    # never transmitted as a separate parameter.
    payload = ("state_id=" + state_id + "&section_id=" + section_id +
               "&mode=run&in_bulletin=true&fetchNew=true")
    response = session.post(url, data=payload)
    return response
def get_awaiting_intro_email_section_id(session, user_report_path):
    """Fetch the user's report page and return (section_id, state_id) for
    the 'Awaiting School Intro Email ' section.

    Raises ValueError when the section is absent (previously this surfaced
    as an UnboundLocalError on the return line).
    """
    url = "https://apricot.socialsolutions.com" + user_report_path
    response = session.get(url)
    soup = BeautifulSoup(response.text, 'html.parser')
    state_id = soup.find('input', id='state_id')['value']
    # The section list lives in an inline `var x = {...};` script tag.
    sections = \
        json.loads(soup.find('script', language="JavaScript").text.split("=", 1)[1].strip().strip(";"))['report_state'][
            'sections']
    section_id = None
    for section in sections:
        # NOTE: the trailing space in the name matches the value stored in
        # Apricot exactly.
        if section['name'] == 'Awaiting School Intro Email ':
            section_id = section['id']
    if section_id is None:
        raise ValueError("Section 'Awaiting School Intro Email ' not found in report")
    return section_id, state_id
def get_users_report(response, username):
    """Find the link to this user's report on the reports-list page.

    Matches any <h4> whose text contains "<FirstName>'s Report" or
    "<FirstName>'s Caseload" and returns the href of the following <a>.
    NOTE(review): raises IndexError if no heading matches — confirm callers
    expect that.
    """
    # Get first name to identify report element
    first_name = username.split(' ')[0]
    soup = BeautifulSoup(response.text, 'html.parser')
    pattern = re.compile(".*" + first_name + "'s (Report|Caseload).*")
    # Get link to report for username
    report_path = list(filter(lambda item: pattern.search(item.text.strip()), soup.find_all('h4')))[0].findNext('a')[
        'href']
    return report_path
def get_all_reports_list(session):
    """GET the page listing every report (same endpoint as get_report_list)."""
    response = session.get("https://apricot.socialsolutions.com/report/list")
    return response
def authenticate(apricot_password, apricot_username, session):
    """Log the session into Apricot with form-based auth and return it.

    Handles the "already logged in elsewhere" interstitial by confirming
    the new login.
    """
    # Grab the initial cookie values
    session.get("https://apricot.socialsolutions.com/auth")
    # Setup for login post
    url = "https://apricot.socialsolutions.com/auth/check"
    # Credentials are URL-encoded into a form body.
    payload = "serverLocation=https%3A%2F%2Fapricot.socialsolutions.com%2Fauth&username={0}&password={1}".format(
        urllib.parse.quote(apricot_username), urllib.parse.quote(apricot_password))
    session.headers.update(
        {'Referer': "https://apricot.socialsolutions.com/auth", 'content-type': "application/x-www-form-urlencoded"})
    # Login post
    response = session.post(url, data=payload)
    # If the account is logged in already message is found, send the new login request and update cookies accordingly
    if ("This account is currently logged in on another device" in response.text):
        url = "https://apricot.socialsolutions.com/auth/confirmnewlogin"
        response = session.get(url)
    return session
|
import json
import logging
import os
try:
from urllib2 import HTTPError
except ImportError:
from urllib.error import HTTPError
from django.conf import settings
from django.contrib.auth import login
from django.shortcuts import redirect, render
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import requests
from tweet_display.helper import get_file_url
from tweet_display.tasks import import_data
from .models import OpenHumansMember
from .forms import UploadFileForm
# Open Humans settings
OH_BASE_URL = settings.OH_BASE_URL
OH_API_BASE = OH_BASE_URL + '/api/direct-sharing'
# Direct-sharing file API endpoints.
OH_DELETE_FILES = OH_API_BASE + '/project/files/delete/'
OH_DIRECT_UPLOAD = OH_API_BASE + '/project/files/upload/direct/'
OH_DIRECT_UPLOAD_COMPLETE = OH_API_BASE + '/project/files/upload/complete/'
# Base URL of this app; used to build the OAuth2 redirect URI.
APP_BASE_URL = os.getenv('APP_BASE_URL', 'http://127.0.0.1:5000/users')
APP_PROJ_PAGE = 'https://www.openhumans.org/activity/twitter-archive-analyzer/'
# Set up logging.
logger = logging.getLogger(__name__)
def oh_get_member_data(token):
    """
    Exchange OAuth2 token for member data.

    Raises Exception on any non-200 response.
    """
    req = requests.get(
        '{}/api/direct-sharing/project/exchange-member/'.format(OH_BASE_URL),
        params={'access_token': token})
    if req.status_code == 200:
        return req.json()
    # FIX: removed the unreachable `return None` that followed this raise.
    raise Exception('Status code {}'.format(req.status_code))
def oh_code_to_member(code):
    """
    Exchange code for token, use this to create and return OpenHumansMember.
    If a matching OpenHumansMember already exists in db, update and return it.
    Returns None on any failure (missing config, bad code, error response).
    """
    if settings.OH_CLIENT_SECRET and settings.OH_CLIENT_ID and code:
        data = {
            'grant_type': 'authorization_code',
            'redirect_uri': '{}/complete'.format(APP_BASE_URL),
            'code': code,
        }
        req = requests.post(
            '{}/oauth2/token/'.format(OH_BASE_URL),
            data=data,
            auth=requests.auth.HTTPBasicAuth(
                settings.OH_CLIENT_ID,
                settings.OH_CLIENT_SECRET
            ))
        data = req.json()
        if 'access_token' in data:
            oh_id = oh_get_member_data(
                data['access_token'])['project_member_id']
            try:
                # Existing member: refresh the stored tokens.
                oh_member = OpenHumansMember.objects.get(oh_id=oh_id)
                logger.debug('Member {} re-authorized.'.format(oh_id))
                oh_member.access_token = data['access_token']
                oh_member.refresh_token = data['refresh_token']
                oh_member.token_expires = OpenHumansMember.get_expiration(
                    data['expires_in'])
            except OpenHumansMember.DoesNotExist:
                oh_member = OpenHumansMember.create(
                    oh_id=oh_id,
                    access_token=data['access_token'],
                    refresh_token=data['refresh_token'],
                    expires_in=data['expires_in'])
                logger.debug('Member {} created.'.format(oh_id))
            oh_member.save()
            return oh_member
        elif 'error' in data:
            # FIX: reuse the already-parsed response instead of calling
            # req.json() two more times.
            logger.debug('Error in token exchange: {}'.format(data))
        else:
            logger.warning('Neither token nor error info in OH response!')
    else:
        logger.error('OH_CLIENT_SECRET or code are unavailable')
    return None
def delete_all_oh_files(oh_member):
    """
    Delete all current project files in Open Humans for this project member.

    Fire-and-forget: the response status is not checked.
    """
    requests.post(
        OH_DELETE_FILES,
        params={'access_token': oh_member.get_access_token()},
        data={'project_member_id': oh_member.oh_id,
              'all_files': True})
def upload_file_to_oh(oh_member, filehandle, metadata):
    """
    This demonstrates using the Open Humans "large file" upload process.

    The small file upload process is simpler, but it can time out. This
    alternate approach is required for large files, and still appropriate
    for small files.

    This process is "direct to S3" using three steps: 1. get S3 target URL from
    Open Humans, 2. Perform the upload, 3. Notify Open Humans when complete.

    Raises urllib HTTPError on any unexpected response status.
    """
    # Remove any previous file - replace with this one.
    delete_all_oh_files(oh_member)

    # Get the S3 target from Open Humans.
    upload_url = '{}?access_token={}'.format(
        OH_DIRECT_UPLOAD, oh_member.get_access_token())
    req1 = requests.post(
        upload_url,
        data={'project_member_id': oh_member.oh_id,
              'filename': filehandle.name,
              'metadata': json.dumps(metadata)})
    if req1.status_code != 201:
        # NOTE: HTTPError requires the hdrs/fp arguments; pass None for both.
        raise HTTPError(upload_url, req1.status_code,
                        'Bad response when starting file upload.', None, None)
    upload_info = req1.json()  # parsed once, reused below

    # Upload to S3 target.
    req2 = requests.put(url=upload_info['url'], data=filehandle)
    if req2.status_code != 200:
        raise HTTPError(upload_info['url'], req2.status_code,
                        'Bad response when uploading to target.', None, None)

    # Report completed upload to Open Humans.
    complete_url = ('{}?access_token={}'.format(
        OH_DIRECT_UPLOAD_COMPLETE, oh_member.get_access_token()))
    req3 = requests.post(
        complete_url,
        data={'project_member_id': oh_member.oh_id,
              'file_id': upload_info['id']})
    if req3.status_code != 200:
        # FIX: report req3's status — the original reported req2's.
        raise HTTPError(complete_url, req3.status_code,
                        'Bad response when completing upload.', None, None)
def index(request):
    """
    Starting page for app; authenticated users go straight to the dashboard.
    """
    if request.user.is_authenticated:
        return redirect('dashboard')
    context = {'client_id': settings.OH_CLIENT_ID,
               'oh_proj_page': settings.OH_ACTIVITY_PAGE,
               'redirect_uri': settings.OH_REDIRECT_URI}
    return render(request, 'users/index.html', context=context)
def complete(request):
    """
    Receive user from Open Humans. Store data, start data upload task.

    GET: OAuth2 return leg — exchange code, log the member in, and either
    redirect to the dashboard (data already uploaded) or show the upload
    form.  POST: receive the uploaded archive and start processing.
    """
    logger.debug("Received user returning from Open Humans.")
    form = None

    if request.method == 'GET':
        # Exchange code for token.
        # This creates an OpenHumansMember and associated User account.
        code = request.GET.get('code', '')
        oh_member = oh_code_to_member(code=code)
        if oh_member:
            # Log in the user.
            user = oh_member.user
            login(request, user,
                  backend='django.contrib.auth.backends.ModelBackend')
        elif not request.user.is_authenticated:
            # Bad/missing code and no existing session: back to start.
            logger.debug('Invalid code exchange. User returned to start page.')
            return redirect('/')
        else:
            # Already logged in; reuse the session's member.
            oh_member = request.user.openhumansmember

        if get_file_url(oh_member.oh_id) is not None:
            # Archive already uploaded previously.
            return redirect('dashboard')

        form = UploadFileForm()
        context = {'oh_id': oh_member.oh_id,
                   'oh_member': oh_member,
                   'oh_proj_page': settings.OH_ACTIVITY_PAGE,
                   'form': form}
        return render(request, 'users/complete.html',
                      context=context)

    elif request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            metadata = {'tags': ['twitter', 'twitter-archive'],
                        'description': 'Twitter achive file.'}
            upload_file_to_oh(
                request.user.openhumansmember,
                request.FILES['file'],
                metadata)
        else:
            logger.debug('INVALID FORM')
        # NOTE(review): the parse task is scheduled and the user redirected
        # even when the form is invalid — confirm this is intentional.
        import_data.delay(request.user.openhumansmember.oh_id)
        return redirect('dashboard')
def public_data(request):
    """Paginated listing of members who opted to share their data publicly."""
    members = OpenHumansMember.objects.filter(public=True).order_by('oh_id')
    paginator = Paginator(members, 20)  # 20 members per page
    page = request.GET.get('page')
    try:
        public_users = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page parameter: show the first page.
        public_users = paginator.page(1)
    except EmptyPage:
        # Page out of range (e.g. 9999): show the last page.
        public_users = paginator.page(paginator.num_pages)
    return render(request, 'users/public_data.html',
                  {'public_users': public_users,
                   'section': 'public_data'})
def dashboard(request):
    """
    Give options to delete account, make data public/private,
    reupload archive, trigger new parsing of archive.
    """
    if not request.user.is_authenticated:
        return redirect("/")
    oh_member = request.user.openhumansmember
    context = {'client_id': settings.OH_CLIENT_ID,
               'oh_proj_page': settings.OH_ACTIVITY_PAGE,
               'oh_member': oh_member,
               'has_data': bool(get_file_url(oh_member.oh_id)),
               'section': 'home'}
    return render(request, 'users/dashboard.html', context=context)
def delete_account(request):
    """Remove the member record and the Django user, then go home."""
    if request.user.is_authenticated:
        member = request.user.openhumansmember
        member.delete()
        request.user.delete()
    return redirect("/")
def access_switch(request):
    """Toggle the member's public-sharing flag, then return to the dashboard."""
    if request.user.is_authenticated:
        oh_member = request.user.openhumansmember
        # Idiomatic toggle replaces the original if/else assignment.
        oh_member.public = not oh_member.public
        oh_member.save()
    return redirect('dashboard')
def regenerate_graphs(request):
    """Kick off a fresh archive-parsing task for the logged-in member (POST only)."""
    if request.method == 'POST' and request.user.is_authenticated:
        import_data.delay(request.user.openhumansmember.oh_id)
    return redirect('dashboard')
def upload_old(request):
    """Render the legacy upload page for logged-in users."""
    if request.user.is_authenticated:
        return render(request, 'users/upload_old.html')
    # NOTE(review): unauthenticated users are sent to 'dashboard', which
    # itself redirects to '/' — confirm this indirection is intended.
    return redirect('dashboard')
|
# Copyright 2018-2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TODO: provider policy execution initialization for outputs
"""
import datetime
import logging
import time
try:
from google.cloud.storage import Bucket, Client as StorageClient
except ImportError:
Bucket, StorageClient = None, None
try:
from google.cloud.logging import Client as LogClient
from google.cloud.logging.handlers import CloudLoggingHandler
from google.cloud.logging.resource import Resource
except ImportError:
LogClient = None
from c7n.output import (
blob_outputs,
log_outputs,
metrics_outputs,
BlobOutput,
Metrics,
LogOutput)
from c7n.utils import local_session
@metrics_outputs.register('gcp')
class StackDriverMetrics(Metrics):
    """Publish Custodian policy metrics as custom Cloud Monitoring metrics."""

    METRICS_PREFIX = 'custom.googleapis.com/custodian/policy'

    DESCRIPTOR_COMMON = {
        # FIX: the Monitoring API field is 'metricKind' (the original said
        # 'metricsKind'); _format_metric below already used the correct key.
        'metricKind': 'GAUGE',
        'labels': [{
            'key': 'policy',
            'valueType': 'STRING',
            'description': 'Custodian Policy'}],
    }

    METRICS_DESCRIPTORS = {
        'resourcecount': {
            'type': '{}/{}'.format(METRICS_PREFIX, 'resourcecount'),
            'valueType': 'INT64',
            'units': 'items',
            'description': 'Number of resources that matched the given policy',
            'displayName': 'Resources',
        },
        'resourcetime': {
            'type': '{}/{}'.format(METRICS_PREFIX, 'resourcetime'),
            'valueType': 'DOUBLE',
            'units': 's',
            'description': 'Time to query the resources for a given policy',
            'displayName': 'Query Time',
        },
        'actiontime': {
            'type': '{}/{}'.format(METRICS_PREFIX, 'actiontime'),
            'valueType': 'DOUBLE',
            'units': 's',
            'description': 'Time to perform actions for a given policy',
            'displayName': 'Action Time',
        },
    }

    # Custom metrics docs https://tinyurl.com/y8rrghwc
    log = logging.getLogger('c7n_gcp.metrics')

    def __init__(self, ctx, config=None):
        super(StackDriverMetrics, self).__init__(ctx, config)
        self.project_id = local_session(self.ctx.session_factory).get_default_project()
        # Metrics may be written to a different project than the one queried.
        self.write_metrics_project_id = self.config.get('project_id', self.project_id)

    def initialize(self):
        """One time initialization of metrics descriptors.

        # tbd - unclear if this adding significant value.
        """
        client = local_session(self.ctx.session_factory).client(
            'monitoring', 'v3', 'projects.metricDescriptors')
        # FIX: the list response key is 'metricDescriptors', not
        # 'metricsDescriptors' — with the wrong key this map was always
        # empty, so descriptors were re-created on every run.
        descriptor_map = {
            n['type'].rsplit('/', 1)[-1]: n for n in client.execute_command('list', {
                'name': 'projects/%s' % self.project_id,
                'filter': 'metric.type=startswith("{}")'.format(self.METRICS_PREFIX)}).get(
                    'metricDescriptors', [])}
        created = False
        for name in self.METRICS_DESCRIPTORS:
            if name in descriptor_map:
                continue
            created = True
            md = self.METRICS_DESCRIPTORS[name]
            md.update(self.DESCRIPTOR_COMMON)
            client.execute_command(
                'create', {'name': 'projects/%s' % self.project_id, 'body': md})
        if created:
            self.log.info("Initializing StackDriver Metrics Descriptors")
            # Give the API a moment to register the new descriptors.
            time.sleep(5)

    def _format_metric(self, key, value, unit, dimensions):
        """Build a single timeSeries entry for the write call."""
        # Resource is a Google controlled vocabulary with artificial
        # limitations on resource type there's not much useful we can
        # utilize.
        now = datetime.datetime.utcnow()
        metrics_series = {
            'metric': {
                'type': 'custom.googleapis.com/custodian/policy/%s' % key.lower(),
                'labels': {
                    'policy': self.ctx.policy.name,
                    'project_id': self.project_id
                },
            },
            'metricKind': 'GAUGE',
            'valueType': 'INT64',
            'resource': {
                'type': 'global',
            },
            # GAUGE points use a zero-length interval (start == end).
            'points': [{
                'interval': {
                    'endTime': now.isoformat('T') + 'Z',
                    'startTime': now.isoformat('T') + 'Z'},
                'value': {'int64Value': int(value)}}]
        }
        return metrics_series

    def _put_metrics(self, ns, metrics):
        """Write the formatted time series to the configured project."""
        session = local_session(self.ctx.session_factory)
        client = session.client('monitoring', 'v3', 'projects.timeSeries')
        params = {'name': "projects/{}".format(self.write_metrics_project_id),
                  'body': {'timeSeries': metrics}}
        client.execute_command('create', params)
@log_outputs.register('gcp', condition=bool(LogClient))
class StackDriverLogging(LogOutput):
    """Ship policy-run logs to Stackdriver Logging."""

    def get_log_group(self):
        """Log name: 'custodian-<netloc>-<policy>' or 'custodian-<policy>'."""
        prefix = self.config.netloc
        if prefix:
            return "custodian-%s-%s" % (prefix, self.ctx.policy.name)
        return "custodian-%s" % self.ctx.policy.name

    def get_handler(self):
        # TODO drop these grpc variants for the REST versions, and we can drop
        # protobuf/grpc deps, and also so we can record tests.
        project_id = local_session(self.ctx.session_factory).get_default_project()
        return CloudLoggingHandler(
            LogClient(project_id),
            self.get_log_group(),
            labels={
                'policy': self.ctx.policy.name,
                'resource': self.ctx.policy.resource_type},
            resource=Resource(type='project', labels={'project_id': project_id}))

    def leave_log(self):
        super(StackDriverLogging, self).leave_log()
        # Flush pending records and stop the background transport thread.
        self.handler.transport.flush()
        self.handler.transport.worker.stop()
@blob_outputs.register('gs', condition=bool(StorageClient))
class GCPStorageOutput(BlobOutput):
    # Writes policy output blobs to a Google Cloud Storage bucket.

    def __init__(self, ctx, config=None):
        super().__init__(ctx, config)
        # NOTE(review): assumes BlobOutput.__init__ leaves the bucket *name*
        # in self.bucket, which is then replaced by a client-bound Bucket
        # handle — confirm against c7n.output.BlobOutput.
        self.bucket = Bucket(StorageClient(), self.bucket)

    def upload_file(self, path, key):
        # Upload the local file at `path` under blob name `key`.
        blob = self.bucket.blob(key)
        blob.upload_from_filename(path)
|
<reponame>j-t-t/crash-model
from .. import standardize_crashes
from data.util import write_geocode_cache
from jsonschema import validate
import json
import os
import csv
import pandas as pd
from pandas.util.testing import assert_frame_equal
import geopandas as gpd
from shapely.geometry import Point
import pytz
import pytest
# Directory containing this test file (for locating fixture data).
TEST_FP = os.path.dirname(os.path.abspath(__file__))
def create_test_csv(tmpdir, filename):
    """Write a two-row fixture CSV named *filename* into tmpdir and return
    the directory path as a string."""
    tmppath = tmpdir.strpath
    rows = [{
        'key1': 'value1',
        'key2': 'value2'
    }, {
        'key1': 'another value',
        'key2': 5
    }]
    with open(os.path.join(tmppath, filename), 'w') as handle:
        writer = csv.DictWriter(handle, list(rows[0].keys()))
        writer.writeheader()
        writer.writerows(rows)
    return tmppath
def test_add_id(tmpdir):
    """
    Create a dummy csv file without ID fields, add IDs to it
    """
    tmppath = create_test_csv(tmpdir, 'test.csv')
    filename = os.path.join(tmppath, 'test.csv')
    standardize_crashes.add_id(filename, 'ID')
    expected = [{
        'ID': '1',
        'key1': 'value1',
        'key2': 'value2'
    }, {
        'ID': '2',
        'key1': 'another value',
        'key2': '5'
    }]

    def check_file():
        # Compare the rewritten file row-by-row against the expectation.
        with open(filename) as handle:
            for i, row in enumerate(csv.DictReader(handle)):
                assert row == expected[i]

    check_file()
    # add_id must be idempotent: a second call leaves the file unchanged.
    standardize_crashes.add_id(filename, 'ID')
    check_file()
def test_numeric_and_string_ids():
    """
    Confirm that crashes with both numeric and string ids pass validation
    """
    def make_crash(crash_id):
        return {
            "id": crash_id,
            "dateOccurred": "2016-01-01T02:30:23-05:00",
            "location": {
                "latitude": 42.317987926802246,
                "longitude": -71.06188127008645
            }
        }

    test_crashes = [make_crash(12345), make_crash("A1B2C3D4E5")]
    # Schema lives four directories up from this test file.
    repo_root = os.path.dirname(os.path.dirname(os.path.dirname(
        os.path.dirname(os.path.abspath(__file__)))))
    schema_path = os.path.join(repo_root, "standards", "crashes-schema.json")
    with open(schema_path) as schema_file:
        validate(test_crashes, json.load(schema_file))
def test_standardize_with_cache(tmpdir):
    """Crashes without coordinates are skipped unless a geocoded-address
    cache supplies their location."""
    fields = {
        "id": "id",
        "date_complete": "date_of_crash",
        "time": "",
        "time_format": "",
        "latitude": "lat",
        "longitude": "lng",
        "address": 'location'
    }

    # Confirm crashes without coordinates or a geocoded address file are skipped
    crashes_no_coords = [{
        "id": "A1B2C3D4E5",
        "date_of_crash": "2016-01-01T02:30:23-05:00",
        "lat": "",
        "lng": "",
        'location': 'test',
    }]
    assert len(standardize_crashes.read_standardized_fields(
        crashes_no_coords, fields, {'address': 'location'},
        pytz.timezone("America/New_York"), tmpdir, 'test_city')) == 0

    # Drop the coordinate field mappings so the address cache is the only
    # possible source of a location.
    fields['latitude'] = ''
    fields['longitude'] = ''

    # Confirm crashes with a geocoded address are included
    os.mkdir(os.path.join(tmpdir, 'processed'))
    write_geocode_cache({'test test_city': ['test st', 42, -71, 'S']},
                        filename=tmpdir + '/processed/geocoded_addresses.csv')
    assert len(standardize_crashes.read_standardized_fields(
        crashes_no_coords, fields, {'address': 'location'},
        pytz.timezone("America/New_York"), tmpdir, 'test_city')) == 1
def test_date_formats(tmpdir):
    """
    Test various combinations of supplying dates.

    Covers: complete vs. deconstructed (year/month/day) dates, missing
    dates, missing day-of-month, and startdate/enddate filtering in
    standardize_crashes.read_standardized_fields.
    """
    # Mapping where the full date comes from a single 'date_of_crash' field.
    fields_date_constructed = {
        "id": "id",
        "date_complete": "date_of_crash",
        "time": "",
        "time_format": "",
        "latitude": "lat",
        "longitude": "lng"
    }
    # Confirm crashes without coordinates and no address are skipped
    crashes_no_coords = [{
        "id": "A1B2C3D4E5",
        "date_of_crash": "2016-01-01T02:30:23-05:00",
        "lat": "",
        "lng": ""
    }]
    assert len(standardize_crashes.read_standardized_fields(
        crashes_no_coords, fields_date_constructed, {},
        pytz.timezone("America/New_York"), tmpdir, 'test_city')) == 0
    # Confirm crashes using date_complete but without a value are skipped
    crashes_no_date = [{
        "id": "A1B2C3D4E5",
        "date_of_crash": "",
        "lat": 42.317987926802246,
        "lng": -71.06188127008645
    }]
    assert len(standardize_crashes.read_standardized_fields(
        crashes_no_date, fields_date_constructed, {},
        pytz.timezone("America/New_York"), tmpdir, 'test_city')) == 0
    # Confirm crashes using date_complete with a value are standardized
    crashes_with_date = [{
        "id": "A1B2C3D4E5",
        "date_of_crash": "2016-01-01T02:30:23-05:00",
        "lat": 42.317987926802246,
        "lng": -71.06188127008645
    }]
    assert len(standardize_crashes.read_standardized_fields(
        crashes_with_date, fields_date_constructed, {},
        pytz.timezone("America/New_York"), tmpdir, 'test_city')) == 1
    # Confirm crashes using deconstructed date with all values are standardized
    fields_date_deconstructed = {
        "id": "id",
        "date_complete": "",
        "date_year": "year_of_crash",
        "date_month": "month_of_crash",
        "date_day": "day_of_crash",
        "time": "",
        "time_format": "",
        "latitude": "lat",
        "longitude": "lng"
    }
    crashes_with_date = [{
        "id": "A1B2C3D4E5",
        "year_of_crash": "2016",
        "month_of_crash": "01",
        "day_of_crash": "01",
        "lat": 42.317987926802246,
        "lng": -71.06188127008645
    }]
    assert len(standardize_crashes.read_standardized_fields(
        crashes_with_date, fields_date_deconstructed, {},
        pytz.timezone("America/New_York"), tmpdir, 'test_city')) == 1
    # Confirm crashes outside of specified start & end year ranges are dropped
    crashes_in_different_years = [{
        "id": "1",
        "date_of_crash": "2016-12-31T02:30:23-05:00",
        "lat": 42.317987926802246,
        "lng": -71.06188127008645
    },
        {
        "id": "2",
        "date_of_crash": "2017-01-01T02:30:23-05:00",
        "lat": 42.317987926802246,
        "lng": -71.06188127008645
    },
        {
        "id": "3",
        "date_of_crash": "2018-01-01T02:30:23-05:00",
        "lat": 42.317987926802246,
        "lng": -71.06188127008645
    }]
    # filter crashes prior to a start year
    assert len(standardize_crashes.read_standardized_fields(
        crashes_in_different_years, fields_date_constructed, {},
        pytz.timezone("America/New_York"), tmpdir, 'test_city',
        startdate='2017-01-01T00:00:00-05:00')) == 2
    # filter crashes after an end year
    assert len(standardize_crashes.read_standardized_fields(
        crashes_in_different_years, fields_date_constructed, {},
        pytz.timezone("America/New_York"),
        tmpdir, 'test_city',
        enddate='2016-12-31')) == 1
    # filter crashes after an end year
    # NOTE(review): enddate='2017-01-01' keeps the 2017-01-01 crash, so the
    # end bound appears to be inclusive of that date — confirm in
    # read_standardized_fields.
    assert len(standardize_crashes.read_standardized_fields(
        crashes_in_different_years, fields_date_constructed, {},
        pytz.timezone("America/New_York"),
        tmpdir, 'test_city',
        enddate='2017-01-01')) == 2
    # filter crashes between a start and end year
    # assert len(standardize_crashes.read_standardized_fields(
    #    crashes_in_different_years, fields_date_constructed, {}, 2016, '2017-01-01T00:00:00-05:00')) == 1
    # Confirm crashes using deconstructed date but missing a day are standardized with a random day
    fields_date_no_day = {
        "id": "id",
        "date_complete": "",
        "date_year": "year_of_crash",
        "date_month": "month_of_crash",
        "date_day": "",
        "time": "",
        "time_format": "",
        "latitude": "lat",
        "longitude": "lng"
    }
    crashes_with_date = [{
        "id": "A1B2C3D4E5",
        "year_of_crash": 2017,
        "month_of_crash": 1,
        "lat": 42.317987926802246,
        "lng": -71.06188127008645
    }]
    assert len(standardize_crashes.read_standardized_fields(
        crashes_with_date, fields_date_no_day, {},
        pytz.timezone("America/New_York"), tmpdir, 'test_city')) == 1
def test_make_rollup():
    """
    Tests total number of crashes per crash location is correctly calculated and
    list of unique crash dates per location is correctly generated
    """
    # Three locations: GREEN ST (3 crashes, 3 distinct dates),
    # LANDSDOWNE ST (1 crash), LOCKE ST (2 crashes at the same timestamp,
    # so only 1 distinct date).
    standardized_crashes = [{
        "id": 1,
        "dateOccurred": "2015-01-01T00:45:00-05:00",
        "location": {
            "latitude": 42.365,
            "longitude": -71.106
        },
        "address": "GREEN ST & PLEASANT ST",
        "vehicles": []
    }, {
        "id": 1,
        "dateOccurred": "2015-04-15T00:45:00-05:00",
        "location": {
            "latitude": 42.365,
            "longitude": -71.106
        },
        "address": "GREEN ST & PLEASANT ST",
        "vehicles": []
    }, {
        "id": 1,
        "dateOccurred": "2015-10-20T00:45:00-05:00",
        "location": {
            "latitude": 42.365,
            "longitude": -71.106
        },
        "address": "GREEN ST & PLEASANT ST",
        "vehicles": []
    }, {
        "id": 2,
        "dateOccurred": "2015-01-01T01:12:00-05:00",
        "location": {
            "latitude": 42.361,
            "longitude": -71.097
        },
        "address": "LANDSDOWNE ST & MASSACHUSETTS AVE",
        "vehicles": []
    }, {
        "id": 3,
        "dateOccurred": "2015-01-01T01:54:00-05:00",
        "location": {
            "latitude": 42.396,
            "longitude": -71.127
        },
        "address": "LOCKE ST & SHEA RD",
        "vehicles": []
    }, {
        "id": 3,
        "dateOccurred": "2015-01-01T01:54:00-05:00",
        "location": {
            "latitude": 42.396,
            "longitude": -71.127
        },
        "address": "LOCKE ST & SHEA RD",
        "vehicles": []
    }]
    results = standardize_crashes.make_crash_rollup(standardized_crashes)
    # Expected row order appears sorted by latitude ascending — determined
    # by make_crash_rollup's grouping; TODO confirm.
    expected_rollup = gpd.GeoDataFrame()
    expected_rollup["coordinates"] = [Point(-71.097, 42.361), Point(-71.106, 42.365), Point(-71.127, 42.396)]
    expected_rollup["total_crashes"] = [1, 3, 2]
    # crash_dates is a comma-joined string of the distinct timestamps;
    # the duplicate id-3 crash contributes to total_crashes but not here.
    expected_rollup["crash_dates"] = ["2015-01-01T01:12:00-05:00",
                                      "2015-01-01T00:45:00-05:00,2015-04-15T00:45:00-05:00,2015-10-20T00:45:00-05:00",
                                      "2015-01-01T01:54:00-05:00"]
    assert_frame_equal(results, expected_rollup)
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import mne
import numpy as np
from . import download as dl
import os
import glob
import zipfile
import yaml
from scipy.io import loadmat
from distutils.dir_util import copy_tree
import shutil
import pandas as pd
# Base URL of the Zenodo record hosting the per-subject bi2014a archives.
BI2014a_URL = 'https://zenodo.org/record/3266223/files/'
class BrainInvaders2014a():
    '''
    This dataset contains electroencephalographic (EEG) recordings of 71 subjects
    playing to a visual P300 Brain-Computer Interface (BCI) videogame named Brain Invaders.
    The interface uses the oddball paradigm on a grid of 36 symbols (1 Target, 35 Non-Target)
    that are flashed pseudo-randomly to elicit the P300 response. EEG data were recorded
    using 16 active dry electrodes with up to three game sessions. The experiment took place
    at GIPSA-lab, Grenoble, France, in 2014. A full description of the experiment is available
    at https://hal.archives-ouvertes.fr/hal-02171575. Python code for manipulating the data
    is available at https://github.com/plcrodrigues/py.BI.EEG.2014a-GIPSA. The ID of this
    dataset is bi2014a.

    NOTE(review): the description mentions 71 subjects but ``subject_list``
    only exposes subjects 1-64 — confirm which is intended.
    '''

    def __init__(self):
        # Subjects are numbered 1..64 inclusive.
        self.subject_list = list(range(1, 65))

    def _get_single_subject_data(self, subject):
        """Return data for a single subject as {session: {run: mne.io.Raw}}."""
        file_path_list = self.data_path(subject)

        sessions = {}
        session_name = 'session_1'
        sessions[session_name] = {}
        run_name = 'run_1'

        # 16 EEG channels followed by the stimulation (trigger) channel.
        chnames = ['Fp1',
                   'Fp2',
                   'F3',
                   'AFz',
                   'F4',
                   'T7',
                   'Cz',
                   'T8',
                   'P7',
                   'P3',
                   'Pz',
                   'P4',
                   'P8',
                   'O1',
                   'Oz',
                   'O2',
                   'STI 014']
        chtypes = ['eeg'] * 16 + ['stim']

        file_path = file_path_list[0]
        # 'samples' is stored as (n_samples, n_channels); transpose to the
        # (n_channels, n_samples) layout expected by MNE.
        D = loadmat(file_path)['samples'].T
        S = D[1:17, :]   # EEG rows 1..16 (row 0 is skipped — presumably a counter/timestamp; verify)
        stim = D[-1, :]  # last row is the trigger channel
        X = np.concatenate([S, stim[None, :]])

        info = mne.create_info(ch_names=chnames, sfreq=512,
                               ch_types=chtypes, montage='standard_1020',
                               verbose=False)
        raw = mne.io.RawArray(data=X, info=info, verbose=False)

        sessions[session_name][run_name] = raw
        return sessions

    def data_path(self, subject, path=None, force_update=False,
                  update_path=None, verbose=None):
        """Download (if needed), unzip and return the .mat path(s) for ``subject``.

        Raises:
            ValueError: if ``subject`` is not in ``self.subject_list``.
        """
        if subject not in self.subject_list:
            raise ValueError("Invalid subject number")

        # Download (or reuse the cached copy of) the subject's zip archive.
        zip_name = 'subject_' + str(subject).zfill(2) + '.zip'
        url = BI2014a_URL + zip_name
        path_zip = dl.data_path(url, 'BRAININVADERS2014A')
        # Remove the archive file name to obtain its parent folder.  The
        # original used str.strip(zip_name), which strips *characters* (not a
        # suffix) from both ends and could corrupt the directory path.
        path_folder = path_zip[:-len(zip_name)]

        # Extract once; subsequent calls reuse the extracted folder.
        path_folder_subject = path_folder + \
            'subject_' + str(subject).zfill(2) + os.sep
        if not os.path.isdir(path_folder_subject):
            os.mkdir(path_folder_subject)
            print('unzip', path_zip)
            # Context manager guarantees the archive handle is closed
            # (the original left the ZipFile open).
            with zipfile.ZipFile(path_zip, "r") as zip_ref:
                zip_ref.extractall(path_folder_subject)

        subject_paths = []
        # filter the data regarding the experimental conditions
        subject_paths.append(path_folder_subject +
                             'subject_' + str(subject).zfill(2) + '.mat')
        return subject_paths
|
<filename>erl_tabular_experiments.py
import copy
import os
import re
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import roc_auc_score, f1_score
from sklearn.linear_model import LinearRegression, LogisticRegression
from yellowbrick.cluster import KElbowVisualizer
from anchors import utils, anchor_tabular, anchor_base, limes
from growingspheres import counterfactuals as cf
from growingspheres.utils.gs_utils import get_distances, generate_inside_ball, generate_categoric_inside_ball
import pyfolding as pf
from typing import Dict, Tuple
from scipy.stats import multinomial
import pickle
import random
def compute_linear_regression_precision(prediction_inside_sphere, labels_in_sphere):
    """Best accuracy achievable by thresholding raw regression scores.

    Sweeps roughly 10 candidate thresholds between the minimum and maximum
    predicted value, binarises the predictions at each threshold (1 strictly
    above, 0 at or below) and scores them against ``labels_in_sphere``.

    Args:
        prediction_inside_sphere: 1-d array-like of raw regression scores.
        labels_in_sphere: 1-d numpy array of 0/1 ground-truth labels.

    Returns:
        float: the highest accuracy reached over all candidate thresholds.
    """
    lo, hi = min(prediction_inside_sphere), max(prediction_inside_sphere)
    try:
        thresholds_regression = np.arange(lo, hi, (hi - lo) / 10)
    except (ValueError, ZeroDivisionError):
        # Degenerate case: all predictions identical, so the step is 0 and
        # np.arange raises ZeroDivisionError — which the original
        # `except ValueError` failed to catch.
        thresholds_regression = [lo, hi]
    predictions = np.asarray(prediction_inside_sphere)
    precisions_regression = []
    for threshold_regression in thresholds_regression:
        # TODO regarder si c'est toujours 1 au dessus et 0 en dessous + S'occuper des cas multiclasses
        binarised = (predictions > threshold_regression).astype(int)
        precision_regression = np.sum(binarised == labels_in_sphere) / len(binarised)
        precisions_regression.append(precision_regression)
    return max(precisions_regression)
def compute_labels_inside_sphere(erl_tabular, nb_training_instance_in_sphere, position_instances_in_sphere):
    """Count hypersphere training instances predicted as the target class.

    Args:
        erl_tabular: experiment wrapper exposing ``train_data``,
            ``black_box_predict`` and ``target_class``.
        nb_training_instance_in_sphere: number of training instances that
            fell inside the hypersphere.
        position_instances_in_sphere: indices of those instances in
            ``erl_tabular.train_data``.

    Returns:
        tuple: (count of in-sphere instances labelled as the target class,
        the predicted labels) — or (1, None) when the sphere contains no
        training instance, so downstream coverage ratios avoid a zero.
    """
    if nb_training_instance_in_sphere <= 0:
        # Empty sphere: report a count of 1 (divide-by-zero guard) and no labels.
        return 1, None
    sphere_instances = erl_tabular.train_data[position_instances_in_sphere]
    sphere_labels = erl_tabular.black_box_predict(sphere_instances)
    target_hits = sum(label == erl_tabular.target_class for label in sphere_labels)
    return target_hits, sphere_labels
"""
def compute_traditional_lime_precision_coverage(erl_tabular, instance, growing_sphere, radius, farthest_distance, nb_instance_train_data_label_as_target):
lime_traditional = erl_tabular.lime_explainer.explain_instance(instance, erl_tabular.black_box_predict, num_features=4, model_regressor = LogisticRegression())
position_instances_in_sphere, nb_training_instance_in_sphere = erl_tabular.instances_from_dataset_inside_sphere(instance, 2*growing_sphere.radius)
nb_training_instance_in_sphere_label_as_target, labels_training_instance_in_sphere = compute_labels_inside_sphere(erl_tabular, nb_training_instance_in_sphere,
position_instances_in_sphere)
instances_in_sphere_traditional, labels_in_sphere_traditional, percentage_distribution_traditional, _ = erl_tabular.generate_instances_inside_sphere(growing_sphere,
radius, instance,
farthest_distance, erl_tabular.nb_min_instance_per_class_in_sphere,
position_instances_in_sphere, nb_training_instance_in_sphere)
prediction_inside_sphere_traditional = erl_tabular.modify_instance_for_linear_model(lime_traditional, instances_in_sphere_traditional)
lime_traditional_precision = sum(labels_in_sphere_traditional == prediction_inside_sphere_traditional)/len(prediction_inside_sphere_traditional)
lime_traditional_coverage = nb_training_instance_in_sphere_label_as_target/nb_instance_train_data_label_as_target
return lime_traditional_precision, lime_traditional_coverage, (lime_traditional_precision+lime_traditional_coverage)/2
"""
def compute_anchor_precision_coverage(erl_tabular, instance, labels_instance_train_data, nb_instances_in_sphere,
                                      farthest_distance, percentage_distribution, nb_instance_train_data_label_as_target):
    """Compute precision, coverage and their mean for an Anchors explanation.

    Args:
        erl_tabular: experiment wrapper exposing the anchor explainer, the
            black-box classifier and the training data.
        instance: 1-d feature vector being explained.
        labels_instance_train_data: black-box labels of the whole training set.
        nb_instances_in_sphere: number of artificial instances to generate in
            the anchor's area (mirrors the hypersphere sample size).
        farthest_distance: distance to the farthest training instance; bounds
            artificial-instance generation.
        percentage_distribution: class distribution used for generation.
        nb_instance_train_data_label_as_target: coverage denominator.

    Returns:
        tuple: (anchor_precision, anchor_coverage, their arithmetic mean —
        called "f1" here although it is not the harmonic mean).
    """
    anchor_exp = erl_tabular.anchor_explainer.explain_instance(instance, erl_tabular.black_box_predict, threshold=erl_tabular.threshold_precision,
                                                               delta=0.1, tau=0.15, batch_size=100, max_anchor_size=None, stop_on_first=False,
                                                               desired_label=None, beam_size=4)
    if erl_tabular.verbose:
        print("anchors explanation find, now let's go for linear !")
        print('Anchor: %s' % (' AND '.join(anchor_exp.names())))

    """ Generate rules and data frame for applying anchors """
    rules, training_instances_pandas_frame = erl_tabular.generate_rule_and_data_for_anchors(anchor_exp.names(), erl_tabular.target_class, erl_tabular.train_data)
    """ Apply anchors and returns his assiciated coverage and precision """
    training_instances_in_anchor = erl_tabular.get_base_model_data(rules, training_instances_pandas_frame)

    """ Computes the number of instances from the training set that are classified as the target instance and validate the anchor rules. """
    index_instances_train_data_labels_as_target = np.where([x == erl_tabular.target_class for x in labels_instance_train_data])
    instances_from_index = erl_tabular.train_data[index_instances_train_data_labels_as_target]
    coverage_training_instances_in_anchor = training_instances_in_anchor.copy()
    # Count target-class training instances that match a row of the anchor's
    # covered set exactly (all feature values equal).
    nb_train_instances_in_anchor = 0
    for instance_index in instances_from_index:
        matches = coverage_training_instances_in_anchor[(coverage_training_instances_in_anchor==instance_index).all(axis=1)]
        if len(matches)>0:
            nb_train_instances_in_anchor += 1

    """ Generates artificial instances in the area of the anchor rules until there are as many instances as in the hypersphere """
    if erl_tabular.verbose: print("Computing precision and coverage for anchors.")
    instances_in_anchor = erl_tabular.generate_artificial_instances_in_anchor(training_instances_in_anchor, nb_instances_in_sphere, instance,
                                                                              rules, farthest_distance, percentage_distribution)
    labels_in_anchor = erl_tabular.black_box_predict(instances_in_anchor)
    # Coverage: fraction of target-class training data matched by the anchor;
    # precision: fraction of generated in-anchor instances predicted as target.
    anchor_coverage = nb_train_instances_in_anchor/nb_instance_train_data_label_as_target
    anchor_precision = sum(labels_in_anchor == erl_tabular.target_class)/len(labels_in_anchor)
    f1_anchor = (anchor_coverage+anchor_precision)/2
    return anchor_precision, anchor_coverage, f1_anchor
def compute_lime_extending_precision_coverage(erl_tabular, instances_in_sphere, labels_in_sphere, growing_sphere,
                                              farthest_distance, dicrease_radius, nb_instance_train_data_label_as_target,
                                              nb_training_instance_in_sphere_label_as_target, labels_training_instance_in_sphere):
    """Precision/coverage of a LIME local surrogate with an expanding sphere.

    Fits a LIME surrogate inside the initial hypersphere, then repeatedly
    enlarges the radius while the surrogate's precision stays above
    ``erl_tabular.threshold_precision`` (and the radius stays below
    ``farthest_distance``).  Keeps the best precision seen and recomputes
    coverage at the final radius.

    Returns:
        tuple: (best precision, coverage at final radius, arithmetic mean of
        the two — labelled "f1" though not the harmonic mean).
    """
    ls_raw_data = erl_tabular.lime_explainer.explain_instance_training_dataset(erl_tabular.closest_counterfactual, erl_tabular.black_box_predict,
                                                                               num_features=4, model_regressor = LogisticRegression(),
                                                                               test=False,
                                                                               instances_in_sphere=instances_in_sphere,
                                                                               labels_in_sphere=labels_in_sphere)
    prediction_inside_sphere = erl_tabular.modify_instance_for_linear_model(ls_raw_data, instances_in_sphere)
    precision_ls_raw_data = sum(labels_in_sphere == prediction_inside_sphere)/len(prediction_inside_sphere)
    radius = growing_sphere.radius
    final_precision = 0
    while precision_ls_raw_data > erl_tabular.threshold_precision and radius < farthest_distance:
        """ Extending the hypersphere radius until the precision inside the hypersphere is lower than the threshold
        and the radius of the hyper sphere is not longer than the distances to the farthest instance from the dataset """
        # Remember the last acceptable state before growing the radius.
        final_precision = precision_ls_raw_data
        last_radius = radius
        radius += (dicrease_radius - 1) * radius/5.0
        position_instances_in_sphere, nb_training_instance_in_sphere = erl_tabular.instances_from_dataset_inside_sphere(erl_tabular.closest_counterfactual, radius)
        instances_in_sphere, labels_in_sphere, percentage_distribution, _ = erl_tabular.generate_instances_inside_sphere(growing_sphere,
                                                                               radius, erl_tabular.closest_counterfactual,
                                                                               farthest_distance, erl_tabular.nb_min_instance_per_class_in_sphere,
                                                                               position_instances_in_sphere, nb_training_instance_in_sphere)
        ls_raw_data = erl_tabular.lime_explainer.explain_instance_training_dataset(erl_tabular.closest_counterfactual, erl_tabular.black_box_predict,
                                                                               num_features=4, model_regressor = LogisticRegression(),
                                                                               instances_in_sphere=instances_in_sphere,
                                                                               labels_in_sphere=labels_in_sphere)
        prediction_inside_sphere = erl_tabular.modify_instance_for_linear_model(ls_raw_data, instances_in_sphere, test=True)
        precision_ls_raw_data = compute_linear_regression_precision(prediction_inside_sphere, labels_in_sphere)
    # If the last extension degraded precision, roll back to the previous radius.
    if final_precision > precision_ls_raw_data:
        precision_ls_raw_data = final_precision
        radius = last_radius
    lime_extending_coverage_before = nb_training_instance_in_sphere_label_as_target/nb_instance_train_data_label_as_target
    # Recompute the in-sphere target-class count at the (possibly grown) radius.
    position_instances_in_sphere, nb_training_instance_in_sphere = erl_tabular.instances_from_dataset_inside_sphere(erl_tabular.closest_counterfactual, radius)
    nb_training_instance_in_sphere_label_as_target, labels_training_instance_in_sphere = compute_labels_inside_sphere(erl_tabular, nb_training_instance_in_sphere,
                                                                                                                     position_instances_in_sphere)
    """ computation of the coverage inside the sphere for linear model on training data """
    lime_extending_coverage = nb_training_instance_in_sphere_label_as_target/nb_instance_train_data_label_as_target
    if lime_extending_coverage_before != lime_extending_coverage:
        print("lime extending coverage before", lime_extending_coverage_before)
        print("lime extending coverage now", lime_extending_coverage)
    f1_lime_extending = (precision_ls_raw_data + lime_extending_coverage)/2
    return precision_ls_raw_data, lime_extending_coverage, f1_lime_extending
def compute_other_linear_explanation_precision_coverage(erl_tabular, nb_training_instance_in_sphere, position_instances_in_sphere,
                                                        instances_in_sphere, labels_in_sphere, nb_instance_train_data_label_as_target,
                                                        nb_training_instance_in_sphere_label_as_target, labels_training_instance_in_sphere):
    """Precision/coverage of two additional linear surrogate explainers.

    Evaluates (a) a local surrogate trained on the training dataset with a
    raw (non-binarised) linear regression, and (b) a classic LIME surrogate
    with LogisticRegression, both scored via
    compute_linear_regression_precision on the in-sphere sample.

    Returns:
        6-tuple: (LIME-regression precision, raw-regression precision,
        coverage, coverage again — the same value is deliberately returned
        twice so it pairs with each precision —, and the two corresponding
        precision/coverage arithmetic means).
    """
    #lime_exp_not_bin = erl_tabular.lime_explainer.explain_instance_training_dataset(erl_tabular.instance_to_explain, erl_tabular.black_box_predict,
    #                                        num_features=4, model_regressor = LogisticRegression())
    ls_raw_data_linear_regression = erl_tabular.lime_explainer.explain_instance_training_dataset(erl_tabular.closest_counterfactual,
                                                                                                 erl_tabular.black_box_predict_proba, num_features=4)
    prediction_inside_sphere = erl_tabular.modify_instance_for_linear_model(ls_raw_data_linear_regression, instances_in_sphere)
    precision_ls_raw_data_linear_regression = compute_linear_regression_precision(prediction_inside_sphere, labels_in_sphere)
    # Compute precision for a local Surrogate model with a classical Linear Regression model as explanation model
    local_surrogate_exp_regression = erl_tabular.lime_explainer.explain_instance(erl_tabular.closest_counterfactual,
                                                                                erl_tabular.black_box_predict, num_features=4,
                                                                                model_regressor = LogisticRegression())
    prediction_inside_sphere = erl_tabular.modify_instance_for_linear_model(local_surrogate_exp_regression, instances_in_sphere)
    precision_ls_linear_regression = compute_linear_regression_precision(prediction_inside_sphere, labels_in_sphere)
    if erl_tabular.verbose: print("Computing multiple linear explanation models precision and coverage.")
    # Both surrogates share the same coverage: the fraction of target-class
    # training instances that fall inside the sphere.
    lime_coverage = nb_training_instance_in_sphere_label_as_target/nb_instance_train_data_label_as_target
    f1_not_bin_lime = (precision_ls_raw_data_linear_regression+lime_coverage)/2
    f1_lime_regression = (precision_ls_linear_regression+lime_coverage)/2
    return precision_ls_linear_regression, precision_ls_raw_data_linear_regression, lime_coverage, lime_coverage, f1_lime_regression, f1_not_bin_lime
def compute_all_explanation_method_precision(erl_tabular, instance, growing_sphere, dicrease_radius, radius,
                                             nb_training_instance_in_sphere, nb_instance_train_data_label_as_target,
                                             position_instances_in_sphere, instances_in_sphere, labels_in_sphere,
                                             farthest_distance, percentage_distribution):
    """Score the extended local surrogate, Anchors, and the combined ERL.

    ERL picks the Anchors scores when ``erl_tabular.multimodal_results`` is
    True and the extended local-surrogate scores otherwise.

    Returns:
        tuple: (precisions, coverages, f1s, multimodal) where each list is
        ordered [local surrogate extended, ERL, anchors] and ``multimodal``
        is 1 if the multimodal (Anchors) branch was used, else 0.
    """
    labels_instance_train_data = erl_tabular.black_box_predict(erl_tabular.train_data)
    erl_tabular.instance_to_explain = instance
    nb_training_instance_in_sphere_label_as_target, labels_training_instance_in_sphere = compute_labels_inside_sphere(erl_tabular, nb_training_instance_in_sphere,
                                                                                                                     position_instances_in_sphere)
    local_surrogate_extend_raw_precision, local_surrogate_extend_raw_coverage, f1_local_surrogate_extend_raw = compute_lime_extending_precision_coverage(erl_tabular, instances_in_sphere,
                                                                                                                     labels_in_sphere, growing_sphere, farthest_distance, dicrease_radius, nb_instance_train_data_label_as_target,
                                                                                                                     nb_training_instance_in_sphere_label_as_target, labels_training_instance_in_sphere)
    anchor_precision, anchor_coverage, f1_anchor = compute_anchor_precision_coverage(erl_tabular, instance, labels_instance_train_data, len(instances_in_sphere),
                                                                                    farthest_distance, percentage_distribution, nb_instance_train_data_label_as_target)
    # TODO anchor coverage is zero
    erl_precision = anchor_precision if erl_tabular.multimodal_results else local_surrogate_extend_raw_precision
    erl_coverage = anchor_coverage if erl_tabular.multimodal_results else local_surrogate_extend_raw_coverage
    f1_erl = f1_anchor if erl_tabular.multimodal_results else f1_local_surrogate_extend_raw
    if erl_tabular.verbose:
        print("Anchor precision ", np.round(anchor_precision, decimals=3))
        print("Anchors coverage : ", np.round(anchor_coverage, decimals=3))
        print("F1 score for anchor:", np.round(f1_anchor, decimals=3))
        print("Local Surrogate extended precision :", np.round(local_surrogate_extend_raw_precision, decimals=3))
        print("The coverage of the sphere is : ", np.round(local_surrogate_extend_raw_coverage, decimals=3))
        print("F1 score for lime extending:", np.round(f1_local_surrogate_extend_raw, decimals=3))
        print("ERL precision : ", np.round(erl_precision, decimals=3))
        print("ERL coverage : ", np.round(erl_coverage, decimals=3))
        print("ERL F1 score:", np.round(f1_erl, decimals=3))
    precisions = [local_surrogate_extend_raw_precision, erl_precision, anchor_precision]
    coverages = [local_surrogate_extend_raw_coverage, erl_coverage, anchor_coverage]
    f1s = [f1_local_surrogate_extend_raw, f1_erl, f1_anchor]
    multimodal = 1 if erl_tabular.multimodal_results else 0
    return precisions, coverages, f1s, multimodal
def compute_local_surrogate_precision_coverage(erl_tabular, instance, growing_sphere,
                                               instances_in_sphere, labels_in_sphere,
                                               position_instances_in_sphere, nb_training_instance_in_sphere, all_linear=True):
    """Precision/coverage of the local-surrogate linear explanation models.

    With ``all_linear=True`` (the default) delegates to
    compute_other_linear_explanation_precision_coverage and returns its two
    precisions, the (shared) coverage twice, and the two precision/coverage
    means as parallel lists.

    Raises:
        NotImplementedError: if ``all_linear`` is False.  The original code
        path referenced variables (``precision_local_surrogate`` etc.) that
        were only produced by code since commented out, so it always crashed
        with a NameError; fail explicitly instead.

    Returns:
        tuple of lists: (precision, coverage, f1).
    """
    labels_instance_train_data = erl_tabular.black_box_predict(erl_tabular.train_data)
    nb_instance_train_data_label_as_target = sum(x == erl_tabular.target_class for x in labels_instance_train_data)
    nb_training_instance_in_sphere_label_as_target, labels_training_instance_in_sphere = compute_labels_inside_sphere(
        erl_tabular, nb_training_instance_in_sphere, position_instances_in_sphere)
    if not all_linear:
        raise NotImplementedError(
            "all_linear=False is not supported: the single local-surrogate "
            "computation this branch relied on has been disabled.")
    # The coverage is common to both linear surrogates, hence the same value
    # is returned twice (one per precision).
    ls_regression_precision, ls_not_bin_precision, ls_coverage, _, f1_ls_regression, f1_not_bin_ls = \
        compute_other_linear_explanation_precision_coverage(
            erl_tabular, nb_training_instance_in_sphere, position_instances_in_sphere,
            instances_in_sphere, labels_in_sphere, nb_instance_train_data_label_as_target,
            nb_training_instance_in_sphere_label_as_target, labels_training_instance_in_sphere)
    precision = [ls_regression_precision, ls_not_bin_precision]
    coverage = [ls_coverage, ls_coverage]
    f1 = [f1_ls_regression, f1_not_bin_ls]
    return precision, coverage, f1
def simulate_user_experiments(erl_tabular, instance, nb_features_employed):
    """Collect the feature sets used by the linear, ERL and anchor explainers.

    Pipeline: (1) find a counterfactual of ``instance`` with Growing Spheres,
    then a counterfactual of that counterfactual back in the target class;
    (2) sample instances inside the resulting hypersphere, doubling the
    per-class sample size until the libfolding unimodality test succeeds;
    (3) extract the features named by an Anchors rule, by a LIME surrogate on
    the sphere (ERL's choice depends on ``erl_tabular.multimodal_results``),
    and by a plain local surrogate at the counterfactual.

    Returns:
        tuple: (features_employed_in_linear, features_employed_by_erl,
        features_employed_in_rule).
    """
    target_class = erl_tabular.black_box_predict(instance.reshape(1, -1))[0]
    erl_tabular.target_class = target_class
    # Computes the distance to the farthest instance from the training dataset to bound generating instances
    farthest_distance = 0
    for training_instance in erl_tabular.train_data:
        if get_distances(training_instance, instance)["euclidean"] > farthest_distance:
            farthest_distance = np.round(get_distances(training_instance, instance)["euclidean"], decimals=3)
    # First sphere: counterfactual of the explained instance (any class change).
    growing_sphere = cf.CounterfactualExplanation(instance, erl_tabular.black_box_predict, method='GS', target_class=None,
                                                  continuous_features=erl_tabular.continuous_features, categorical_features=erl_tabular.categorical_features,
                                                  categorical_values=erl_tabular.categorical_values)
    growing_sphere.fit(n_in_layer=2000, first_radius=0.1, dicrease_radius=10, sparse=True,
                       verbose=erl_tabular.verbose, feature_variance=erl_tabular.feature_variance,
                       farthest_distance_training_dataset=farthest_distance, probability_categorical_feature=erl_tabular.probability_categorical_feature,
                       min_counterfactual_in_sphere=erl_tabular.nb_min_instance_per_class_in_sphere)
    # Second sphere: counterfactual of the first enemy, forced back to the
    # original target class (yields the "closest counterfactual").
    first_growing_sphere = cf.CounterfactualExplanation(growing_sphere.enemy, erl_tabular.black_box_predict, method='GS', target_class=target_class,
                                                        continuous_features=erl_tabular.continuous_features, categorical_features=erl_tabular.categorical_features,
                                                        categorical_values=erl_tabular.categorical_values)
    first_growing_sphere.fit(n_in_layer=2000, first_radius=0.1, dicrease_radius=10, sparse=True,
                             verbose=erl_tabular.verbose, feature_variance=erl_tabular.feature_variance,
                             farthest_distance_training_dataset=farthest_distance, probability_categorical_feature=erl_tabular.probability_categorical_feature,
                             min_counterfactual_in_sphere=erl_tabular.nb_min_instance_per_class_in_sphere)
    # get_distance is similar to pairwise distance (i.e: it is the same results for euclidean distance)
    # but it adds a sparsity distance computation (i.e: number of same values)
    closest_counterfactual = first_growing_sphere.enemy
    """ Generates or store instances in the area of the hypersphere and their correspoinding labels """
    min_instance_per_class = erl_tabular.nb_min_instance_per_class_in_sphere
    position_instances_in_sphere, nb_training_instance_in_sphere = erl_tabular.instances_from_dataset_inside_sphere(closest_counterfactual, growing_sphere.radius)
    instances_in_sphere, labels_in_sphere, percentage_distribution, instances_in_sphere_libfolding = erl_tabular.generate_instances_inside_sphere(growing_sphere,
                                                                        growing_sphere.radius, closest_counterfactual,
                                                                        farthest_distance, min_instance_per_class, position_instances_in_sphere,
                                                                        nb_training_instance_in_sphere, libfolding=True)
    """ Compute the libfolding test to verify wheter instances in the area of the hyper sphere is multimodal or unimodal """
    if instances_in_sphere_libfolding != []:
        index_counterfactual_instances_in_sphere = erl_tabular.store_counterfactual_instances_in_sphere(instances_in_sphere, erl_tabular.target_class, libfolding=True)
        counterfactual_instances_in_sphere = instances_in_sphere[index_counterfactual_instances_in_sphere]
        counterfactual_libfolding = instances_in_sphere_libfolding[index_counterfactual_instances_in_sphere]
        unimodal_test = erl_tabular.check_test_unimodal_data(np.array(counterfactual_instances_in_sphere), instances_in_sphere, growing_sphere.radius,
                                                             counterfactual_libfolding=counterfactual_libfolding)
    else:
        counterfactual_instances_in_sphere = erl_tabular.store_counterfactual_instances_in_sphere(instances_in_sphere, erl_tabular.target_class)
        unimodal_test = erl_tabular.check_test_unimodal_data(np.array(counterfactual_instances_in_sphere), instances_in_sphere, growing_sphere.radius)
    # Double the per-class sample and retry until libfolding reaches a verdict.
    nb = 0
    while not unimodal_test:
        min_instance_per_class *= 2
        instances_in_sphere, labels_in_sphere, percentage_distribution, instances_in_sphere_libfolding = erl_tabular.generate_instances_inside_sphere(growing_sphere,
                                                                        growing_sphere.radius, closest_counterfactual,
                                                                        farthest_distance, min_instance_per_class,
                                                                        position_instances_in_sphere, nb_training_instance_in_sphere,
                                                                        libfolding=True)
        if instances_in_sphere_libfolding != []:
            index_counterfactual_instances_in_sphere = erl_tabular.store_counterfactual_instances_in_sphere(instances_in_sphere, erl_tabular.target_class, libfolding=True)
            counterfactual_instances_in_sphere = instances_in_sphere[index_counterfactual_instances_in_sphere]
            counterfactual_libfolding = instances_in_sphere_libfolding[index_counterfactual_instances_in_sphere]
            unimodal_test = erl_tabular.check_test_unimodal_data(np.array(counterfactual_instances_in_sphere), instances_in_sphere, growing_sphere.radius,
                                                                 counterfactual_libfolding=counterfactual_libfolding)
        else:
            counterfactual_instances_in_sphere = erl_tabular.store_counterfactual_instances_in_sphere(instances_in_sphere, erl_tabular.target_class)
            unimodal_test = erl_tabular.check_test_unimodal_data(np.array(counterfactual_instances_in_sphere), instances_in_sphere, growing_sphere.radius)
        print("nb times libfolding is not able to determine wheter datas are unimodal or multimodal:", nb)
        print("There are ", len(counterfactual_instances_in_sphere), " instances in the datas given to libfolding.")
        print()
        nb += 1
    anchor_exp = erl_tabular.anchor_explainer.explain_instance(instance, erl_tabular.black_box_predict, threshold=erl_tabular.threshold_precision,
                                                               delta=0.1, tau=0.15, batch_size=100, max_anchor_size=None, stop_on_first=False,
                                                               desired_label=None, beam_size=4)
    # Generate rules and data frame for applying anchors
    #print("rule by anchor", anchor_exp.names())
    rules, training_instances_pandas_frame, features_employed_in_rule = erl_tabular.generate_rule_and_data_for_anchors(anchor_exp.names(),
                                                                        erl_tabular.target_class, erl_tabular.train_data,
                                                                        simulated_user_experiment=True)
    if not erl_tabular.multimodal_results:
        # Unimodal case: ERL uses the LIME surrogate's features instead of
        # the anchor rule's features.
        ls_raw_data = erl_tabular.lime_explainer.explain_instance_training_dataset(instance,
                                                                                   erl_tabular.black_box_predict_proba,
                                                                                   num_features=nb_features_employed,
                                                                                   instances_in_sphere=instances_in_sphere)
        features_linear_employed = []
        for feature_linear_employed in ls_raw_data.as_list():
            features_linear_employed.append(feature_linear_employed[0])
        #print("features linear employed", features_linear_employed)
        rules, training_instances_pandas_frame, features_employed_in_linear = erl_tabular.generate_rule_and_data_for_anchors(features_linear_employed,
                                                                                   erl_tabular.target_class, erl_tabular.train_data,
                                                                                   simulated_user_experiment=True)
        features_employed_by_erl = features_employed_in_linear
    else:
        features_employed_by_erl = features_employed_in_rule
    # Baseline: plain local surrogate at the closest counterfactual.
    local_surrogate = erl_tabular.lime_explainer.explain_instance(closest_counterfactual,
                                                                  erl_tabular.black_box_predict_proba,
                                                                  num_features=nb_features_employed)
    features_linear_employed = []
    for feature_linear_employed in local_surrogate.as_list():
        features_linear_employed.append(feature_linear_employed[0])
    #print("features linear employed", features_linear_employed)
    rules, training_instances_pandas_frame, features_employed_in_linear = erl_tabular.generate_rule_and_data_for_anchors(features_linear_employed,
                                                                                   erl_tabular.target_class, erl_tabular.train_data,
                                                                                   simulated_user_experiment=True)
    return features_employed_in_linear, features_employed_by_erl, features_employed_in_rule
def modify_dataset(dataset, nb_feature_to_set_0):
    """Zero out a random subset of feature columns in a copy of *dataset*.

    Parameters
    ----------
    dataset : 2-D numpy array of shape (n_samples, n_features).
    nb_feature_to_set_0 : number of distinct columns to overwrite with 0.

    Returns
    -------
    (modified_copy, feature_kept) where *feature_kept* is the set of column
    indices left untouched. The input array itself is not modified.
    """
    nb_features = len(dataset[0])
    # Pick the columns to blank out, without replacement.
    feature_modified = random.sample(range(0, nb_features), nb_feature_to_set_0)
    feature_kept = set(range(nb_features)).difference(feature_modified)
    dataset_to_return = dataset.copy()
    dataset_to_return[:, feature_modified] = 0
    return dataset_to_return, feature_kept
def decision_tree_function(clf, instance):
    """Return the set of feature indices used along the decision path
    taken by the first sample of *instance* through the fitted tree *clf*.

    Leaf nodes carry no split feature, so the terminal node is excluded.
    """
    split_feature = clf.tree_.feature
    node_indicator = clf.decision_path(instance)
    leaf_id = clf.apply(instance)
    sample_id = 0
    # Node ids visited by row `sample_id` of the sparse decision-path matrix.
    begin = node_indicator.indptr[sample_id]
    end = node_indicator.indptr[sample_id + 1]
    visited_nodes = node_indicator.indices[begin:end]
    return {split_feature[node_id]
            for node_id in visited_nodes
            if node_id != leaf_id[sample_id]}
def simulate_user_experiments_lime_ls(instance, nb_features_employed, erl_tabular):
    """Simulated-user experiment comparing LIME against a Local Surrogate (LS).

    Grows a counterfactual hypersphere around *instance*, generates instances
    inside it until a libfolding unimodality verdict is reached, then — in the
    unimodal case — extracts the feature sets used by a LIME explanation and
    by an LS explanation.

    Returns (features_employed_in_linear, features_employed_in_local_surrogate),
    both sorted, or ([], []) when erl_tabular.multimodal_results is set.
    """
    target_class = erl_tabular.black_box_predict(instance.reshape(1, -1))[0]
    erl_tabular.target_class = target_class
    # Computes the distance to the farthest instance from the training dataset to bound generating instances
    farthest_distance = 0
    for training_instance in erl_tabular.train_data:
        if get_distances(training_instance, instance)["euclidean"] > farthest_distance:
            farthest_distance = np.round(get_distances(training_instance, instance)["euclidean"], decimals=3)
    # Growing-sphere search for the closest counterfactual around `instance`.
    growing_sphere = cf.CounterfactualExplanation(instance, erl_tabular.black_box_predict, method='GS', target_class=None,
                                                  continuous_features=erl_tabular.continuous_features, categorical_features=erl_tabular.categorical_features,
                                                  categorical_values=erl_tabular.categorical_values)
    growing_sphere.fit(n_in_layer=2000, first_radius=0.1, dicrease_radius=10, sparse=True,
                       verbose=erl_tabular.verbose, feature_variance=erl_tabular.feature_variance,
                       farthest_distance_training_dataset=farthest_distance, probability_categorical_feature=erl_tabular.probability_categorical_feature,
                       min_counterfactual_in_sphere=erl_tabular.nb_min_instance_per_class_in_sphere)
    closest_counterfactual = growing_sphere.enemy
    """ Generates or store instances in the area of the hypersphere and their correspoinding labels """
    min_instance_per_class = erl_tabular.nb_min_instance_per_class_in_sphere
    position_instances_in_sphere, nb_training_instance_in_sphere = erl_tabular.instances_from_dataset_inside_sphere(closest_counterfactual, growing_sphere.radius)
    instances_in_sphere, labels_in_sphere, percentage_distribution, instances_in_sphere_libfolding = erl_tabular.generate_instances_inside_sphere(growing_sphere,
                                                                growing_sphere.radius, closest_counterfactual,
                                                                farthest_distance, min_instance_per_class, position_instances_in_sphere,
                                                                nb_training_instance_in_sphere, libfolding=True)
    """ Compute the libfolding test to verify wheter instances in the area of the hyper sphere is multimodal or unimodal """
    if instances_in_sphere_libfolding != []:
        counterfactual_instances_in_sphere = erl_tabular.store_counterfactual_instances_in_sphere(instances_in_sphere_libfolding, target_class)
    else:
        counterfactual_instances_in_sphere = erl_tabular.store_counterfactual_instances_in_sphere(instances_in_sphere, target_class)
    unimodal_test = erl_tabular.check_test_unimodal_data(np.array(counterfactual_instances_in_sphere), instances_in_sphere, growing_sphere.radius)
    nb = 0
    # Keep doubling the number of generated instances until libfolding can
    # reach a unimodal/multimodal verdict.
    while not unimodal_test:
        min_instance_per_class *= 2
        instances_in_sphere, labels_in_sphere, percentage_distribution, instances_in_sphere_libfolding = erl_tabular.generate_instances_inside_sphere(growing_sphere,
                                                                growing_sphere.radius, closest_counterfactual,
                                                                farthest_distance, min_instance_per_class,
                                                                position_instances_in_sphere, nb_training_instance_in_sphere,
                                                                libfolding=True)
        if instances_in_sphere_libfolding != []:
            counterfactual_instances_in_sphere = erl_tabular.store_counterfactual_instances_in_sphere(instances_in_sphere_libfolding, target_class)
        else:
            counterfactual_instances_in_sphere = erl_tabular.store_counterfactual_instances_in_sphere(instances_in_sphere, target_class)
        unimodal_test = erl_tabular.check_test_unimodal_data(np.array(counterfactual_instances_in_sphere), instances_in_sphere, growing_sphere.radius)
        # NOTE(review): `nb` is printed before being incremented, so the first
        # report always shows 0 — confirm this counter is intended.
        print("nb times libfolding is not able to determine wheter datas are unimodal or multimodal:", nb)
        print("There are ", len(counterfactual_instances_in_sphere), " instances in the datas given to libfolding.")
        print()
        nb += 1
    if not erl_tabular.multimodal_results:
        # LIME explanation on the original instance.
        lime_exp = erl_tabular.lime_explainer.explain_instance(instance, erl_tabular.black_box_predict_proba, num_features=nb_features_employed)
        #lime_exp = erl_tabular.lime_explainer.explain_instance(instance, erl_tabular.black_box_predict, num_features=nb_features_employed,
        #model_regressor = LogisticRegression())
        #print("all", lime_exp.as_list())
        features_linear_employed = []
        for feature_linear_employed in lime_exp.as_list():
            features_linear_employed.append(feature_linear_employed[0])
        #print("features linear employed", features_linear_employed)
        rules, training_instances_pandas_frame, features_employed_in_linear = erl_tabular.generate_rule_and_data_for_anchors(features_linear_employed,
                                                                target_class, erl_tabular.train_data,
                                                                simulated_user_experiment=True)
        # Grow a second sphere around the closest counterfactual to obtain the
        # instance intended for the Local Surrogate.
        growing_sphere_closest_cf = cf.CounterfactualExplanation(closest_counterfactual, erl_tabular.black_box_predict, method='GS', target_class=target_class,
                                                                continuous_features=erl_tabular.continuous_features, categorical_features=erl_tabular.categorical_features,
                                                                categorical_values=erl_tabular.categorical_values)
        growing_sphere_closest_cf.fit(n_in_layer=2000, first_radius=0.1, dicrease_radius=10, sparse=True,
                                      verbose=erl_tabular.verbose, feature_variance=erl_tabular.feature_variance, farthest_distance_training_dataset=farthest_distance,
                                      probability_categorical_feature=erl_tabular.probability_categorical_feature, min_counterfactual_in_sphere=erl_tabular.nb_min_instance_per_class_in_sphere)
        instance_local_surrogate = growing_sphere_closest_cf.enemy
        print("classe cible", target_class)
        print("classe de l'instance donnée à LS", erl_tabular.black_box_predict(instance_local_surrogate.reshape(1, -1))[0])
        # NOTE(review): the LS explanation below is computed on `instance`, not
        # on `instance_local_surrogate` (that variant is commented out), so the
        # counterfactual computed just above goes unused — confirm intended.
        local_surrogate_exp = erl_tabular.lime_explainer.explain_instance_training_dataset(instance,
                                                                erl_tabular.black_box_predict_proba,
                                                                num_features=nb_features_employed)#,
                                                                #model_regressor = LogisticRegression())
        """
        local_surrogate_exp = erl_tabular.lime_explainer.explain_instance(instance_local_surrogate,
                                                                erl_tabular.black_box_predict_proba,
                                                                num_features=nb_features_employed)
        """
        features_local_surrogate_employed = []
        for feature_local_surrogate_employed in local_surrogate_exp.as_list():
            features_local_surrogate_employed.append(feature_local_surrogate_employed[0])
        rules, training_instances_pandas_frame, features_employed_in_local_surrogate = erl_tabular.generate_rule_and_data_for_anchors(features_local_surrogate_employed,
                                                                target_class, erl_tabular.train_data,
                                                                simulated_user_experiment=True)
        """
        counter_factual_class = erl_tabular.black_box_predict(closest_counterfactual.reshape(1,-1))[0]
        print("la classe du contre factuel le plus proche : ", counter_factual_class)
        print('Lime explanation for %s' % erl_tabular.class_names[target_class])
        print('\n'.join(map(str, lime_exp.as_list())))
        print('Local Surrogate explanation for %s' % erl_tabular.class_names[counter_factual_class])
        print('\n'.join(map(str, local_surrogate_exp.as_list())))
        """
        features_employed_in_linear.sort()
        features_employed_in_local_surrogate.sort()
        return features_employed_in_linear, features_employed_in_local_surrogate
    else:
        return [], []
|
# Copyright 2020-2021 (c) <NAME>, AFOTEK Anlagen für Oberflächentechnik GmbH
# Copyright 2021 (c) <NAME>, konzeptpark GmbH
# Copyright 2021 (c) <NAME>, ISW University of Stuttgart (for umati and VDW e.V.)
# Copyright 2021 (c) <NAME>, VDW - Verein Deutscher Werkzeugmaschinenfabriken e.V.
# Imports
import os
import asyncio
import logging
import time
from datetime import datetime
from asyncua import Server, ua
from asyncua.common.ua_utils import value_to_datavalue
from importer import CSV_IMPORTER
from datavalue_parser import parse_to_datavalue
logging.basicConfig(level=logging.WARNING)
_logger = logging.getLogger('asyncua')
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
build_date = datetime(2021, 4, 9, 18, 00)
time_value = None
async def main():
    """Configure and run the umati sample OPC UA server.

    Sets up an asyncua ``Server``, imports the companion-spec nodesets and
    the example machine models from XML, loads the CSV process data, and then
    serves, replaying the data rows in an endless loop.
    """

    async def _import_xml(server, *path_parts):
        # Import one nodeset/model XML file relative to BASE_DIR; a failure
        # is reported but does not abort startup (matches the original
        # per-file try/except blocks).
        try:
            await server.import_xml(os.path.join(BASE_DIR, *path_parts))
        except Exception as e:
            print(e)

    time_value = time.time()
    print("Start setup...")
    # Serversetup
    server = Server()
    server.name = "umati-Sample-Server"
    await server.init()
    await server.set_build_info(
        product_uri="https://github.com/umati/Sample-Server",
        product_name="umati Sample Server",
        manufacturer_name="umati community",
        software_version="alpha",
        build_number="202106011800",
        build_date=build_date,
    )
    server.set_security_policy([
        ua.SecurityPolicyType.NoSecurity,
    ])
    server.set_security_IDs([
        "Anonymous",
    ])
    server.set_endpoint("opc.tcp://0.0.0.0:4840")
    print(f"Setup done! {time.time()-time_value}s")

    ##################################################################################################################
    time_value = time.time()
    print("Importing companion spec. XML...")
    # Companion specifications; the namespace index of each is resolved right
    # after its import.
    await _import_xml(server, "nodeset", "Opc.Ua.Di.NodeSet2.xml")
    di_idx = await server.get_namespace_index("http://opcfoundation.org/UA/DI/")
    await _import_xml(server, "nodeset", "Opc.Ua.Machinery.NodeSet2.xml")
    ma_idx = await server.get_namespace_index("http://opcfoundation.org/UA/Machinery/")
    await _import_xml(server, "nodeset", "Opc.Ua.SurfaceTechnology.NodeSet2.xml")
    st_idx = await server.get_namespace_index("http://opcfoundation.org/UA/SurfaceTechnology/")
    await _import_xml(server, "nodeset", "Opc.Ua.Ijt.Tightening.NodeSet2.xml")
    ijt_idx = await server.get_namespace_index("http://opcfoundation.org/UA/IJT/")
    # NOTE: the IA ("Opc.Ua.IA.NodeSet2.xml", ns "http://opcfoundation.org/UA/IA/")
    # and MachineTool ("Opc.Ua.MachineTool.Nodeset2.xml",
    # ns "http://opcfoundation.org/UA/MachineTool/") nodesets are intentionally
    # disabled; re-enable them with additional _import_xml calls when needed.
    ##################################################################################################################
    print(f"Import done! {time.time()-time_value}s")

    time_value = time.time()
    print("Importing models...")
    # Example machine models of the coating line.
    for model_file in ("CoatingLine-example.xml",
                       "ConveyorGunsAxes.xml",
                       "Materialsupplyroom.xml",
                       "dosingsystem.xml",
                       "ovenbooth.xml",
                       "Pretreatment.xml",
                       "ijt_tightening_server.xml"):
        await _import_xml(server, "src", "models", model_file)
    print(f"Import done! {time.time()-time_value}s")

    ##################################################################################################################
    time_value = time.time()
    print("Create TypeDefinitions from XML...")
    # Load TypeDefinitions
    await server.load_data_type_definitions()
    print(f"TypeDefinitions created! {time.time()-time_value}s")

    time_value = time.time()
    print("Start importing CSV-Data...")
    # read csv and generate data
    imp = CSV_IMPORTER(server=server)
    await imp.read_csv(os.path.join(BASE_DIR, "src", "data", "data.csv"))
    data = await imp.get_rows()
    print(f"Import done! {time.time()-time_value}s")

    print("Starting Server...")
    async with server:
        print("Server is now running!")
        time_value = time.time()
        # Replay the CSV rows forever, one row per second.
        while 1:
            for row in data:
                await asyncio.sleep(1)
                for item in row:
                    # item = ((node, dtype, bname), val)
                    try:
                        dv = await parse_to_datavalue(item, time_value, build_date)
                    except Exception as e:
                        print(item, e)
                        dv = None
                    if dv is not None:
                        # Re-stamp with the current server time before writing.
                        new_dv = ua.DataValue(
                            Value=dv.Value,
                            StatusCode_=dv.StatusCode_,
                            SourceTimestamp=dv.SourceTimestamp,
                            ServerTimestamp=datetime.utcnow()
                        )
                        await server.write_attribute_value(item[0][0].nodeid, new_dv, ua.AttributeIds.Value)
# Start Server
if __name__ == "__main__":
    # Run the asyncua server's event loop until interrupted.
    asyncio.run(main())
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from multiset_codec.msbst import (
insert_then_forward_lookup,
reverse_lookup_then_remove,
to_sequence,
forward_lookup,
reverse_lookup,
build_multiset
)
def test_insert_then_forward_lookup():
    '''
    Incrementally test insert_then_forward_lookup function, starting from an
    empty multiset.
    '''
    leaf = (), ()
    # Each step: (symbol to insert, expected tree, expected (start, freq)).
    steps = [
        ('c', (1, 'c', *leaf), (0, 1)),
        ('a', (2, 'c', (1, 'a', *leaf), ()), (0, 1)),
        ('a', (3, 'c', (2, 'a', *leaf), ()), (0, 2)),
        ('b', (4, 'c', (3, 'a', (), (1, 'b', *leaf)), ()), (2, 1)),
        ('c', (5, 'c', (3, 'a', (), (1, 'b', *leaf)), ()), (3, 2)),
        ('e', (6, 'c', (3, 'a', (), (1, 'b', *leaf)), (1, 'e', *leaf)), (5, 1)),
        ('d', (7, 'c',
               (3, 'a', (), (1, 'b', *leaf)),
               (2, 'e', (1, 'd', *leaf), ())), (5, 1)),
        ('f', (8, 'c',
               (3, 'a', (), (1, 'b', *leaf)),
               (3, 'e', (1, 'd', *leaf), (1, 'f', *leaf))), (7, 1)),
    ]
    multiset = ()
    for symbol, expected_multiset, expected_interval in steps:
        multiset, (start, freq) = insert_then_forward_lookup(multiset, symbol)
        assert multiset == expected_multiset
        assert (start, freq) == expected_interval
def test_reverse_lookup_then_remove():
    '''
    Incrementally test reverse_lookup_then_remove function, starting from
    the last multiset in test_insert_then_forward_lookup.
    '''
    leaf = (), ()
    multiset = (8, 'c',
                (3, 'a', (), (1, 'b', *leaf)),
                (3, 'e', (1, 'd', *leaf), (1, 'f', *leaf)))
    # Each step: (index to remove, expected symbol, expected (start, freq),
    # expected tree after removal).
    steps = [
        (3, 'c', (3, 2),
         (7, 'c',
          (3, 'a', (), (1, 'b', *leaf)),
          (3, 'e', (1, 'd', *leaf), (1, 'f', *leaf)))),
        (1, 'a', (0, 2),
         (6, 'c',
          (2, 'a', (), (1, 'b', *leaf)),
          (3, 'e', (1, 'd', *leaf), (1, 'f', *leaf)))),
        (3, 'd', (3, 1),
         (5, 'c',
          (2, 'a', (), (1, 'b', *leaf)),
          (2, 'e', (), (1, 'f', *leaf)))),
        (0, 'a', (0, 1),
         (4, 'c',
          (1, 'a', (), (1, 'b', *leaf)),
          (2, 'e', (), (1, 'f', *leaf)))),
        (2, 'e', (2, 1),
         (3, 'c',
          (1, 'a', (), (1, 'b', *leaf)),
          (1, 'e', (), (1, 'f', *leaf)))),
        (1, 'c', (1, 1),
         (2, 'c',
          (1, 'a', (), (1, 'b', *leaf)),
          (1, 'e', (), (1, 'f', *leaf)))),
        (0, 'b', (0, 1),
         (1, 'c', (), (1, 'e', (), (1, 'f', *leaf)))),
        (0, 'f', (0, 1), ()),
    ]
    for index, expected_symbol, expected_interval, expected_multiset in steps:
        multiset, (start, freq), x = reverse_lookup_then_remove(multiset, index)
        assert x == expected_symbol
        assert (start, freq) == expected_interval
        assert multiset == expected_multiset
def test_to_sequence():
    # Flattening a multiset built from *items* must yield the sorted items.
    items = ['a', 'a', 'a', 'a', 'b', 'b', 'b', 'd']
    assert to_sequence(build_multiset(items)) == sorted(items)
def test_forward_lookup():
    # Multiset of 4 x 'a', 3 x 'b', 1 x 'd'; forward lookup yields the
    # cumulative (start, frequency) interval of each symbol.
    multiset = build_multiset(4 * ['a'] + 3 * ['b'] + 1 * ['d'])
    for symbol, interval in [('a', (0, 4)), ('b', (4, 3)), ('d', (7, 1))]:
        assert forward_lookup(multiset, symbol) == interval
def test_reverse_lookup():
    # Multiset of 4 x 'a', 3 x 'b', 1 x 'd': indices 0-3 fall in 'a',
    # 4-6 in 'b', and 7 in 'd'.
    multiset = build_multiset(4 * ['a'] + 3 * ['b'] + 1 * ['d'])
    expected = [((0, 4), 'a')] * 4 + [((4, 3), 'b')] * 3 + [((7, 1), 'd')]
    for index, result in enumerate(expected):
        assert reverse_lookup(multiset, index) == result
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from update import BasicUpdateBlock, SmallUpdateBlock
from extractor import BasicEncoder, SmallEncoder
from corr import CorrBlock, AlternateCorrBlock
from utils.utils import bilinear_sampler, coords_grid, upflow8
from utils.warp_utils import flow_warp
# torch.cuda.amp.autocast only exists from PyTorch 1.6 on; fall back to a
# no-op context manager on older versions so the rest of the module keeps
# working (mixed precision is then effectively disabled).
try:
    autocast = torch.cuda.amp.autocast
except Exception:  # was a bare `except:`; Exception still covers AttributeError without trapping SystemExit/KeyboardInterrupt
    # dummy autocast for PyTorch < 1.6
    class autocast:
        def __init__(self, enabled):
            pass

        def __enter__(self):
            pass

        def __exit__(self, *args):
            pass
class RAFT(nn.Module):
    """RAFT optical-flow estimator: per-image feature extraction, an
    all-pairs correlation volume, and iterative recurrent refinement of the
    flow field, plus warping helpers for unsupervised training."""

    def __init__(self, args):
        super(RAFT, self).__init__()
        self.args = args

        # Model size presets; `hdim`/`cdim` are the recurrent hidden and
        # context channel counts.
        if args.small:
            self.hidden_dim = hdim = 96
            self.context_dim = cdim = 64
            args.corr_levels = 4
            args.corr_radius = 3
        else:
            self.hidden_dim = hdim = 128
            self.context_dim = cdim = 128
            args.corr_levels = 4
            args.corr_radius = 4

        # Default optional flags when absent from `args`.
        if 'dropout' not in self.args:
            self.args.dropout = 0
        if 'alternate_corr' not in self.args:
            self.args.alternate_corr = False

        # feature network, context network, and update block
        if args.small:
            self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
            self.cnet = SmallEncoder(output_dim=hdim+cdim, norm_fn='none', dropout=args.dropout)
            self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
        else:
            self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
            self.cnet = BasicEncoder(output_dim=hdim+cdim, norm_fn='batch', dropout=args.dropout)
            self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)

    def freeze_bn(self):
        # Put every BatchNorm layer in eval mode (freezes running statistics).
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    def initialize_flow(self, img):
        """ Flow is represented as difference between two coordinate grids flow = coords1 - coords0"""
        N, C, H, W = img.shape
        # Grids live at 1/8 resolution, matching the feature encoders.
        coords0 = coords_grid(N, H//8, W//8).to(img.device)
        coords1 = coords_grid(N, H//8, W//8).to(img.device)

        # optical flow computed as difference: flow = coords1 - coords0
        return coords0, coords1

    def upsample_flow(self, flow, mask):
        """ Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination """
        N, _, H, W = flow.shape
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        # Softmax over the 3x3 neighbourhood gives convex weights.
        mask = torch.softmax(mask, dim=2)

        up_flow = F.unfold(8 * flow, [3,3], padding=1)
        up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)

        up_flow = torch.sum(mask * up_flow, dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 2, 8*H, 8*W)

    # code taken from back to basics
    #***************************************************************
    def generate_grid(B, H, W, device):
        # NOTE(review): defined without `self` and referenced unqualified
        # from `stn` below; that name lookup only resolves if this function
        # actually lives at module scope — confirm its placement in the repo.
        xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
        yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
        xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
        yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
        grid = torch.cat((xx, yy), 1).float()
        grid = torch.transpose(grid, 1, 2)
        grid = torch.transpose(grid, 2, 3)
        grid = grid.to(device)
        return grid

    def stn(self, flow, frame):
        # Spatial-transformer-style warp of `frame` by `flow`.
        b, _, h, w = flow.shape
        frame = F.interpolate(frame, size=(h, w), mode='bilinear', align_corners=False)
        # NCHW -> NHWC so the flow can be added to the sampling grid.
        flow = torch.transpose(flow, 1, 2)
        flow = torch.transpose(flow, 2, 3)

        grid = flow + generate_grid(b, h, w, flow.device)

        # Normalise absolute pixel coordinates to [-1, 1] for grid_sample.
        factor = torch.FloatTensor([[[[2 / w, 2 / h]]]]).to(flow.device)
        grid = grid * factor - 1
        warped_frame = F.grid_sample(frame, grid, align_corners=False)

        return warped_frame
    #***************************************************************

    def forward(self, image1, image2, flow_gt, frame1, frame2, \
        iters=12, flow_init=None, upsample=True, test_mode=False):
        """ Estimate optical flow between pair of frames """
        image2_orig = image2
        # Normalise images from [0, 255] to [-1, 1].
        image1 = 2 * (image1 / 255.0) - 1.0
        image2 = 2 * (image2 / 255.0) - 1.0

        image1 = image1.contiguous()
        image2 = image2.contiguous()

        hdim = self.hidden_dim
        cdim = self.context_dim

        # run the feature network
        with autocast(enabled=self.args.mixed_precision):
            fmap1, fmap2 = self.fnet([image1, image2])

        fmap1 = fmap1.float()
        fmap2 = fmap2.float()
        if self.args.alternate_corr:
            corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        else:
            corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)

        # run the context network
        with autocast(enabled=self.args.mixed_precision):
            cnet = self.cnet(image1)
            # Split context features into recurrent state and static input.
            net, inp = torch.split(cnet, [hdim, cdim], dim=1)
            net = torch.tanh(net)
            inp = torch.relu(inp)

        coords0, coords1 = self.initialize_flow(image1)

        if flow_init is not None:
            coords1 = coords1 + flow_init

        flow_predictions = []
        for itr in range(iters):
            # Detach so gradients do not flow through all previous iterations.
            coords1 = coords1.detach()
            corr = corr_fn(coords1) # index correlation volume

            flow = coords1 - coords0
            with autocast(enabled=self.args.mixed_precision):
                net, up_mask, delta_flow = self.update_block(net, inp, corr, flow)

            # F(t+1) = F(t) + \Delta(t)
            coords1 = coords1 + delta_flow

            # upsample predictions
            if up_mask is None:
                flow_up = upflow8(coords1 - coords0)
            else:
                flow_up = self.upsample_flow(coords1 - coords0, up_mask)

            flow_predictions.append(flow_up)

        if test_mode:
            return coords1 - coords0, flow_up

        # assumes flow_warp warps image2 toward image1 with each predicted
        # flow — TODO confirm direction against utils.warp_utils.flow_warp
        warped_images = [flow_warp(image2_orig, flow) for flow in flow_predictions]
        return flow_predictions, warped_images
|
import platform
import pandas as pd
import sklearn
import numpy as np
import os
from sklearn.externals import joblib
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn import tree
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
from sklearn_pandas import DataFrameMapper
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, accuracy_score
from azureml.core import Run, Dataset, Workspace
from sklearn.metrics import accuracy_score
from azureml.contrib.interpret.explanation.explanation_client import ExplanationClient
from azureml.core.run import Run
from interpret.ext.blackbox import TabularExplainer
from azureml.contrib.interpret.visualize import ExplanationDashboard
def main():
    """Load the telco-churn dataset from the AzureML workspace, train the
    churn classifier, and register the resulting model with the run."""
    ws = Run.get_context().experiment.workspace
    os.makedirs('./outputs', exist_ok=True)
    df = Dataset.get_by_name(ws,'telcochurn').to_pandas_dataframe()
    df = df.dropna(how="all") # remove samples with all missing values
    df["Churn_numerical"] = df["Churn"]
    # Drop rows whose TotalCharges is blank BEFORE extracting the target;
    # previously `target` was taken first, so `df` and `target` ended up with
    # different lengths and train_test_split raised. (Bug fix.)
    total_charges_filter = df.TotalCharges == " "
    df = df[~total_charges_filter]
    df.TotalCharges = pd.to_numeric(df.TotalCharges)
    target = df["Churn"]
    df = df.drop(['Churn_numerical','Churn'], axis=1)
    train_model(df, target)
    run = Run.get_context()
    # Register the model artifact written by train_model under outputs/.
    model = run.register_model(model_name='Churn_model', model_path='outputs/classifier.pkl')
def train_model(df, target):
    """Train a LogisticRegression churn classifier on *df*/*target*, persist
    it to ./outputs/classifier.pkl, log accuracy to the AzureML run, and
    upload a SHAP-based global explanation.

    Numeric features are median-imputed and scaled; categorical features are
    one-hot encoded. The preprocessing and classifier form one Pipeline.
    """
    # Creating dummy columns for each categorical feature
    categorical = []
    for col, value in df.iteritems():
        if value.dtype == 'object':
            categorical.append(col)
    # Store the numerical columns in a list numerical
    numerical = df.columns.difference(categorical)
    numeric_transformations = [([f], Pipeline(steps=[
        ('imputer', SimpleImputer(strategy='median')),
        ('scaler', StandardScaler())])) for f in numerical]
    categorical_transformations = [([f], OneHotEncoder(handle_unknown='ignore', sparse=False)) for f in categorical]
    transformations = numeric_transformations + categorical_transformations
    # Append classifier to preprocessing pipeline
    clf = Pipeline(steps=[('preprocessor', DataFrameMapper(transformations)),
                          ('classifier', LogisticRegression(solver='lbfgs'))])
    # Split data into train and test
    x_train, x_test, y_train, y_test = train_test_split(df, target, test_size=0.35, random_state=0, stratify=target)
    clf.fit(x_train, y_train)
    y_pred = clf.predict(x_test)
    print(classification_report(y_test, y_pred))
    accu = accuracy_score(y_test, y_pred)
    model_file_name = 'classifier.pkl'
    # save model in the outputs folder so it automatically get uploaded
    # (bug fix: the previous `with open(model_file_name, 'wb')` wrapper only
    # created a stray empty file in the CWD — joblib.dump alone writes the
    # actual artifact into ./outputs/).
    joblib.dump(value=clf, filename=os.path.join('./outputs/', model_file_name))
    run = Run.get_context()
    run.log("accuracy", accu)
    # we upload the model into the experiment artifact store, but do not register it as a model until unit tests are sucessfully passed in next ML step
    run.upload_file(model_file_name, os.path.join('./outputs/', model_file_name))
    #Interpret steps
    client = ExplanationClient.from_run(run)
    # Using SHAP TabularExplainer on the fitted classifier step only.
    explainer = TabularExplainer(clf.steps[-1][1],
                                 initialization_examples=x_train,
                                 features=df.columns,
                                 classes=["Not leaving", "leaving"],
                                 transformations=transformations)
    # explain overall model predictions (global explanation)
    global_explanation = explainer.explain_global(x_test)
    # Sorted SHAP values
    print('ranked global importance values: {}'.format(global_explanation.get_ranked_global_values()))
    # Corresponding feature names
    print('ranked global importance names: {}'.format(global_explanation.get_ranked_global_names()))
    # Feature ranks (based on original order of features)
    print('global importance rank: {}'.format(global_explanation.global_importance_rank))
    # uploading global model explanation data for storage or visualization in webUX
    # the explanation can then be downloaded on any compute
    # multiple explanations can be uploaded
    client.upload_model_explanation(global_explanation, comment='global explanation: all features')
    # or you can only upload the explanation object with the top k feature info
    #client.upload_model_explanation(global_explanation, top_k=2, comment='global explanation: Only top 2 features')
# Run the training entry point only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
"""Some things you just can't test as unit tests"""
import os
import subprocess
import sys
import tempfile
import unittest
import shutil
example = """
def main():
print(gcd(15, 10))
print(gcd(45, 12))
def gcd(a, b):
while b:
a, b = b, a%b
return a
"""
driver = """
from pyannotate_runtime import collect_types
if __name__ == '__main__':
collect_types.init_types_collection()
with collect_types.collect():
main()
collect_types.dump_stats('type_info.json')
"""
class_example = """
class A(object): pass
def f(x):
return x
def main():
f(A())
f(A())
"""
class IntegrationTest(unittest.TestCase):
    """End-to-end checks for pyannotate: write a sample module plus a
    type-collecting driver into a temp directory, execute the driver as a
    subprocess, then run pyannotate_tools on the sample and inspect the
    proposed annotation diff."""

    def setUp(self):
        # Work inside a fresh temp dir; PYTHONPATH points at the original cwd
        # so the pyannotate packages stay importable from there.
        # NOTE(review): os.putenv bypasses os.environ (the change is only
        # seen by child processes) and is never restored in tearDown —
        # confirm this is intended.
        self.savedir = os.getcwd()
        os.putenv('PYTHONPATH', self.savedir)
        self.tempdir = tempfile.mkdtemp()
        os.chdir(self.tempdir)

    def tearDown(self):
        # Leave the temp dir before removing it.
        os.chdir(self.savedir)
        shutil.rmtree(self.tempdir)

    def test_simple(self):
        # Collect runtime types for gcd.py, then check the suggested diff.
        with open('gcd.py', 'w') as f:
            f.write(example)
        with open('driver.py', 'w') as f:
            f.write('from gcd import main\n')
            f.write(driver)
        subprocess.check_call([sys.executable, 'driver.py'])
        output = subprocess.check_output([sys.executable, '-m', 'pyannotate_tools.annotations', 'gcd.py'])
        lines = output.splitlines()
        assert b'+    # type: () -> None' in lines
        assert b'+    # type: (int, int) -> int' in lines

    def test_auto_any(self):
        # With -a (auto-any) no runtime data is needed; everything becomes Any.
        with open('gcd.py', 'w') as f:
            f.write(example)
        output = subprocess.check_output([sys.executable, '-m', 'pyannotate_tools.annotations', '-a', 'gcd.py'])
        lines = output.splitlines()
        assert b'+    # type: () -> None' in lines
        assert b'+    # type: (Any, Any) -> Any' in lines

    def test_no_type_info(self):
        # Without a type_info.json the tool must fail with a clear message.
        with open('gcd.py', 'w') as f:
            f.write(example)
        try:
            subprocess.check_output([sys.executable, '-m', 'pyannotate_tools.annotations', 'gcd.py'],
                                    stderr=subprocess.STDOUT)
            assert False, "Expected an error"
        except subprocess.CalledProcessError as err:
            assert err.returncode == 1
            lines = err.output.splitlines()
            assert (b"Can't open type info file: "
                    b"[Errno 2] No such file or directory: 'type_info.json'" in lines)

    def test_package(self):
        # Same as test_simple, but with the target module inside a package.
        os.makedirs('foo')
        with open('foo/__init__.py', 'w') as f:
            pass
        with open('foo/gcd.py', 'w') as f:
            f.write(example)
        with open('driver.py', 'w') as f:
            f.write('from foo.gcd import main\n')
            f.write(driver)
        subprocess.check_call([sys.executable, 'driver.py'])
        output = subprocess.check_output([sys.executable, '-m', 'pyannotate_tools.annotations', 'foo/gcd.py'])
        lines = output.splitlines()
        assert b'+    # type: () -> None' in lines
        assert b'+    # type: (int, int) -> int' in lines

    def test_subdir(self):
        # Target module found via a sys.path entry rather than a package.
        os.makedirs('foo')
        with open('foo/gcd.py', 'w') as f:
            f.write(example)
        with open('driver.py', 'w') as f:
            f.write('import sys\n')
            f.write('sys.path.insert(0, "foo")\n')
            f.write('from gcd import main\n')
            f.write(driver)
        subprocess.check_call([sys.executable, 'driver.py'])
        output = subprocess.check_output([sys.executable, '-m', 'pyannotate_tools.annotations',
                                          # Construct platform-correct pathname:
                                          os.path.join('foo', 'gcd.py')])
        lines = output.splitlines()
        assert b'+    # type: () -> None' in lines
        assert b'+    # type: (int, int) -> int' in lines

    def test_subdir_w_class(self):
        # A user-defined class must be annotated by name, without adding an
        # import for a type defined in the annotated module itself.
        os.makedirs('foo')
        with open('foo/bar.py', 'w') as f:
            f.write(class_example)
        with open('driver.py', 'w') as f:
            f.write('import sys\n')
            f.write('sys.path.insert(0, "foo")\n')
            f.write('from bar import main\n')
            f.write(driver)
        subprocess.check_call([sys.executable, 'driver.py'])
        output = subprocess.check_output([sys.executable, '-m', 'pyannotate_tools.annotations',
                                          # Construct platform-correct pathname:
                                          os.path.join('foo', 'bar.py')])
        lines = output.splitlines()
        print(b'\n'.join(lines).decode())
        assert b'+    # type: () -> None' in lines
        assert b'+    # type: (A) -> A' in lines
        assert not any(line.startswith(b'+') and b'import' in line for line in lines)
|
<gh_stars>0
# coding: utf-8
"""
OpenLattice API
OpenLattice API # noqa: E501
The version of the OpenAPI document: 0.0.1
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openlattice.configuration import Configuration
class OrganizationExternalDatabaseTableColumnsPair(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    # openapi_types: attribute name -> declared OpenAPI type.
    # attribute_map: attribute name -> JSON key in the API definition.
    openapi_types = {
        'table': 'OrganizationExternalDatabaseTable',
        'columns': 'list[OrganizationExternalDatabaseColumn]'
    }

    attribute_map = {
        'table': 'table',
        'columns': 'columns'
    }

    def __init__(self, table=None, columns=None, local_vars_configuration=None):  # noqa: E501
        """OrganizationExternalDatabaseTableColumnsPair - a model defined in OpenAPI"""  # noqa: E501
        self.local_vars_configuration = (
            local_vars_configuration
            if local_vars_configuration is not None
            else Configuration())

        self._table = None
        self._columns = None
        self.discriminator = None

        if table is not None:
            self.table = table
        if columns is not None:
            self.columns = columns

    @property
    def table(self):
        """Gets the table of this OrganizationExternalDatabaseTableColumnsPair.  # noqa: E501

        :return: The table of this OrganizationExternalDatabaseTableColumnsPair.  # noqa: E501
        :rtype: OrganizationExternalDatabaseTable
        """
        return self._table

    @table.setter
    def table(self, table):
        """Sets the table of this OrganizationExternalDatabaseTableColumnsPair.

        :param table: The table of this OrganizationExternalDatabaseTableColumnsPair.  # noqa: E501
        :type table: OrganizationExternalDatabaseTable
        """
        self._table = table

    @property
    def columns(self):
        """Gets the columns of this OrganizationExternalDatabaseTableColumnsPair.  # noqa: E501

        :return: The columns of this OrganizationExternalDatabaseTableColumnsPair.  # noqa: E501
        :rtype: list[OrganizationExternalDatabaseColumn]
        """
        return self._columns

    @columns.setter
    def columns(self, columns):
        """Sets the columns of this OrganizationExternalDatabaseTableColumnsPair.

        :param columns: The columns of this OrganizationExternalDatabaseTableColumnsPair.  # noqa: E501
        :type columns: list[OrganizationExternalDatabaseColumn]
        """
        self._columns = columns

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _serialize(item):
            # Recurse into nested models that expose to_dict().
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [_serialize(entry) for entry in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: _serialize(v) for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if isinstance(other, OrganizationExternalDatabaseTableColumnsPair):
            return self.to_dict() == other.to_dict()
        return False

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if isinstance(other, OrganizationExternalDatabaseTableColumnsPair):
            return self.to_dict() != other.to_dict()
        return True
|
<filename>decline_adjectives.py
import spacy
from spacy_iwnlp import spaCyIWNLP
# Module-level pipeline setup: load the German spaCy model and attach the
# IWNLP lemmatizer so tokens expose `token._.iwnlp_lemmas`.
nlp = spacy.load('de')
iwnlp = spaCyIWNLP(lemmatizer_path='case_dict/IWNLP.Lemmatizer_20181001.json')
nlp.add_pipe(iwnlp)
# Example usage kept for reference:
#doc = nlp("Wir mögen jene Fußballspiele mit jenen Verlängerungen, welche bei diesem Wetter stattfinden.")
#for token in doc:
#    print('POS: {}\tIWNLP:{}'.format(token.pos_, token._.iwnlp_lemmas))
def lemmatize_adjective(token):
    """Return the lemma stem of an adjective token.

    Prefers the IWNLP lemma and falls back to spaCy's lemmatizer; the
    result is normalized so declension suffixes can be appended cleanly.
    """
    iwnlp_lemmas = token._.iwnlp_lemmas
    # Fallback strategy: use the spaCy lemmatizer when IWNLP has no entry.
    lemma = iwnlp_lemmas[0] if iwnlp_lemmas else token.lemma_

    # Strip a trailing schwa so endings attach cleanly.
    if lemma.endswith('e'):
        lemma = lemma[:-1]

    # Prevent forms like "verbündeter" -> "verbündener": if the stem does not
    # occur in the surface form, restore a participle "t" instead.
    if lemma.endswith('en'):
        if lemma not in token.text:
            lemma = lemma[:-1] + 't'
        if lemma not in token.text:
            lemma = lemma[:-2] + 't'
    return lemma
def decline_adjective_STRONG(lemmatized_adjective, case, number, gender):
    """Strong declension of a German adjective stem.

    :param lemmatized_adjective: bare stem, e.g. 'gut'
    :param case: 'nom' | 'acc' | 'dat' | 'gen'
    :param number: plural marker; compared case-insensitively (see fix below)
    :param gender: 'm' | 'f' | 'n'
    :return: the declined form (stem + strong ending)

    Bug fix: the original compared ``number`` against lowercase 'pl' in the
    dative branch but capitalized 'Pl' in the plural branch, so one of the
    two spellings always took the wrong path (e.g. nominative plural with
    'pl' produced '-er' instead of '-e'). The plural test is now a single
    case-insensitive check used by both branches.
    """
    plural = number.lower() == 'pl'
    if plural and case == 'dat':
        # Strong dative plural is always '-en' ("guten Kindern").
        declined_adjective = lemmatized_adjective + 'en'
    elif gender == 'f' or plural:
        if case in ('nom', 'acc'):
            declined_adjective = lemmatized_adjective + 'e'
        else:
            declined_adjective = lemmatized_adjective + 'er'
    elif case == 'dat':
        declined_adjective = lemmatized_adjective + 'em'
    elif case == 'gen':
        declined_adjective = lemmatized_adjective + 'en'
    elif gender == 'n':
        declined_adjective = lemmatized_adjective + 'es'
    elif case == 'acc':
        declined_adjective = lemmatized_adjective + 'en'
    else:
        declined_adjective = lemmatized_adjective + 'er'
    return declined_adjective
def decline_adjective_WEAK(lemmatized_adjective, case, gender):
    """Weak declension: '-e' for nominative (any gender) and for
    accusative neuter/feminine; '-en' everywhere else."""
    takes_e = (
        (case == 'nom' and gender in ('m', 'f', 'n'))
        or (case == 'acc' and gender in ('n', 'f'))
    )
    suffix = 'e' if takes_e else 'en'
    return lemmatized_adjective + suffix
def decline_adjective(token, case, number, gender, type):
    """Lemmatize *token*, then decline it: type 1 -> strong, otherwise weak."""
    lemma = lemmatize_adjective(token)
    if type == 1:
        return decline_adjective_STRONG(lemma, case, number, gender)
    return decline_adjective_WEAK(lemma, case, gender)
#for token in doc:
# print(token.lemma_)
# if token.tag_ == 'ADJA':
# lemmatize_adjective(token) |
#!/usr/bin/env python2
# coding: utf-8
import datetime
import logging
import os
import time
import boto3
import s3transfer
from botocore.client import Config
# --- Upload configuration -------------------------------------------------
# NOTE(review): credentials are placeholders; prefer supplying real keys via
# environment variables or a config file instead of hard-coding them here.
access_key = '<KEY>'
secret_key = '<KEY>'
bucket_name = 'renzhi-test-bucket'
file_acl = 'public-read'    # ACL applied to every uploaded object
report_interval = 30        # seconds between progress report lines
mega = 1024.0 * 1024.0      # bytes per megabyte (float, for rate arithmetic)
# Upload window (wall-clock HH:MM): check_schedule() sleeps outside it.
schedule = {
    'start': '22:25',
    'stop': '3:30',
}
# Mutable global counters used for throttling and progress reporting.
stat = {
    'bytes_uploaded': 0,
    'start_time': time.time(),
    'bandwidth': 10,  # 10M
    'report_time': time.time(),
}
config = Config(signature_version='s3v4')
s3_client = boto3.client(
    's3',
    aws_access_key_id=access_key,
    aws_secret_access_key=secret_key,
    config=config,
    region_name='us-east-1',
    endpoint_url='http://127.0.0.1',
)
s3_transfer = s3transfer.S3Transfer(s3_client)
def filter_dir(dir_name):
    """Return False for hidden directories (leading '.'), True otherwise."""
    return not dir_name.startswith('.')
def filter_file(file_name):
    """Return False for hidden files (leading '.'), True otherwise."""
    return not file_name.startswith('.')
def get_iso_now():
    """Current UTC time as a compact ISO-8601 basic string, e.g. 20240101T120000Z."""
    return datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
def dir_iter(dir_name):
    """Breadth-first walk over *dir_name*.

    Yields each directory as a list of '/'-separated path components,
    starting with *dir_name* itself. Entries rejected by filter_dir()
    (hidden names) are never descended into.
    """
    pending = [dir_name.split('/')]
    while pending:
        dir_parts = pending.pop(0)
        for entry in os.listdir('/'.join(dir_parts)):
            if not filter_dir(entry):
                continue
            child_parts = dir_parts + [entry]
            if os.path.isdir('/'.join(child_parts)):
                pending.append(child_parts)
        yield dir_parts
def get_files_to_upload(dir_name, progress_file):
    """Return {path: True} for files in *dir_name* not yet recorded as uploaded.

    *progress_file* holds one whitespace-separated record per uploaded file
    with the local path in the first column; it is created if missing.

    Fixes: blank lines in the progress file no longer crash with IndexError
    (the original did ``line.split()[0]`` unconditionally), and the read
    handle is closed via a context manager.
    """
    files_to_upload = {}
    for f in os.listdir(dir_name):
        if not filter_file(f):
            continue
        files_to_upload[os.path.join(dir_name, f)] = True
    # Touch the progress file so the read below cannot fail on the first run.
    open(progress_file, 'a').close()
    with open(progress_file) as fd:
        for line in fd:
            parts = line.split()
            if not parts:  # fix: skip blank/whitespace-only lines
                continue
            files_to_upload.pop(parts[0], None)
    return files_to_upload
def upload_one_file(file_name, base_len, key_prefix):
    """Upload one file (or create a directory marker) in S3.

    The object key is *file_name* with its first *base_len* path components
    stripped and *key_prefix* prepended. Directories become zero-byte objects
    whose key ends in '/'. Every upload is verified with a head_object call.
    Returns an info dict (key, etag, sizes, timestamp) or None on any failure.
    Relies on module globals: s3_client, s3_transfer, logger, bucket_name,
    file_acl.
    """
    file_parts = file_name.split('/')
    key = os.path.join(key_prefix, '/'.join(file_parts[base_len:]))
    info = {}
    try:
        # NOTE(review): under Python 2 this produces a byte string; the later
        # `key + '/'` concatenation relies on that.
        key = key.encode('utf-8')
    except Exception as e:
        logger.error('failed to encode the key: ' + repr(e))
        return
    if os.path.isdir(file_name):
        # Directories are represented as empty objects with a trailing '/'.
        info['local_size'] = 0
        key = key + '/'
        resp = s3_client.put_object(
            ACL=file_acl,
            Bucket=bucket_name,
            Key=key,
            Body=''
        )
        status = resp['ResponseMetadata']['HTTPStatusCode']
        if status != 200:
            logger.error('failed to put object: %s %d' % (key, status))
            return
        # Verify the object landed and collect its stored metadata.
        resp = s3_client.head_object(
            Bucket=bucket_name,
            Key=key
        )
        status = resp['ResponseMetadata']['HTTPStatusCode']
        if status != 200:
            logger.error('failed to put object: %s %d' % (key, status))
            return
        info['file_key'] = key
        info['etag'] = resp['ETag']
        info['resp_size'] = resp['ContentLength']
    else:
        info['local_size'] = os.stat(file_name).st_size
        s3_transfer.upload_file(file_name, bucket_name,
                                key, extra_args={'ACL': file_acl})
        # Verify the upload and collect the stored size for later comparison.
        resp = s3_client.head_object(
            Bucket=bucket_name,
            Key=key
        )
        status = resp['ResponseMetadata']['HTTPStatusCode']
        if status != 200:
            logger.error('failed to put object: %s %d' % (key, status))
            return
        info['file_key'] = key
        info['etag'] = resp['ETag']
        info['resp_size'] = resp['ContentLength']
    info['upload_time'] = get_iso_now()
    return info
def upload_one_directory(dir_parts, base_len, key_prefix):
    """Upload every pending file in one directory, with throttling.

    Appends a record per uploaded file to the directory's hidden progress
    file so interrupted runs can resume, throttles to stat['bandwidth']
    MB/s by sleeping, emits a progress report every report_interval
    seconds, and honours the configured schedule window between files.
    (Python 2 source: uses the print statement.)
    """
    dir_name = '/'.join(dir_parts)
    progress_file = os.path.join(dir_name, '.upload_progress_')
    files_to_upload = get_files_to_upload(dir_name, progress_file)
    fd = open(progress_file, 'a')
    for file_name in files_to_upload.keys():
        logger.warn('start to upload file: %s' % file_name)
        info = upload_one_file(file_name, base_len, key_prefix)
        if info == None:
            # Upload failed and was already logged; skip the progress record
            # so a later run retries this file.
            continue
        if info['local_size'] != info['resp_size']:
            logger.error('file size not equal, local_size: %d, response size: %d'
                         % (info['local_size'], info['resp_size']))
        upload_time = get_iso_now()
        line = '%s %s %s %d %d %s\n' % (file_name, info['file_key'], info['etag'],
                                        info['local_size'], info['resp_size'], upload_time)
        fd.write(line)
        # Throttle: compute how long the bytes uploaded so far *should* have
        # taken at the configured bandwidth, and sleep off any surplus.
        stat['bytes_uploaded'] = stat['bytes_uploaded'] + info['local_size']
        time_need = stat['bytes_uploaded'] / (mega * stat['bandwidth'])
        ts_now = time.time()
        time_to_sleep = stat['start_time'] + time_need - ts_now
        if ts_now - stat['report_time'] > report_interval:
            stat['report_time'] = ts_now
            time_used = ts_now - stat['start_time']
            report_str = ('upload stat, bytes uploaded: %dM, time used: %fs, bandwidth: %f Mbytes/s'
                          % (stat['bytes_uploaded'] / mega, time_used,
                             stat['bytes_uploaded'] / time_used / mega))
            logger.warn(report_str)
            print report_str
        if time_to_sleep > 0:
            logger.warn('about to sleep %f seconds to slow down' %
                        time_to_sleep)
            time.sleep(time_to_sleep)
        # Block here if the current wall-clock time leaves the upload window.
        check_schedule()
    fd.close()
def run(dir_name, key_prefix):
    """Validate *dir_name* and upload every directory under it to S3 under *key_prefix*."""
    if dir_name.endswith('/'):
        print('do not add / to the directory name: ' + dir_name)
        return
    if not dir_name.startswith('/'):
        print('the directory name is not absolute path: ' + dir_name)
        return
    if not os.path.exists(dir_name) or not os.path.isdir(dir_name):
        print(dir_name + ' is not exists or is not a directory')
        return
    # Number of leading path components to strip when building object keys.
    base_len = len(dir_name.split('/')) - 1
    print('start to upload ' + dir_name + ' to ' + key_prefix)
    for dir_parts in dir_iter(dir_name):
        upload_one_directory(dir_parts, base_len, key_prefix)
def check_schedule():
    """Block until the current wall-clock time is inside the configured
    upload window; polls once a minute while outside it.

    The window may wrap past midnight (start > stop).
    """
    def to_minutes(hhmm):
        # 'HH:MM' -> minutes since midnight.
        h, m = hhmm.split(':')
        return int(m) + int(h) * 60

    start_m = to_minutes(schedule['start'])
    stop_m = to_minutes(schedule['stop'])
    while True:
        now = datetime.datetime.now()
        now_m = now.minute + now.hour * 60
        if start_m < stop_m:
            inside = start_m <= now_m <= stop_m
        else:
            # Window wraps past midnight: outside only strictly between
            # stop and start.
            inside = not (stop_m < now_m < start_m)
        if inside:
            return
        wait_m = (start_m - now_m) % (60 * 24)
        line = 'the schedule is from %s to %s, need to wait %d hours and %d minutes' % (
            schedule['start'], schedule['stop'], wait_m / 60, wait_m % 60)
        print(line)
        logger.warn(line)
        time.sleep(60)
if __name__ == "__main__":
    # CLI: upload.py <dir_name> <key_prefix> <log_dir> <bandwidth MB/s>
    import sys
    dir_name = sys.argv[1]
    key_prefix = sys.argv[2]
    log_dir = sys.argv[3]
    bandwidth = sys.argv[4]
    stat['bandwidth'] = float(bandwidth)
    if not os.path.exists(log_dir) or not os.path.isdir(log_dir):
        print log_dir + ' is not exists or is not a directory'
        exit()
    # One log file per source directory; slashes flattened into underscores.
    log_file = os.path.join(log_dir, 'upload-log-for-' +
                            dir_name.replace('/', '_') + '.log')
    # Root logger writes WARN and above to the per-directory log file.
    logger = logging.getLogger()
    logger.setLevel(logging.WARN)
    file_handler = logging.FileHandler(log_file)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    run(dir_name, key_prefix)
|
import os
import io
import ast
import inspect
import pandas as pd
import numpy as np
from collections import deque, Counter
class EndNode():
    """Sentinel marking the end of the program in the logic graph."""

    def __init__(self):
        # Mirror ast.AST's interface so generic traversal code can read _fields.
        self._fields = ""

    def __str__(self):
        return '_ast.Program_End'
class DummyNode():
    """Placeholder node marking the end of a subgraph (for/while body)."""

    def __init__(self):
        # Mirror ast.AST's interface so generic traversal code can read _fields.
        self._fields = ""

    def __str__(self):
        return '_ast.Dummy_Node'
"""
traverse the Abstact Syntax Tree and collect the AST nodes using
dfs algorithm.
"""
class TraverseAST():
    """Builds a control-flow-like adjacency list over an AST.

    Statements are linked in execution order; loop bodies are closed with a
    DummyNode that points back to the loop header, and `return` statements
    point to a shared EndNode sentinel.
    """

    @classmethod
    def ast_neighbors(cls, node):
        """Return the statement-level children of *node* as a list of groups.

        Only the 'body' and 'orelse' fields are considered, i.e. the
        statement blocks of compound nodes; expression children are ignored.
        Each group is a list of sibling statements in source order.
        """
        if not node:
            return []
        if not isinstance(node, ast.AST):
            return []
        neighbor_nodes = []
        for attr in node._fields:
            if attr not in ['body', 'orelse']:
                continue
            attr_node = getattr(node, attr)
            if isinstance(attr_node, ast.AST):
                neighbor_nodes.append([attr_node])
            if isinstance(attr_node, list):
                neighbor_nodes.append(attr_node)
        return neighbor_nodes

    @classmethod
    def adjacency_list(cls, root):
        """Return {node: deque(successors)} for the AST rooted at *root*."""
        adj_list = {}
        if not root:
            return adj_list
        return_node = EndNode() # denote the end of logic graph
        node_list = []  # NOTE(review): unused; kept unchanged in this doc pass
        queue = deque([root])
        while queue:
            node = queue.popleft() # FIFO
            if not isinstance(node, ast.AST):
                continue
            if isinstance(node, ast.Return):
                continue
            # the next node of current root node (first successor already
            # recorded for it, if any)
            next_node = None
            if node in adj_list and adj_list[node]:
                next_node = adj_list[node][0]
            # dummy node indicate the end of subgraph: for/while etc
            dummy_node = None
            if isinstance(node, (ast.For, ast.While)):
                dummy_node = DummyNode()
            # build adjacency list
            neighbors = cls.ast_neighbors(node)
            for neighbor_group in neighbors:
                current_node = node
                # add the dummy node and subgraph end node to the neighbor list
                # (temporarily; they are popped again below)
                pop_count = 0
                if dummy_node:
                    neighbor_group.append(dummy_node)
                    adj_list[dummy_node] = deque([node])  # loop back-edge
                    pop_count += 1
                if next_node:
                    neighbor_group.append(next_node)
                    pop_count += 1
                # iterate the neighbor and insert edges in execution order
                for adj_node in neighbor_group:
                    if isinstance(current_node, ast.Return):
                        continue
                    tmp_list = adj_list.get(current_node, deque([]))
                    if isinstance(adj_node, ast.Return):
                        # Returns all flow into the shared EndNode sentinel.
                        tmp_list.append(return_node)
                    else:
                        tmp_list.append(adj_node)
                    adj_list[current_node] = tmp_list
                    current_node = adj_node
                # remove dummy and return nodes (restore the original group)
                for _ in range(pop_count):
                    neighbor_group.pop()
                # add the nodes to the head of the queue
                neighbor_group.reverse()
                queue.extendleft(neighbor_group)
        return adj_list
def arw_embedding(source_code, min_length = 5, max_length = 20, measure_step = 5, sample_number = 1500):
    """Anonymous-random-walk embedding of *source_code*'s AST logic graph.

    Samples *sample_number* random walks over the adjacency list built by
    TraverseAST and histograms, for each measured walk length, how many
    steps revisited already-seen nodes. Unparseable source yields an
    all-zero embedding. Nondeterministic (uses np.random).
    """
    # init ARW parameters: one counter list per measured walk length
    walk_length = min_length
    walk_samples = {}
    while walk_length <= max_length:
        walk_samples[walk_length] = [0] * walk_length
        walk_length += measure_step
    # parse graph
    node_list = {}
    node_choices = []
    try:
        root = ast.parse(source_code, mode='exec')
        node_list = TraverseAST.adjacency_list(root)
        node_choices = list(node_list.keys())
    except Exception:
        pass  # fall through to the all-zero embedding below
    # ARW
    if not node_choices:
        embedding = []
        for _, values in walk_samples.items():
            embedding.extend(values)
        return embedding
    for _ in range(sample_number):
        index = 0
        current_node = np.random.choice(node_choices)
        visited = {current_node}
        # NOTE(review): walk_length here is the post-loop value (> max_length),
        # so a single walk records stats for every measured length — presumably
        # intentional; confirm.
        for path_length in range(1, walk_length):
            if current_node in node_list:
                neighbors = list(node_list[current_node])
            else:
                # Dead end: restart from an arbitrary graph node.
                neighbors = list(node_list.keys())
            #visited = set([])
            current_node = np.random.choice(neighbors)
            if current_node not in visited:
                index += 1
                visited.add(current_node)
            if path_length + 1 in walk_samples:
                # Bucket by the number of revisited steps at this length.
                key = path_length - index
                walk_samples[path_length + 1][key] += 1
    # output result: concatenate the histograms in key order
    embedding = []
    for _, values in walk_samples.items():
        embedding.extend(values)
    return embedding
class FeaturePipeline():
    """Extracts a fixed-length vector of structural features from an AST.

    Usage: ``FeaturePipeline(ast.parse(src))()`` returns a list of 17 counts
    and ratios describing calls, conditionals, loops, nesting and their
    interactions.

    Fixes relative to the original:
    - ``_dfs`` returned the undefined name ``node_list`` when *root* was
      falsy, raising NameError; it now returns an empty list.
    - The traversal comment claimed FIFO, but ``deque.pop()`` is LIFO
      (depth-first); the comment is corrected.
    """

    def __init__(self, root):
        if not root:
            raise ValueError('> AST root node can not be empty.')
        self.node_list = self._dfs(root)
        # Raw counters, filled by _countable_features().
        self.call_count = {}          # callee name -> number of call sites
        self.ifs_count = 0
        self.loop_count = 0
        self.break_count = 0
        self.continue_count = 0
        self.variables = set([])      # names assigned at least once
        self.recursions = 0           # references to the enclosing function's name
        self.loop_loop_count = 0      # loops containing another loop
        self.loop_cond_count = 0      # loops containing a conditional
        self.nested_loop_depth = 0    # max loops seen inside a single loop subtree
        self.cond_loop_count = 0      # conditionals containing a loop
        self.cond_cond_count = 0      # conditionals containing another conditional
        self.loop_statement_count = 0 # statements inside loop bodies
        self.loop_fun_call_count = 0  # call sites inside loop bodies
        self.loop_return_count = 0    # returns inside loop bodies

    def _countable_features(self):
        """Single pass over all nodes, updating every counter."""
        for node in self.node_list:
            # func call counts
            if isinstance(node, ast.Call):
                self._count_func_call(node)
            # if count
            if isinstance(node, ast.If):
                self.ifs_count += 1
                self._cond_features(node)
            # loop count
            if isinstance(node, (ast.For, ast.While)):
                self.loop_count += 1
                self._loop_features(node)
            # break count
            if isinstance(node, ast.Break):
                self.break_count += 1
            # continue count
            if isinstance(node, ast.Continue):
                self.continue_count += 1
            # variable count
            if isinstance(node, ast.Assign):
                self._count_variable(node)
            # check recursion
            if isinstance(node, ast.FunctionDef):
                self.recursions = self._has_recursion(node)

    def __call__(self):
        """Run the pipeline and return the 17-element feature vector."""
        self._countable_features()
        call_count = sum(self.call_count.values())
        return [
            len(self.call_count),
            call_count,
            self.ifs_count,
            self.loop_count,
            self.break_count,
            self.continue_count,
            len(self.variables),
            self.recursions,
            len(self.node_list),
            self.nested_loop_depth,
            self.loop_loop_count / float(self.loop_count) if self.loop_count > 0 else 0,
            self.loop_cond_count / float(self.loop_count) if self.loop_count > 0 else 0,
            self.cond_cond_count / float(self.ifs_count) if self.ifs_count > 0 else 0,
            self.cond_loop_count / float(self.ifs_count) if self.ifs_count > 0 else 0,
            self.loop_statement_count / float(len(self.node_list)) if self.node_list else 0,
            self.loop_fun_call_count / float(call_count) if call_count > 0 else 0,
            self.loop_return_count
        ]

    def _cond_features(self, root):
        """Count loops/conditionals nested under the conditional *root*."""
        if not hasattr(root, 'body'):
            return
        loop_list = self._dfs(root, node_type = (ast.For, ast.While, ast.If))
        loop_count = 0
        cond_count = 0
        for node in loop_list:
            if isinstance(node, (ast.For, ast.While)):
                loop_count += 1
            if isinstance(node, ast.If):
                cond_count += 1
        self.cond_loop_count += (1 if loop_count > 0 else 0)
        # > 1 because the subtree always contains *root* itself.
        self.cond_cond_count += (1 if cond_count > 1 else 0)

    def _loop_features(self, root):
        """Count statements, calls, returns and nesting under the loop *root*."""
        if not hasattr(root, 'body'):
            return
        loop_list = self._dfs(root)
        loop_count = 0
        cond_count = 0
        for node in loop_list:
            if isinstance(node, (ast.For, ast.While)):
                loop_count += 1
            if isinstance(node, ast.If):
                cond_count += 1
            if isinstance(node, ast.Call):
                self.loop_fun_call_count += 1
            if isinstance(node, ast.Return):
                self.loop_return_count += 1
            self.loop_statement_count += 1
        self.nested_loop_depth = max(loop_count, self.nested_loop_depth)
        # > 1 because the subtree always contains *root* itself.
        self.loop_loop_count += (1 if loop_count > 1 else 0)
        self.loop_cond_count += (1 if cond_count > 0 else 0)

    def _count_variable(self, node):
        """Record plain-name assignment targets of *node*."""
        if not hasattr(node, 'targets'):
            return
        for var in node.targets:
            if not isinstance(var, ast.Name):
                continue
            self.variables.add(var.id)

    def _count_func_call(self, node):
        """Tally one call site under the callee's simple or attribute name."""
        if isinstance(node.func, ast.Name):
            self.call_count[node.func.id] = self.call_count.get(node.func.id, 0) + 1
        if isinstance(node.func, ast.Attribute):
            self.call_count[node.func.attr] = self.call_count.get(node.func.attr, 0) + 1

    def _has_recursion(self, root):
        """Return the number of references to *root*'s own name in its body
        (0 if none) — a proxy for recursion."""
        if not hasattr(root, 'body'):
            return
        call_list = self._dfs(root, node_type=(ast.Name, ast.Attribute))
        call_count = {}
        for node in call_list:
            if isinstance(node, ast.Name):
                call_count[node.id] = call_count.get(node.id, 0) + 1
            if isinstance(node, ast.Attribute):
                call_count[node.attr] = call_count.get(node.attr, 0) + 1
        if root.name not in call_count:
            return 0
        return call_count[root.name]

    def _dfs(self, root, node_type=(ast.AST,)):
        """Collect all nodes of *node_type* reachable from *root*.

        Traversal is depth-first: ``deque.pop()`` takes the most recently
        added node (LIFO).
        """
        if not root:
            return []  # fix: previously returned an undefined name (NameError)
        node_list = []
        queue = deque([root])
        while queue:
            node = queue.pop()  # LIFO -> depth-first
            if isinstance(node, node_type):
                node_list.append(node)
            queue.extend(self._ast_neighbors(node))
        return node_list

    def _ast_neighbors(self, node):
        """Return every direct AST child of *node* (all fields, flattened)."""
        if not node:
            return []
        if not isinstance(node, ast.AST):
            return []
        neighbor_nodes = []
        for attr in node._fields:
            attr_node = getattr(node, attr)
            if isinstance(attr_node, ast.AST):
                neighbor_nodes.append(attr_node)
            if isinstance(attr_node, list):
                neighbor_nodes.extend(attr_node)
        return neighbor_nodes
def code_pattern_embedding(source_code):
    """Parse *source_code* and return its structural feature vector."""
    tree = ast.parse(source_code, mode='exec')
    return FeaturePipeline(tree)()
<filename>src/VerbalizationSpace.py
#!/usr/bin/env python3
# coding=utf-8
#######################################################################################
# Copyright (c) 2022, <NAME>, <NAME>, <NAME> - King's College London
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#######################################################################################
# Author: <NAME> <<EMAIL>>, King's College London
from enum import Enum, IntEnum
class VerbalizationSpace:
    """Bundle of the four verbalization dimensions (abstraction, locality,
    specificity, explanation). Each may be passed as an enum member or as
    its raw int value."""

    def __init__(self, abstraction, locality, specificity, explanation, locality_range=None, locality_object=None):
        def _coerce(value, enum_cls):
            # Raw ints are wrapped in their enum; members pass through.
            return enum_cls(value) if type(value) is int else value

        self.abstraction = _coerce(abstraction, Abstraction)
        self.locality = _coerce(locality, Locality)
        self.locality.set_range(locality_range)
        self.locality.set_object(locality_object)
        self.specificity = _coerce(specificity, Specificity)
        self.explanation = _coerce(explanation, Explanation)

    @classmethod
    def from_params_srv(cls, request_msg):
        """Build a VerbalizationSpace from a service request message."""
        abstraction = Abstraction(request_msg.abstraction)
        locality = Locality(request_msg.locality)
        if locality == Locality.OBJECT:
            locality.set_object(request_msg.locality_object_name)
        elif locality == Locality.RANGE:
            if request_msg.locality_min >= request_msg.locality_max:
                raise ValueError('Locality min range must be smaller than max range')
            locality.set_range((request_msg.locality_min, request_msg.locality_max))
        specificity = Specificity(request_msg.specificity)
        explanation = Explanation(request_msg.explanation)
        return cls(abstraction, locality, specificity, explanation,
                   locality.range, locality.object)
class Abstraction(IntEnum):
    """Abstraction level of the verbalization, 1 (lowest) to 4 (highest)."""
    LEV1 = 1
    LEV2 = 2
    LEV3 = 3
    LEV4 = 4
class Locality(Enum):
    """Which part of the plan to verbalize: everything, an index range, or a
    single named object.

    NOTE: enum members are singletons, so the `range`/`object` attributes set
    on e.g. ``Locality.RANGE`` are shared by every user of that member.
    """
    ALL = 1
    RANGE = 2
    OBJECT = 3

    def __init__(self, val):
        # Fix: the original called ``Enum.__init__(val)``, passing the member
        # *value* where ``self`` belongs; use super() correctly instead.
        super().__init__()
        self.range = None
        self.object = None

    def set_range(self, plan_range):
        """Store a (min, max) tuple; only meaningful on Locality.RANGE."""
        if self == self.RANGE:
            self.range = plan_range

    def get_range(self):
        """Return a slice built from the stored range, or None if no range
        is set (e.g. for Locality.ALL)."""
        if self.range:
            return slice(*self.range)
        return None

    def set_object(self, object):
        """Store the target object name; only meaningful on Locality.OBJECT."""
        if self == self.OBJECT:
            self.object = object

    def get_object(self):
        """Return the stored object name, or None."""
        return self.object
class Specificity(Enum):
    """How much detail the generated narrative includes."""
    GENERAL_PICTURE = 1
    SUMMARY = 2
    DETAILED_NARRATIVE = 3
class Explanation(IntEnum):
    """Depth of explanation, 1 (least) to 5 (most)."""
    LEV1 = 1
    LEV2 = 2
    LEV3 = 3
    LEV4 = 4
    LEV5 = 5
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rally.task import scenario
from rally.plugins.openstack.scenarios.vm import utils as vm_utils
from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
from rally.task import types
from rally.task import validation
from rally.common import sshutils
import time
import StringIO
import csv
import json
import datetime
import logging
from Elastic import Elastic
LOG = logging.getLogger(__name__)
class BrowbeatPlugin(neutron_utils.NeutronScenario,
                     vm_utils.VMScenario,
                     scenario.Scenario):
    """Rally scenario: run pbench-uperf between freshly booted guest pairs
    via a jump host, optionally shipping results to Elasticsearch.

    Fixes relative to the original:
    - int comparisons used identity (``is 1`` / ``is 0`` / ``is not 1``),
      which is an implementation detail of small-int caching; now ``==``/``!=``.
    - the single-pair and multi-pair boot paths were duplicated code; unified
      into one loop (``range(1)`` gives the identical single iteration).
    - ``debug`` (None by default) was interpolated into the remote command,
      appending the literal string "None"; now replaced by '' when unset.
    - the CSV report guard was ``len(row) >= 1`` but rows are indexed up to
      ``row[3]``; now ``>= 4`` so short rows cannot raise IndexError.
    """

    @types.convert(image={"type": "glance_image"},
                   flavor={"type": "nova_flavor"})
    @validation.required_openstack(users=True)
    @scenario.configure(context={"cleanup": ["nova", "neutron", "cinder"],
                                 "keypair": {}, "allow_ssh": {}})
    def nova_create_pbench_uperf(
            self,
            image,
            flavor,
            zones,
            user,
            password,
            test_types,
            protocols,
            samples,
            external,
            test_name,
            send_results=True,
            num_pairs=1,
            elastic_host=None,
            elastic_port=None,
            cloudname=None,
            **kwargs):
        """Boot a jump host plus *num_pairs* server/client guests, run
        pbench-uperf across the pairs and publish the parsed results."""
        pbench_path = "/opt/pbench-agent"
        pbench_results = "/var/lib/pbench-agent"
        # Create env: router + network + subnet; guests attach to the network.
        router = self._create_router({}, external_gw=external)
        network = self._create_network({})
        subnet = self._create_subnet(network, {})
        kwargs["nics"] = [{'net-id': network['network']['id']}]
        self._add_interface_router(subnet['subnet'], router['router'])
        # Launch pbench-jump-host with a floating IP; all guest access goes
        # through it.
        jh, jip = self._boot_server_with_fip(image,
                                             flavor,
                                             use_floating_ip=True,
                                             floating_network=external['name'],
                                             key_name=self.context["user"]["keypair"]["name"],
                                             **kwargs)
        servers = []
        clients = []
        # Launch guest pairs and record their private IP addresses.
        for _ in range(num_pairs):
            server = self._boot_server(
                image,
                flavor,
                key_name=self.context["user"]["keypair"]["name"],
                availability_zone=zones['server'],
                **kwargs)
            client = self._boot_server(
                image,
                flavor,
                key_name=self.context["user"]["keypair"]["name"],
                availability_zone=zones['client'],
                **kwargs)
            servers.append(
                str(server.addresses[network['network']['name']][0]["addr"]))
            clients.append(
                str(client.addresses[network['network']['name']][0]["addr"]))
        # Wait for ping, then open an SSH connection to the jump host.
        self._wait_for_ping(jip['ip'])
        jump_ssh = sshutils.SSH(user, jip['ip'], 22, self.context[
            "user"]["keypair"]["private"], password)
        self._wait_for_ssh(jump_ssh)
        # Write id_rsa on the jump host so it can reach the guests.
        self._run_command_over_ssh(jump_ssh, {'remote_path': "mkdir ~/.ssh"})
        jump_ssh.run(
            "cat > ~/.ssh/id_rsa",
            stdin=self.context["user"]["keypair"]["private"])
        self._run_command_over_ssh(jump_ssh,
                                   {'remote_path': "chmod 0600 ~/.ssh/id_rsa"})
        # Wait until every guest accepts SSH from the jump host.
        ready = False
        retry = 5
        while (not ready):
            for sip in servers + clients:
                cmd = "ssh -o StrictHostKeyChecking=no {}@{} /bin/true".format(
                    user, sip)
                s1_exitcode, s1_stdout, s1_stderr = jump_ssh.execute(cmd)
                if retry < 1:
                    LOG.error(
                        "Error : Issue reaching {} the guests through the Jump host".format(sip))
                    return 1
                if s1_exitcode == 0:  # fix: was "is 0"
                    ready = True
                else:
                    retry = retry - 1
                    time.sleep(5)
        # Register pbench tools on every guest (across the jump host).
        for sip in servers + clients:
            cmd = "{}/util-scripts/pbench-register-tool-set --remote={}".format(
                pbench_path, sip)
            self._run_command_over_ssh(jump_ssh, {'remote_path': cmd})
        # Quick single test:
        # debug = "--message-sizes=1024 --instances=1"
        debug = None
        # Start uperf against the private addresses.
        # fix: interpolate '' (not the literal "None") when debug is unset.
        uperf = "{}/bench-scripts/pbench-uperf --clients={} --servers={} --samples={} {}".format(
            pbench_path, ','.join(clients), ','.join(servers), samples,
            debug if debug else "")
        uperf += " --test-types={} --protocols={} --config={}".format(
            test_types,
            protocols,
            test_name)
        # Execute pbench-uperf; execute returns (exitcode, stdout, stderr).
        LOG.info("Starting Rally - PBench UPerf")
        exitcode, stdout_uperf, stderr = self._run_command_over_ssh(
            jump_ssh, {"remote_path": uperf})
        # Fetch the CSV results.
        cmd = "cat {}/uperf_{}*/result.csv".format(pbench_results, test_name)
        exitcode, stdout, stderr = self._run_command_over_ssh(
            jump_ssh, {'remote_path': cmd})
        if send_results and exitcode != 1:  # fix: was "is not 1"
            cmd = "cat {}/uperf_{}*/result.json".format(
                pbench_results, test_name)
            exitcode, stdout_json, stderr = self._run_command_over_ssh(
                jump_ssh, {'remote_path': cmd})
            es_ts = datetime.datetime.utcnow()
            config = {
                'elasticsearch': {
                    'host': elastic_host, 'port': elastic_port}, 'browbeat': {
                    'cloud_name': cloudname, 'timestamp': es_ts}}
            elastic = Elastic(config, 'pbench')
            json_result = StringIO.StringIO(stdout_json)
            json_data = json.load(json_result)
            for iteration in json_data:
                elastic.index_result(iteration)
        else:
            LOG.error("Error with PBench Results")
        # Parse the CSV report, skipping the header row.
        result = StringIO.StringIO('\n'.join(stdout.split('\n')[1:]))
        creader = csv.reader(result)
        report = []
        for row in creader:
            if len(row) >= 4:  # fix: rows are indexed up to [3]; was ">= 1"
                report.append(["aggregate.{}".format(row[1]), float(row[2])])
                report.append(["single.{}".format(row[1]), float(row[3])])
        if len(report) > 0:
            self.add_output(
                additive={"title": "PBench UPerf Stats",
                          "description": "PBench UPerf Scenario",
                          "chart_plugin": "StatsTable",
                          "axis_label": "Gbps",
                          "label": "Gbps",
                          "data": report})
        # Archive the raw pbench results off the jump host.
        cmd = "{}/util-scripts/pbench-move-results".format(pbench_path)
        self._run_command_over_ssh(jump_ssh, {"remote_path": cmd})
|
#!/usr/bin/env python3
import os
import argparse
import numpy as np
import openml
from autosklearn.classification import AutoSklearnClassifier
from autosklearn.metrics import balanced_accuracy
from remove_dataset_from_metadata import remove_dataset
import score_ensemble
def load_task(task_id):
    """Fetch an OpenML task and return its train/test split.

    Returns (X_train, y_train, X_test, y_test, cat) where *cat* is a list of
    'categorical'/'numerical' markers per feature and labels are re-mapped to
    contiguous integers 0..k-1 (mapping derived from the training labels).
    """
    task = openml.tasks.get_task(task_id)
    X, y = task.get_X_and_y()
    train_indices, test_indices = task.get_train_test_split_indices()
    X_train, y_train = X[train_indices], y[train_indices]
    X_test, y_test = X[test_indices], y[test_indices]

    dataset = openml.datasets.get_dataset(task.dataset_id)
    _, _, cat = dataset.get_data(return_categorical_indicator=True,
                                 target=task.target_name)
    # Release the dataset objects early; only the indicator list is kept.
    del _
    del dataset
    cat = ['categorical' if c else 'numerical' for c in cat]

    classes = np.unique(y_train)
    mapping = {label: i for i, label in enumerate(classes)}
    y_train = np.array([mapping[value] for value in y_train])
    y_test = np.array([mapping[value] for value in y_test])
    return X_train, y_train, X_test, y_test, cat
def run_experiment(working_directory,
                   time_limit,
                   per_run_time_limit,
                   task_id,
                   seed,
                   use_metalearning,
                   ):
    """Fit an AutoSklearnClassifier on one OpenML task.

    Output is kept in <working_directory>/<seed>/<task_id> so that
    score_ensemble can consume it afterwards.

    Parameters
    ----------
    working_directory : str
        Root directory for experiment output.
    time_limit : int
        Total optimization budget in seconds.
    per_run_time_limit : int
        Budget in seconds for a single model evaluation.
    task_id : int
        OpenML task id.
    seed : int
        Random seed; also names the output subdirectory.
    use_metalearning : bool
        If truthy, warmstart with 25 metalearning configurations drawn from a
        metadata copy that excludes `task_id` (avoids information leakage).
    """
    # set this to local dataset cache
    # openml.config.cache_directory = os.path.join(working_directory, "../cache")
    seed_dir = os.path.join(working_directory, str(seed))
    # exist_ok replaces the old `try/except Exception`, which hid real failures
    # (e.g. permission errors) behind an "already created" message (and had a
    # typo, "aleardy").
    os.makedirs(seed_dir, exist_ok=True)
    tmp_dir = os.path.join(seed_dir, str(task_id))

    # Arguments shared by both variants; only the metalearning warmstart count
    # and the pruned metadata directory differ (the two dicts were previously
    # duplicated almost verbatim).
    automl_arguments = {
        'time_left_for_this_task': time_limit,
        'per_run_time_limit': per_run_time_limit,
        'initial_configurations_via_metalearning': 25 if use_metalearning else 0,
        'ensemble_size': 0,
        'seed': seed,
        'memory_limit': 3072,
        'resampling_strategy': 'holdout',
        'resampling_strategy_arguments': {'train_size': 0.67},
        'tmp_folder': tmp_dir,
        'delete_tmp_folder_after_terminate': False,
        'disable_evaluator_output': False,
    }

    if use_metalearning:
        # path to the original metadata directory.
        metadata_directory = os.path.abspath(os.path.dirname(__file__))
        metadata_directory = os.path.join(metadata_directory,
                                          "../../../autosklearn/metalearning/files/")
        # Create new metadata directory not containing task_id.
        new_metadata_directory = os.path.abspath(os.path.join(working_directory,
                                                              "metadata_%i" % task_id))
        # exist_ok: new metadata is (re)created for this task.
        os.makedirs(new_metadata_directory, exist_ok=True)
        # remove the given task id from metadata directory.
        remove_dataset(metadata_directory, new_metadata_directory, task_id)
        automl_arguments['metadata_directory'] = new_metadata_directory

    automl = AutoSklearnClassifier(**automl_arguments)
    X_train, y_train, X_test, y_test, cat = load_task(task_id)
    automl.fit(X_train, y_train,
               dataset_name=str(task_id),
               X_test=X_test, y_test=y_test,
               metric=balanced_accuracy)
def main(working_directory,
         output_file,
         task_id,
         seed,
         model,
         time_limit,
         per_run_time_limit):
    """Dispatch one experiment mode and score the resulting ensemble."""
    # vanilla and metalearning must be called first before ensemble and
    # meta_ensemble can be called, respectively.
    if model in ("vanilla", "metalearning"):
        run_experiment(working_directory,
                       time_limit,
                       per_run_time_limit,
                       task_id,
                       seed,
                       use_metalearning=(model == "metalearning"),
                       )
        ensemble_size = 1
    else:
        ensemble_size = 50
    score_ensemble.main(working_directory,
                        output_file,
                        task_id,
                        seed,
                        ensemble_size=ensemble_size,
                        )
if __name__ == "__main__":
    cli = argparse.ArgumentParser()
    cli.add_argument('--working-directory', type=str, required=True)
    cli.add_argument("--output-file", type=str, required=True)
    cli.add_argument("--time-limit", type=int, required=True)
    cli.add_argument("--per-runtime-limit", type=int, required=True)
    cli.add_argument('--task-id', type=int, required=True)
    cli.add_argument('-s', '--seed', type=int)
    cli.add_argument("--model", type=str, required=True)
    opts = cli.parse_args()
    # working-directory is logdir/vanilla or logdir/metalearning
    main(opts.working_directory,
         opts.output_file,
         opts.task_id,
         opts.seed,
         opts.model,
         opts.time_limit,
         opts.per_runtime_limit,
         )
|
<gh_stars>0
"""Implementation of regularized Hough matching algorithm (RHM)"""
import math
import torch.nn.functional as F
import torch
from . import geometry
def appearance_similarity(src_feats, trg_feats, cosd=3):
    r"""Semantic appearance similarity (exponentiated cosine)"""
    # Outer product of the per-row L2 norms is the normalizer for every pair.
    norms_src = torch.norm(src_feats, p=2, dim=1).unsqueeze(1)
    norms_trg = torch.norm(trg_feats, p=2, dim=1).unsqueeze(0)
    cosine = torch.matmul(src_feats, trg_feats.t()) / torch.matmul(norms_src, norms_trg)
    # Truncate negative correlations, then sharpen by exponentiation.
    return torch.clamp(cosine, min=0).pow(cosd)
def hspace_bin_ids(src_imsize, src_box, trg_box, hs_cellsize, nbins_x):
    r"""Compute Hough space bin id for the subsequent voting procedure"""
    # Reference point (source image size) on the same device as the boxes.
    ref_pt = torch.tensor(src_imsize, dtype=torch.float).to(src_box.device)
    ctr_src = geometry.center(src_box)
    ctr_trg = geometry.center(trg_box)
    # Offset of each source center from the reference point: (n_src, 2).
    src_offset = ref_pt.unsqueeze(0).expand_as(ctr_src) - ctr_src
    # Pairwise translation votes: (n_src, 2, n_trg).
    votes_xy = src_offset.unsqueeze(2).repeat(1, 1, len(trg_box)) + \
        ctr_trg.t().unsqueeze(0).repeat(len(src_box), 1, 1)
    cell_xy = (votes_xy / hs_cellsize).long()
    # Flatten 2-D cell coordinates into a single bin index per (src, trg) pair.
    return cell_xy[:, 0, :] + cell_xy[:, 1, :] * nbins_x
def build_hspace(src_imsize, trg_imsize, ncells):
    r"""Build Hough space where voting is done"""
    # Hough space spans the sum of both image extents; cells are square and
    # sized so roughly `ncells` of them tile the space.
    width = src_imsize[0] + trg_imsize[0]
    height = src_imsize[1] + trg_imsize[1]
    cellsize = math.sqrt((width * height) / ncells)
    return int(width / cellsize) + 1, int(height / cellsize) + 1, cellsize
def rhm(src_hyperpixels, trg_hyperpixels, hsfilter, ncells=8192):
    r"""Regularized Hough matching.

    Each hyperpixel is a (geometry, features, image-size) triple. Appearance
    similarities vote for translation offsets in a quantized Hough space; the
    aggregated, smoothed votes re-weight the raw similarities.
    Returns an (n_src, n_trg) tensor of re-weighted match scores.
    """
    # Unpack hyperpixels
    src_hpgeomt, src_hpfeats, src_imsize = src_hyperpixels
    trg_hpgeomt, trg_hpfeats, trg_imsize = trg_hyperpixels
    # Prepare for the voting procedure
    votes = appearance_similarity(src_hpfeats, trg_hpfeats)
    nbins_x, nbins_y, hs_cellsize = build_hspace(src_imsize, trg_imsize, ncells)
    bin_ids = hspace_bin_ids(src_imsize, src_hpgeomt, trg_hpgeomt, hs_cellsize, nbins_x)
    # One Hough-space row per source hyperpixel; rows are summed after voting.
    hspace = src_hpgeomt.new_zeros((len(votes), nbins_y * nbins_x))
    # Proceed voting
    # Offset each row's bin ids by row * hspace.size(1) so a single flattened
    # index_add scatters all rows at once.
    hbin_ids = bin_ids.add(torch.arange(0, len(votes)).to(src_hpgeomt.device).
                           mul(hspace.size(1)).unsqueeze(1).expand_as(bin_ids))
    hspace = hspace.view(-1).index_add(0, hbin_ids.view(-1), votes.view(-1)).view_as(hspace)
    hspace = torch.sum(hspace, dim=0)
    # Aggregate the voting results
    # NOTE(review): padding=3 implies hsfilter is 7x7 (e.g. a Gaussian) so the
    # smoothed space keeps its shape — confirm against the caller.
    hspace = F.conv2d(hspace.view(1, 1, nbins_y, nbins_x),
                      hsfilter.unsqueeze(0).unsqueeze(0), padding=3).view(-1)
    # Re-weight raw similarities by the (smoothed) consensus of their bins.
    return votes * torch.index_select(hspace, dim=0, index=bin_ids.view(-1)).view_as(votes)
|
<reponame>timgates42/statsmodels<gh_stars>1-10
# -*- coding: utf-8 -*-
"""Examples of non-linear functions for non-parametric regression
Created on Sat Jan 05 20:21:22 2013
Author: <NAME>
"""
import numpy as np
## Functions
def fg1(x):
    '''Fan and Gijbels example function 1: linear trend plus a hump at zero.
    '''
    return 2 * np.exp(-16 * x ** 2) + x
def fg1eu(x):
    '''Eubank similar to Fan and Gijbels example function 1
    '''
    bump = 0.5 * np.exp(-50 * (x - 0.5) ** 2)
    return x + bump
def fg2(x):
    '''Fan and Gijbels example function 2: sine plus a hump at zero.
    '''
    hump = 2 * np.exp(-16 * x ** 2)
    return np.sin(2 * x) + hump
def func1(x):
    '''made up example with sin, square
    '''
    # sinc-like oscillation plus a downward-opening parabola.
    return np.sin(5 * x) / x + 2. * x - 1. * x ** 2
## Classes with Data Generating Processes
doc = {'description':
'''Base Class for Univariate non-linear example
Does not work on it's own.
needs additional at least self.func
''',
'ref': ''}
class _UnivariateFunction(object):
    #Base Class for Univariate non-linear example.
    #Does not work on it's own. needs additionally at least self.func
    # Subclasses must set self.s_noise and self.func (and self.s_x when x is
    # drawn from the default normal) BEFORE delegating to this __init__.
    __doc__ = '''%(description)s
    Parameters
    ----------
    nobs : int
        number of observations to simulate
    x : None or 1d array
        If x is given then it is used for the exogenous variable instead of
        creating a random sample
    distr_x : None or distribution instance
        Only used if x is None. The rvs method is used to create a random
        sample of the exogenous (explanatory) variable.
    distr_noise : None or distribution instance
        The rvs method is used to create a random sample of the errors.
    Attributes
    ----------
    x : ndarray, 1-D
        exogenous or explanatory variable. x is sorted.
    y : ndarray, 1-D
        endogenous or response variable
    y_true : ndarray, 1-D
        expected values of endogenous or response variable, i.e. values of y
        without noise
    func : callable
        underlying function (defined by subclass)
    %(ref)s
    ''' #% doc
    def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
        if x is None:
            if distr_x is None:
                # default: normal exog scaled by the subclass-provided s_x
                x = np.random.normal(loc=0, scale=self.s_x, size=nobs)
            else:
                x = distr_x.rvs(size=nobs)
            # NOTE(review): only generated x is sorted; a user-supplied x is
            # used as-is, despite the class docstring saying "x is sorted".
            x.sort()
        self.x = x
        if distr_noise is None:
            noise = np.random.normal(loc=0, scale=self.s_noise, size=nobs)
        else:
            noise = distr_noise.rvs(size=nobs)
        # optional heteroscedasticity hook: subclasses may define het_scale(x)
        if hasattr(self, 'het_scale'):
            noise *= self.het_scale(self.x)
        #self.func = fg1
        self.y_true = y_true = self.func(x)
        self.y = y_true + noise
    def plot(self, scatter=True, ax=None):
        '''plot the mean function and optionally the scatter of the sample
        Parameters
        ----------
        scatter: bool
            If true, then add scatterpoints of sample to plot.
        ax : None or matplotlib axis instance
            If None, then a matplotlib.pyplot figure is created, otherwise
            the given axis, ax, is used.
        Returns
        -------
        Figure
            This is either the created figure instance or the one associated
            with ax if ax is given.
        '''
        if ax is None:
            # lazy import keeps matplotlib an optional dependency
            import matplotlib.pyplot as plt
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)
        if scatter:
            ax.plot(self.x, self.y, 'o', alpha=0.5)
        # evaluate the true mean function on a dense grid over the sample range
        xx = np.linspace(self.x.min(), self.x.max(), 100)
        ax.plot(xx, self.func(xx), lw=2, color='b', label='dgp mean')
        return ax.figure
doc = {'description':
'''Fan and Gijbels example function 1
linear trend plus a hump
''',
'ref':
'''
References
----------
Fan, Jianqing, and <NAME>. 1992. "Variable Bandwidth and Local
Linear Regression Smoothers."
The Annals of Statistics 20 (4) (December): 2008-2036. doi:10.2307/2242378.
'''}
class UnivariateFanGijbels1(_UnivariateFunction):
    __doc__ = _UnivariateFunction.__doc__ % doc

    def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
        self.s_x = 1.
        self.s_noise = 0.7
        self.func = fg1
        # Bug fix: super(self.__class__, ...) recurses infinitely if this
        # class is ever subclassed; name the class explicitly instead.
        super(UnivariateFanGijbels1, self).__init__(nobs=nobs, x=x,
                                                    distr_x=distr_x,
                                                    distr_noise=distr_noise)
# Reuse the shared template dict: swap only the description before
# UnivariateFanGijbels2 formats its __doc__ from it.
doc['description'] =\
'''Fan and Gijbels example function 2
sin plus a hump
'''
class UnivariateFanGijbels2(_UnivariateFunction):
    __doc__ = _UnivariateFunction.__doc__ % doc

    def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
        self.s_x = 1.
        self.s_noise = 0.5
        self.func = fg2
        # Bug fix: super(self.__class__, ...) recurses infinitely under
        # subclassing; name the class explicitly.
        super(UnivariateFanGijbels2, self).__init__(nobs=nobs, x=x,
                                                    distr_x=distr_x,
                                                    distr_noise=distr_noise)
class UnivariateFanGijbels1EU(_UnivariateFunction):
    '''
    Eubank p.179f
    '''

    def __init__(self, nobs=50, x=None, distr_x=None, distr_noise=None):
        if distr_x is None:
            from scipy import stats
            distr_x = stats.uniform
        # s_x is not needed: distr_x is always set here, so the base class
        # never falls back to the normal(scale=s_x) sampler.
        self.s_noise = 0.15
        self.func = fg1eu
        # Bug fix: super(self.__class__, ...) recurses infinitely under
        # subclassing; name the class explicitly.
        super(UnivariateFanGijbels1EU, self).__init__(nobs=nobs, x=x,
                                                      distr_x=distr_x,
                                                      distr_noise=distr_noise)
class UnivariateFunc1(_UnivariateFunction):
    '''
    made up, with sin and quadratic trend
    '''

    def __init__(self, nobs=200, x=None, distr_x=None, distr_noise=None):
        if x is None and distr_x is None:
            from scipy import stats
            distr_x = stats.uniform(-2, 4)
        elif x is not None:
            # Bug fix: the old bare `else` also ran when x was None but a
            # distr_x was supplied, crashing on x.shape.
            nobs = x.shape[0]
        self.s_noise = 2.
        self.func = func1
        super(UnivariateFunc1, self).__init__(nobs=nobs, x=x,
                                              distr_x=distr_x,
                                              distr_noise=distr_noise)

    def het_scale(self, x):
        """Heteroscedastic noise scale: sqrt(|3 + x|)."""
        return np.sqrt(np.abs(3 + x))
|
#!/usr/bin/env python3
#
# (c) 2020 <NAME> <<EMAIL>>
#
# Please let me know about your use case of this code!
import argparse
import chevron
import json
import re
import requests
import os
from os import path
import subprocess
from templates import *
def optionalize(name, optional=True):
    """Wrap a Rust type name in Option<> unless `optional` is falsy."""
    if not optional:
        return name
    return 'Option<{}>'.format(name)
def replace_keywords(name):
    """Rename identifiers that collide with Rust keywords."""
    renames = {
        'type': 'typ',
        'enum': 'enums',
    }
    return renames.get(name, name)
def capitalize_first(name):
    """Upper-case only the first character, leaving the rest untouched."""
    # name[:1] is '' for empty input, so no length check is needed.
    return name[:1].upper() + name[1:]
def rust_identifier(name):
    """Convert a JSON/camelCase identifier into a snake_case Rust identifier."""
    # Characters illegal in Rust identifiers are spelled out or replaced.
    cleaned = name.replace('$', 'dollar').replace('#', 'hash').replace('.', '_')
    pieces = []
    for idx, ch in enumerate(cleaned):
        if idx == 0:
            pieces.append(ch.lower())
        elif ch.isupper():
            pieces.append('_' + ch.lower())
        else:
            pieces.append(ch)
    return ''.join(pieces)
def snake_to_camel(name):
    """Convert snake_case to CamelCase; underscores are dropped."""
    # Each underscore-separated part gets its first letter capitalized;
    # empty parts (doubled/leading/trailing underscores) vanish.
    return ''.join(part[:1].upper() + part[1:] for part in name.split('_'))
def global_params_name(api_name):
    """Name of the struct holding the API-wide query parameters."""
    return snake_to_camel('{}Params'.format(api_name))
def parse_schema_types(name, schema, optional=True, parents=[]):
    """Translate a JSON schema type into Rust types, recursively.
    This function takes a schema entry from the `schemas` section of a Discovery document,
    and generates all Rust structs needed to represent the schema, recursively.
    Arguments:
        name: Name of the property. If the property is an object with fixed fields, generate a struct with this name.
        schema: A JSON object from a discovery document representing a type.
        optional: Whether the resulting Rust type is wrapped in Option<>.
        parents: Names of enclosing types, used to Box recursive references.
    Returns:
        (tuple, [dict])
        where type is a tuple where the first element is a Rust type and the
        second element is a comment detailing the use of the field. The list of
        dicts returned as second element are any structs that need to be separately
        implemented and that the generated struct (if it was a struct) depends
        on. The dict contains elements as expected by templates.SchemaStructTmpl.
    """
    # NOTE(review): the mutable default `parents=[]` is harmless only because
    # it is never mutated (recursion always passes `parents + [name]`).
    typ = ''
    comment = ''
    structs = []
    try:
        if '$ref' in schema:
            # We just assume that there is already a type generated for the reference.
            if schema['$ref'] not in parents:
                return optionalize(schema['$ref'], optional), structs
            # Self-referential type: Box it so the Rust type has finite size.
            return optionalize('Box<' + schema['$ref'] + '>', optional), structs
        if 'type' in schema and schema['type'] == 'object':
            # There are two types of objects: those with `properties` are translated into a Rust struct,
            # and those with `additionalProperties` into a HashMap<String, ...>.
            # Structs are represented as dicts that can be used to render the SchemaStructTmpl.
            if 'properties' in schema:
                name = replace_keywords(name)
                typ = name
                struct = {'name': name, 'description': schema.get('description', ''), 'fields': []}
                for pn, pp in schema['properties'].items():
                    # Nested object fields become structs named Parent + Field.
                    subtyp, substructs = parse_schema_types(name + capitalize_first(pn),
                                                            pp,
                                                            optional=True,
                                                            parents=parents + [name])
                    if type(subtyp) is tuple:
                        subtyp, comment = subtyp
                    else:
                        comment = None
                    cleaned_pn = replace_keywords(pn)
                    jsonname = pn
                    cleaned_pn = rust_identifier(cleaned_pn)
                    struct['fields'].append({
                        'name':
                        cleaned_pn,
                        'original_name':
                        jsonname,
                        'attr':
                        '#[serde(rename = "{}")]'.format(jsonname) +
                        '\n    #[serde(skip_serializing_if = "Option::is_none")]'
                        if subtyp.startswith('Option') else '',
                        'typ':
                        subtyp,
                        'comment':
                        comment
                    })
                    structs.extend(substructs)
                structs.append(struct)
                return (optionalize(typ, optional), schema.get('description', '')), structs
            if 'additionalProperties' in schema:
                field, substructs = parse_schema_types(name,
                                                       schema['additionalProperties'],
                                                       optional=False,
                                                       parents=parents + [name])
                structs.extend(substructs)
                if type(field) is tuple:
                    typ = field[0]
                else:
                    typ = field
                return (optionalize('HashMap<String,' + typ + '>', optional), schema.get('description', '')), structs
        if schema['type'] == 'array':
            typ, substructs = parse_schema_types(name, schema['items'], optional=False, parents=parents + [name])
            if type(typ) is tuple:
                typ = typ[0]
            return (optionalize('Vec<' + typ + '>', optional), schema.get('description', '')), structs + substructs
        if schema['type'] == 'string':
            # Discovery encodes wide integers/floats/dates as strings with a
            # `format` hint; the hint is recorded in the field comment.
            def build(intt, typ='String'):
                return (optionalize(typ, optional), intt + ': ' + schema.get('description', '')), structs
            if 'format' in schema:
                if schema['format'] == 'int64':
                    return build('i64')
                if schema['format'] == 'int32':
                    return build('i32')
                if schema['format'] == 'uint64':
                    return build('u64')
                if schema['format'] == 'uint32':
                    return build('u32')
                if schema['format'] == 'double':
                    return build('f64')
                if schema['format'] == 'float':
                    return build('f32')
                if schema['format'] == 'date-time':
                    return build('DateTime', typ='DateTime<Utc>')
            return (optionalize('String', optional), schema.get('description', '')), structs
        if schema['type'] == 'boolean':
            return (optionalize('bool', optional), schema.get('description', '')), structs
        if schema['type'] in ('number', 'integer'):
            def build(intt):
                return (optionalize(intt, optional), schema.get('description', '')), structs
            # NOTE(review): a number/integer without a 'format' key raises
            # KeyError here, which is logged and re-raised below; an unknown
            # format falls through to the final raise.
            if schema['format'] == 'float':
                return build('f32')
            if schema['format'] == 'double':
                return build('f64')
            if schema['format'] == 'int32':
                return build('i32')
            if schema['format'] == 'int64':
                return build('i64')
            if schema['format'] == 'uint32':
                return build('u32')
            if schema['format'] == 'uint64':
                return build('u64')
        if schema['type'] == 'any':
            return (optionalize('String', optional), 'ANY data: ' + schema.get('description', '')), structs
        raise Exception('unimplemented schema type!', name, schema)
    except KeyError as e:
        print('KeyError while processing:', name, schema)
        raise e
def generate_params_structs(resources, super_name='', global_params=None):
    """Generate parameter structs from the resources list.
    Returns a list of source code strings.

    Arguments:
        resources: The `resources` mapping from a Discovery document.
        super_name: Prefix accumulated from enclosing resources (recursion).
        global_params: Name of the API-wide params struct to embed, if any.
    """
    frags = []
    for resourcename, resource in resources.items():
        for methodname, method in resource.get('methods', {}).items():
            # e.g. FilesGetParams for resource "files", method "get".
            param_type_name = snake_to_camel(super_name + capitalize_first(resourcename) +
                                             capitalize_first(methodname) + 'Params')
            print("processed:", resourcename, methodname, param_type_name)
            struct = {
                'name': param_type_name,
                'description': 'Parameters for the `{}.{}` method.'.format(resourcename, methodname),
                'fields': []
            }
            req_query_parameters = []
            opt_query_parameters = []
            if global_params:
                # Every *Params struct embeds the API-wide parameters via serde(flatten).
                struct['fields'].append({
                    'name': replace_keywords(rust_identifier(global_params)),
                    'typ': optionalize(global_params, True),
                    'attr': '#[serde(flatten)]',
                    'comment': 'General attributes applying to any API call'
                })
            # Build struct dict for rendering.
            if 'parameters' in method:
                for paramname, param in method['parameters'].items():
                    (typ, desc), substructs = parse_schema_types('', param, optional=False, parents=[])
                    field = {
                        'name': replace_keywords(rust_identifier(paramname)),
                        'original_name': paramname,
                        'typ': optionalize(typ, not param.get('required', False)),
                        'comment': desc,
                        'attr': '#[serde(rename = "{}")]'.format(paramname),
                    }
                    struct['fields'].append(field)
                    # Query parameters are split by required-ness for the
                    # Display impl that builds the URL query string.
                    if param.get('location', '') == 'query':
                        if param.get('required', False):
                            req_query_parameters.append(field)
                        else:
                            opt_query_parameters.append(field)
            frags.append(chevron.render(SchemaStructTmpl, struct))
            struct['required_fields'] = req_query_parameters
            struct['optional_fields'] = opt_query_parameters
            frags.append(chevron.render(SchemaDisplayTmpl, struct))
        # Generate parameter types for subresources.
        frags.extend(
            generate_params_structs(resource.get('resources', {}), super_name=resourcename,
                                    global_params=global_params))
    return frags
def resolve_parameters(string, paramsname='params'):
    """Returns a Rust syntax for formatting the given string with API
    parameters, and a list of (snake-case) API parameters that are used. This
    is typically used to format URL paths containing required parameters for an
    API call.
    """
    # Bug fix: raw string — '\{' and '\w' are regex escapes, not Python string
    # escapes; the non-raw form emits DeprecationWarning/SyntaxWarning and is
    # slated to become a SyntaxError.
    pat = re.compile(r'\{\+?(\w+)\}')
    params = pat.findall(string)
    snakeparams = [rust_identifier(p) for p in params]
    format_params = ','.join([
        '{}=percent_encode({}.{}.as_bytes(), NON_ALPHANUMERIC)'.format(p, paramsname, sp)
        for (p, sp) in zip(params, snakeparams)
    ])
    # Drop the RFC 6570 reserved-expansion marker so str.format sees plain {param}.
    string = string.replace('{+', '{')
    # Some required parameters are in the URL. This rust syntax formats the relative URL part appropriately.
    return 'format!("{}", {})'.format(string, format_params), snakeparams
def generate_service(resource, methods, discdoc, generate_subresources=True):
    """Generate the code for all methods in a resource.
    Returns a rendered string with source code.

    Arguments:
        resource: Resource name from the Discovery document.
        methods: The resource's JSON object (methods + nested resources).
        discdoc: The full Discovery document (for base/root URLs, name, auth).
        generate_subresources: Recurse into nested resources when True.
    """
    service = capitalize_first(resource)
    # Source code fragments implementing the methods.
    method_fragments = []
    # Source code fragments for impls of subordinate resources.
    subresource_fragments = []
    # Generate methods for subresources.
    if generate_subresources:
        for subresname, subresource in methods.get('resources', {}).items():
            subresource_fragments.append(generate_service(service + capitalize_first(subresname), subresource, discdoc))
    for methodname, method in methods.get('methods', {}).items():
        # Goal: Instantiate the templates for upload and non-upload methods.
        # e.g. FilesGetParams
        params_type_name = service + capitalize_first(methodname) + 'Params'
        # All parameters that are optional (as URL parameters)
        parameters = {
            p: rust_identifier(p)
            for p, pp in method.get('parameters', {}).items() if ('required' not in pp and pp['location'] != 'path')
        }
        # All required parameters not represented in the path.
        required_parameters = {
            p: rust_identifier(p)
            for p, pp in method.get('parameters', {}).items() if ('required' in pp and pp['location'] != 'path')
        }
        # Types of the function
        in_type = method['request']['$ref'] if 'request' in method else None
        out_type = method['response']['$ref'] if 'response' in method else '()'
        is_download = method.get('supportsMediaDownload', False)
        is_authd = 'scopes' in method
        media_upload = method.get('mediaUpload', {})
        supported_uploads = []
        if 'simple' in media_upload.get('protocols', {}):
            simple_upload_path = media_upload['protocols']['simple']['path']
            supported_uploads.append('simple')
        else:
            simple_upload_path = ''
        if 'resumable' in media_upload.get('protocols', {}):
            resumable_upload_path = media_upload['protocols']['resumable']['path']
            supported_uploads.append('resumable')
        else:
            resumable_upload_path = ''
        http_method = method['httpMethod']
        has_global_params = 'parameters' in discdoc
        # This relies on URL path parameters being required parameters (not
        # optional). If this invariant is not fulfilled, the Rust code may not
        # compile.
        # NOTE(review): required_params is overwritten by the two calls below
        # and never read afterwards; only the formatted path strings are used.
        formatted_path, required_params = resolve_parameters(method['path'])
        formatted_simple_upload_path, required_params = resolve_parameters(simple_upload_path)
        formatted_resumable_upload_path, required_params = resolve_parameters(resumable_upload_path)
        # Last listed scope is used for the generated method's auth requirement.
        scopetype, scopeval = scopes_url_to_enum_val(discdoc['name'], method.get('scopes', [''])[-1])
        scope_enum = scopetype + '::' + scopeval
        if is_download:
            # Template data for a method that can stream a media download.
            data_download = {
                'name':
                rust_identifier(methodname),
                'param_type':
                params_type_name,
                'in_type':
                in_type,
                'download_in_type':
                in_type if in_type else 'EmptyRequest',
                'out_type':
                out_type,
                'base_path':
                discdoc['baseUrl'],
                'root_path':
                discdoc['rootUrl'],
                'rel_path_expr':
                formatted_path,
                'params': [{
                    'param': p,
                    'snake_param': sp
                } for (p, sp) in parameters.items()],
                'required_params': [{
                    'param': p,
                    'snake_param': sp
                } for (p, sp) in required_parameters.items()],
                'global_params_name':
                rust_identifier(global_params_name(discdoc.get('name', ''))) if has_global_params else None,
                'scopes': [{
                    'scope': scope_enum,
                }],
                'description':
                method.get('description', ''),
                'http_method':
                http_method,
                'wants_auth':
                is_authd,
            }
            method_fragments.append(chevron.render(DownloadMethodTmpl, data_download))
        else:
            # Template data for a plain (non-download) method.
            data_normal = {
                'name':
                rust_identifier(methodname),
                'param_type':
                params_type_name,
                'in_type':
                in_type,
                'out_type':
                out_type,
                'base_path':
                discdoc['baseUrl'],
                'root_path':
                discdoc['rootUrl'],
                'rel_path_expr':
                formatted_path,
                'params': [{
                    'param': p,
                    'snake_param': sp
                } for (p, sp) in parameters.items()],
                'global_params_name':
                rust_identifier(global_params_name(discdoc.get('name', ''))) if has_global_params else None,
                'required_params': [{
                    'param': p,
                    'snake_param': sp
                } for (p, sp) in required_parameters.items()],
                'scopes': [{
                    'scope': scope_enum,
                }],
                'description':
                method.get('description', ''),
                'http_method':
                http_method,
                'wants_auth':
                is_authd,
            }
            method_fragments.append(chevron.render(NormalMethodTmpl, data_normal))
            # We generate an additional implementation with the option of uploading data.
            data_upload = {
                'name': rust_identifier(methodname),
                'param_type': params_type_name,
                'in_type': in_type,
                'out_type': out_type,
                'base_path': discdoc['baseUrl'],
                'root_path': discdoc['rootUrl'],
                'simple_rel_path_expr': formatted_simple_upload_path.lstrip('/'),
                'resumable_rel_path_expr': formatted_resumable_upload_path.lstrip('/'),
                'global_params_name':
                rust_identifier(global_params_name(discdoc.get('name', ''))) if has_global_params else None,
                'params': [{
                    'param': p,
                    'snake_param': sp
                } for (p, sp) in parameters.items()],
                'required_params': [{
                    'param': p,
                    'snake_param': sp
                } for (p, sp) in required_parameters.items()],
                'scopes': [{
                    'scope': scope_enum,
                }],
                'description': method.get('description', ''),
                'http_method': http_method,
                'wants_auth': is_authd,
            }
            if 'simple' in supported_uploads:
                method_fragments.append(chevron.render(UploadMethodTmpl, data_upload))
            if 'resumable' in supported_uploads:
                method_fragments.append(chevron.render(ResumableUploadMethodTmpl, data_upload))
    # Subresource impls are appended after this service's own impl.
    return chevron.render(
        ServiceImplementationTmpl, {
            'service': service,
            'name': capitalize_first(discdoc.get('name', '')),
            'base_path': discdoc['baseUrl'],
            'root_path': discdoc['rootUrl'],
            'wants_auth': 'auth' in discdoc,
            'methods': [{
                'text': t
            } for t in method_fragments]
        }) + '\n'.join(subresource_fragments)
def scopes_url_to_enum_val(apiname, url):
    """Map an OAuth scope URL to (enum type name, enum variant name)."""
    # Last path component of the scope URL becomes the variant name.
    variant = url.rsplit('/', 1)[-1].replace('-', '_').replace('.', '_')
    return snake_to_camel(apiname) + 'Scopes', snake_to_camel(variant)
def generate_scopes_type(name, scopes):
    """Generate types for the `scopes` dictionary (path: auth.oauth2.scopes in a discovery document),
    containing { scope_url: { description: "..." } }.
    """
    if not scopes:
        return ''
    template_data = {'scopes': []}
    for url, desc in scopes.items():
        enum_name, variant = scopes_url_to_enum_val(name, url)
        # All scopes of one API share the same enum type name.
        template_data['name'] = enum_name
        template_data['scopes'].append({'scope_name': variant,
                                        'desc': desc.get('description', ''),
                                        'url': url})
    return chevron.render(OauthScopesType, template_data)
def generate_all(discdoc):
    """Generate all structs and impls, and render them into a file.

    Writes gen/<api_id>_types.rs containing scope enums, schema structs,
    parameter structs and service impls, then best-effort formats it with
    rustfmt.
    """
    print('Processing:', discdoc.get('id', ''))
    schemas = discdoc.get('schemas', {})
    resources = discdoc.get('resources', {})

    # Generate scopes.
    scopes_type = generate_scopes_type(discdoc['name'], discdoc.get('auth', {}).get('oauth2', {}).get('scopes', {}))

    # Generate parameter types (*Params - those are used as "side inputs" to requests)
    params_struct_name = global_params_name(discdoc.get('name'))
    parameter_types = generate_params_structs(resources, global_params=params_struct_name)

    # Generate service impls.
    services = [generate_service(resource, methods, discdoc)
                for resource, methods in resources.items()]
    if 'methods' in discdoc:
        # Top-level methods go into a synthetic 'Global' service (no recursion).
        services.append(generate_service('Global', discdoc, discdoc, generate_subresources=False))

    # Generate schema types.
    structs = []
    for name, desc in schemas.items():
        typ, substructs = parse_schema_types(name, desc)
        structs.extend(substructs)

    # Generate global parameters struct and its Display impl.
    if 'parameters' in discdoc:
        schema = {'type': 'object', 'properties': discdoc['parameters']}
        name = replace_keywords(snake_to_camel(params_struct_name))
        typ, substructs = parse_schema_types(name, schema)
        for s in substructs:
            # Global parameters are all optional query parameters.
            s['optional_fields'] = s['fields']
            parameter_types.append(chevron.render(SchemaDisplayTmpl, s))
        structs.extend(substructs)

    # Assemble everything into a file.
    modname = (discdoc['id'] + '_types').replace(':', '_')
    out_path = path.join('gen', modname + '.rs')
    with open(out_path, 'w') as f:
        f.write(RustHeader)
        f.write(scopes_type)
        # Render resource structs.
        for s in structs:
            for field in s['fields']:
                if field.get('comment', None):
                    # Newlines in schema descriptions would break the generated doc comment.
                    field['comment'] = field.get('comment', '').replace('\n', ' ')
            if not s['name']:
                print('WARN', s)
            f.write(chevron.render(SchemaStructTmpl, s))
        # Render *Params structs.
        for pt in parameter_types:
            f.write(pt)
        # Render service impls.
        for s in services:
            f.write(s)
    # Bug fix: the previous bare `except:` with a silent `return` swallowed
    # every exception, including KeyboardInterrupt/SystemExit. rustfmt is
    # optional — unformatted output is still valid Rust.
    try:
        subprocess.run(['rustfmt', out_path, '--edition=2018'])
    except OSError as e:
        print('rustfmt not available:', e)
def from_cache(apiId):
    """Return the cached discovery document for `apiId`, or None if unavailable."""
    cache_file = path.join('cache', apiId + '.json')
    try:
        with open(cache_file, 'r') as fh:
            print('Found API description in cache for', apiId)
            return json.load(fh)
    except Exception as err:
        # Best-effort cache: any failure (missing file, bad JSON) means a miss.
        print('Fetching description from cache failed:', err)
        return None
def to_cache(apiId, doc):
    """Persist `doc` to cache/<apiId>.json; best-effort, always returns None."""
    try:
        os.makedirs('cache', exist_ok=True)
        with open(path.join('cache', apiId + '.json'), 'w') as fh:
            json.dump(doc, fh)
    except Exception as err:
        # Caching failures are non-fatal; just report them.
        print(err)
    return None
def fetch_discovery_base(url, apis):
    """Fetch the discovery base document from `url`. Return api documents for APIs with IDs in `apis`.
    Returns:
        List of API JSON documents.
    """
    base = from_cache('_global_discovery')
    if not base:
        base = json.loads(requests.get(url).text)
        to_cache('_global_discovery', base)
    # An empty `apis` list selects every API.
    return [entry for entry in base['items'] if not apis or entry['id'] in apis]
def fetch_discovery_doc(url_or_path):
    """Fetch discovery document for a given (short) API doc from the overall discovery document."""
    cachekey = url_or_path.replace('/', '_')
    cached = from_cache(cachekey)
    if cached:
        return cached
    if url_or_path.startswith('http'):
        # Remote document: fetch and cache it.
        doc = json.loads(requests.get(url_or_path).text)
        to_cache(cachekey, doc)
        return doc
    # Local file path: read directly, no caching needed.
    with open(url_or_path, 'r') as fh:
        return json.load(fh)
def main():
    """CLI entry point: fetch Discovery documents and generate Rust bindings."""
    p = argparse.ArgumentParser(description='Generate Rust code for asynchronous REST Google APIs.')
    p.add_argument('--discovery_base',
                   default='https://www.googleapis.com/discovery/v1/apis',
                   help='Base Discovery document.')
    p.add_argument('--only_apis', default='drive:v3', help='Only process APIs with these IDs (comma-separated)')
    p.add_argument('--doc', default='', help='Directly process Discovery document from this URL')
    p.add_argument('--list', default=False, help='List available APIs', action='store_true')
    args = p.parse_args()
    apilist = args.only_apis.split(',') if args.only_apis else []
    if args.list:
        for doc in fetch_discovery_base(args.discovery_base, []):
            print('API:', doc['title'], 'ID:', doc['id'])
        return
    if args.doc:
        discdoc = fetch_discovery_doc(args.doc)
        if 'error' in discdoc:
            # Bug fix: this branch previously referenced the undefined name
            # `doc`, raising NameError instead of reporting the fetch error.
            print('Error while fetching document for', args.doc, ':', discdoc)
            return
        if 'methods' in discdoc:
            #raise NotImplementedError("top-level methods are not yet implemented properly. Please take care.")
            pass
        generate_all(discdoc)
        return
    for doc in fetch_discovery_base(args.discovery_base, apilist):
        try:
            discdoc = fetch_discovery_doc(doc['discoveryRestUrl'])
            if 'methods' in discdoc:
                raise NotImplementedError("top-level methods are not yet implemented properly. Please take care.")
            if 'error' in discdoc:
                print('Error while fetching document for', doc['id'], ':', discdoc)
                continue
            generate_all(discdoc)
        except Exception as e:
            # Bug fix: report `doc` (always bound) rather than `discdoc`,
            # which is unbound when the fetch itself failed; the unreachable
            # `continue` after `raise` was dead code and is removed.
            print("Error while processing", doc)
            raise e
# Script entry point: only run the generator when executed directly.
if __name__ == '__main__':
    main()
|
import torch
from torch import nn
from torchvision.models import resnet50, resnet18, resnet34
from torch import einsum
import torch.nn.functional as F
# from resnet import resnet34
try:
from itertools import ifilterfalse
except ImportError:
from itertools import filterfalse as ifilterfalse
class ConvRelu(nn.Module):
    """3x3 convolution -> batch norm -> ReLU, preserving spatial size."""

    def __init__(self, in_, out):
        super().__init__()
        # padding=1 keeps H and W unchanged for a 3x3 kernel.
        self.conv = nn.Conv2d(in_, out, 3, padding=1)
        self.bn = nn.BatchNorm2d(out)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.activation(self.bn(self.conv(x)))
class DecoderBlock(nn.Module):
    """Three stacked ConvRelu layers; the first maps in_channels -> out_channels."""

    def __init__(self, in_channels, out_channels):
        super(DecoderBlock, self).__init__()
        self.in_channels = in_channels
        layers = [
            ConvRelu(in_channels, out_channels),
            ConvRelu(out_channels, out_channels),
            ConvRelu(out_channels, out_channels),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)
class UnetOverResnet18(nn.Module):
    """U-Net-style encoder/decoder with a ResNet-18 encoder and 3-channel output."""
    def __init__(self, num_up_filters=512, pretrained=True):
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)
        # F.interpolate is called with scale_factor=2 in forward (nearest mode).
        self.up_sample = nn.functional.interpolate
        encoder = resnet18(pretrained=pretrained)
        # Encoder stem: stride-2 conv + BN + ReLU; ResNet's own maxpool is
        # replaced by self.pool applied explicitly in forward.
        self.conv1 = nn.Sequential(
            encoder.conv1,
            encoder.bn1,
            encoder.relu
        )
        # ResNet-18 stages with output channels 64/128/256/512.
        self.conv2 = encoder.layer1
        self.conv3 = encoder.layer2
        self.conv4 = encoder.layer3
        self.conv5 = encoder.layer4
        self.center = DecoderBlock(512, num_up_filters)
        # Each decoder stage consumes the upsampled previous stage concatenated
        # with the matching encoder skip connection, halving filter count.
        self.dec5 = DecoderBlock(512 + num_up_filters, num_up_filters // 2)
        self.dec4 = DecoderBlock(256 + num_up_filters // 2, num_up_filters // 4)
        self.dec3 = DecoderBlock(128 + num_up_filters // 4, num_up_filters // 8)
        self.dec2 = DecoderBlock(64 + num_up_filters // 8, num_up_filters // 16)
        self.dec1 = DecoderBlock(64 + num_up_filters // 16, num_up_filters // 32)
        self.dec0 = ConvRelu(num_up_filters // 32, num_up_filters // 32)
        self.final = nn.Conv2d(num_up_filters // 32, 3, kernel_size=1)
    def forward(self, x):
        # Encoder path (spatial size halves at conv1, pool, and each layer2-4).
        conv1 = self.conv1(x)
        conv1_pool = self.pool(conv1)
        conv2 = self.conv2(conv1_pool)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        center = self.center(self.pool(conv5))
        # Decoder path with skip connections (channel-wise concat).
        # NOTE(review): concat requires upsampled maps to match the skip maps,
        # which presumably requires input H/W divisible by 64 — confirm.
        dec5 = self.dec5(torch.cat([self.up_sample(center, scale_factor=2), conv5], 1))
        dec4 = self.dec4(torch.cat([self.up_sample(dec5, scale_factor=2), conv4], 1))
        dec3 = self.dec3(torch.cat([self.up_sample(dec4, scale_factor=2), conv3], 1))
        dec2 = self.dec2(torch.cat([self.up_sample(dec3, scale_factor=2), conv2], 1))
        dec1 = self.dec1(torch.cat([self.up_sample(dec2, scale_factor=2), conv1], 1))
        dec0 = self.dec0(self.up_sample(dec1, scale_factor=2))
        x_out = self.final(dec0)
        return x_out
class Unet34(nn.Module):
    """U-Net segmentation model with a ResNet-34 encoder.

    Structurally identical to ``UnetOverResnet18`` but built on
    ``torchvision`` ResNet-34 (same stage channel counts: 64/64/128/256/512).

    Parameters
    ----------
    num_up_filters : int
        Channel count at the bottleneck; halved at each decoder stage.
    pretrained : bool
        Whether to load ImageNet weights for the encoder.
    """
    def __init__(self, num_up_filters=512, pretrained=True):
        super().__init__()
        self.pool = nn.MaxPool2d(2, 2)
        encoder = resnet34(pretrained=pretrained)
        self.relu = nn.ReLU(inplace=True)
        # Encoder stem: conv + bn + relu (pooling applied in forward()).
        self.conv1 = nn.Sequential(encoder.conv1,
                                   encoder.bn1,
                                   self.relu)
        self.conv2 = encoder.layer1
        self.conv3 = encoder.layer2
        self.conv4 = encoder.layer3
        self.conv5 = encoder.layer4
        # Decoder input channels = encoder skip channels + upsampled channels.
        self.center = DecoderBlock(512, num_up_filters)
        self.dec5 = DecoderBlock(512 + num_up_filters, num_up_filters // 2)
        self.dec4 = DecoderBlock(256 + num_up_filters // 2, num_up_filters // 4)
        self.dec3 = DecoderBlock(128 + num_up_filters // 4, num_up_filters // 8)
        self.dec2 = DecoderBlock(64 + num_up_filters // 8, num_up_filters // 16)
        self.dec1 = DecoderBlock(64 + num_up_filters // 16, num_up_filters // 32)
        self.dec0 = ConvRelu(num_up_filters // 32, num_up_filters // 32)
        self.final = nn.Conv2d(num_up_filters // 32, 3, kernel_size=1)
        # nn.functional.interpolate defaults to mode='nearest'.
        self.up_sample = nn.functional.interpolate
    def forward(self, x):
        """Encode, then decode with x2 upsampling and skip connections."""
        conv1 = self.conv1(x)
        conv1_pool = self.pool(conv1)
        conv2 = self.conv2(conv1_pool)
        conv3 = self.conv3(conv2)
        conv4 = self.conv4(conv3)
        conv5 = self.conv5(conv4)
        center = self.center(self.pool(conv5))
        dec5 = self.dec5(torch.cat([self.up_sample(center, scale_factor=2), conv5], 1))
        dec4 = self.dec4(torch.cat([self.up_sample(dec5, scale_factor=2), conv4], 1))
        dec3 = self.dec3(torch.cat([self.up_sample(dec4, scale_factor=2), conv3], 1))
        dec2 = self.dec2(torch.cat([self.up_sample(dec3, scale_factor=2), conv2], 1))
        dec1 = self.dec1(torch.cat([self.up_sample(dec2, scale_factor=2), conv1], 1))
        dec0 = self.dec0(self.up_sample(dec1, scale_factor=2))
        x_out = self.final(dec0)
        return x_out
if __name__ == '__main__':
    # Smoke test: push a zero tensor through an untrained model and
    # print the output size (should match the 256x256 input spatially).
    model = UnetOverResnet18(pretrained=False)
    y = torch.zeros((1, 3, 256, 256))
    x = model(y)
    print(x.size())
import math
import os
import logging
from qtpy.QtWidgets import (QWidget, QStyle, QStyleOption)
from qtpy.QtGui import (QColor, QPainter, QBrush, QPen, QPolygon, QPolygonF, QPixmap,
QMovie)
from qtpy.QtCore import Property, Qt, QPoint, QPointF, QSize, Slot, QTimer
from qtpy.QtDesigner import QDesignerFormWindowInterface
from .base import PyDMWidget
from ..utilities import is_qt_designer, find_file
logger = logging.getLogger(__name__)
def deg_to_qt(deg):
    """
    Converts from degrees to QT degrees.
    16 deg = 1 QTdeg
    Parameters
    ----------
    deg : float
        The value to convert.
    Returns
    -------
    float
        The value converted.
    """
    # Qt expresses angles in sixteenths of a degree.
    sixteenths_per_degree = 16
    return deg * sixteenths_per_degree
def qt_to_deg(deg):
    """
    Converts from QT degrees to degrees.
    16 deg = 1 QTdeg
    Parameters
    ----------
    deg : float
        The value to convert.
    Returns
    -------
    float
        The value converted.
    """
    # Qt expresses angles in sixteenths of a degree.
    sixteenths_per_degree = 16.0
    return deg / sixteenths_per_degree
class PyDMDrawing(QWidget, PyDMWidget):
    """
    Base class to be used for all PyDM Drawing Widgets.
    This class inherits from QWidget and PyDMWidget.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        # Drawing state is initialized before the QWidget/PyDMWidget
        # constructors run, since those may trigger paint events that
        # read these attributes.
        self._rotation = 0.0
        self._brush = QBrush(Qt.SolidPattern)
        self._original_brush = None
        self._painter = QPainter()
        self._pen = QPen(Qt.NoPen)
        self._pen_style = Qt.NoPen
        self._pen_cap_style = Qt.SquareCap
        self._pen_join_style = Qt.MiterJoin
        self._pen_width = 0
        self._pen_color = QColor(0, 0, 0)
        self._pen.setCapStyle(self._pen_cap_style)
        self._pen.setJoinStyle(self._pen_join_style)
        # "_original_*" values hold the user-configured appearance so it
        # can be restored when an alarm clears (see alarm_severity_changed).
        self._original_pen_style = self._pen_style
        self._original_pen_color = self._pen_color
        QWidget.__init__(self, parent)
        PyDMWidget.__init__(self, init_channel=init_channel)
        self.alarmSensitiveBorder = False
    def sizeHint(self):
        # Default preferred size for all drawing widgets.
        return QSize(100, 100)
    def paintEvent(self, _):
        """
        Paint events are sent to widgets that need to update themselves,
        for instance when part of a widget is exposed because a covering
        widget was moved.
        At PyDMDrawing this method handles the alarm painting with parameters
        from the stylesheet, configures the brush, pen and calls ```draw_item```
        so the specifics can be performed for each of the drawing classes.
        Parameters
        ----------
        event : QPaintEvent
        """
        painter = QPainter(self)
        opt = QStyleOption()
        opt.initFrom(self)
        # Let the active style/stylesheet paint the widget background first.
        self.style().drawPrimitive(QStyle.PE_Widget, opt, painter, self)
        painter.setRenderHint(QPainter.Antialiasing)
        painter.setBrush(self._brush)
        painter.setPen(self._pen)
        self.draw_item(painter)
    def draw_item(self, painter):
        """
        The classes inheriting from PyDMDrawing must overwrite this method.
        This method translate the painter to the center point given by
        ```get_center``` and rotate the canvas by the given amount of
        degrees.
        """
        xc, yc = self.get_center()
        painter.translate(xc, yc)
        # Negative angle: self._rotation is counter-clockwise, Qt's
        # rotate() is clockwise-positive.
        painter.rotate(-self._rotation)
    def get_center(self):
        """
        Simple calculation of the canvas' center point.
        Returns
        -------
        x, y : float
            Tuple with X and Y coordinates of the center.
        """
        return self.width() * 0.5, self.height() * 0.5
    def get_bounds(self, maxsize=False, force_no_pen=False):
        """
        Returns a tuple containing the useful area for the drawing.
        Parameters
        ----------
        maxsize : bool, default is False
            If True, width and height information are based on the
            maximum inner rectangle dimensions given by ```get_inner_max```,
            otherwise width and height will receive the widget size.
        force_no_pen : bool, default is False
            If True the pen width will not be considered when calculating
            the bounds.
        Returns
        -------
        x, y, w, h : tuple
            Tuple with X and Y coordinates followed by the maximum width
            and height.
        """
        w, h = self.width(), self.height()
        if maxsize:
            w, h = self.get_inner_max()
        xc, yc = w * 0.5, h * 0.5
        if self.has_border() and not force_no_pen:
            # Shrink the drawable area so the stroked border fits inside.
            w = max(0, w - 2 * self._pen_width)
            h = max(0, h - 2 * self._pen_width)
            x = max(0, self._pen_width)
            y = max(0, self._pen_width)
        else:
            x = 0
            y = 0
        # Coordinates are returned relative to the (translated) center.
        return x - xc, y - yc, w, h
    def has_border(self):
        """
        Check whether or not the drawing have a border based on the
        Pen Style and Pen width.
        Returns
        -------
        bool
            True if the drawing has a border, False otherwise.
        """
        if self._pen.style() != Qt.NoPen and self._pen_width > 0:
            return True
        else:
            return False
    def is_square(self):
        """
        Check if the widget has the same width and height values.
        Returns
        -------
        bool
            True in case the widget has a square shape, False otherwise.
        """
        return self.height() == self.width()
    def get_inner_max(self):
        """
        Calculates the largest inner rectangle in a rotated rectangle.
        This implementation was based on https://stackoverflow.com/a/18402507
        Returns
        -------
        w, h : tuple
            The width and height of the largest rectangle.
        """
        # Based on https://stackoverflow.com/a/18402507
        w0 = 0
        h0 = 0
        angle = math.radians(self._rotation)
        origWidth = self.width()
        origHeight = self.height()
        if origWidth == 0:
            logger.error("Invalid width. The value must be greater than {0}".format(origWidth))
            # NOTE(review): returning None here makes callers that unpack
            # ``w, h = self.get_inner_max()`` raise TypeError — confirm intended.
            return
        if origHeight == 0:
            logger.error("Invalid height. The value must be greater than {0}".format(origHeight))
            return
        # w0 always receives the shorter side.
        if (origWidth <= origHeight):
            w0 = origWidth
            h0 = origHeight
        else:
            w0 = origHeight
            h0 = origWidth
        # Angle normalization in range [-PI..PI)
        ang = angle - math.floor((angle + math.pi) / (2 * math.pi)) * 2 * math.pi
        ang = math.fabs(ang)
        if ang > math.pi / 2:
            ang = math.pi - ang
        # Scale factor so the rotated rectangle still fits inside the widget.
        c = w0 / (h0 * math.sin(ang) + w0 * math.cos(ang))
        w = 0
        h = 0
        # Undo the w0/h0 swap so the result matches the widget orientation.
        if (origWidth <= origHeight):
            w = w0 * c
            h = h0 * c
        else:
            w = h0 * c
            h = w0 * c
        return w, h
    @Property(QBrush)
    def brush(self):
        """
        PyQT Property for the brush object to be used when coloring the
        drawing
        Returns
        -------
        QBrush
        """
        return self._brush
    @brush.setter
    def brush(self, new_brush):
        """
        PyQT Property for the brush object to be used when coloring the
        drawing
        Parameters
        ----------
        new_brush : QBrush
        """
        if new_brush != self._brush:
            # Only remember the user's brush when no alarm is active, so
            # alarm colors are not captured as the value to restore.
            if self._alarm_state == PyDMWidget.ALARM_NONE:
                self._original_brush = new_brush
            self._brush = new_brush
            self.update()
    @Property(Qt.PenStyle)
    def penStyle(self):
        """
        PyQT Property for the pen style to be used when drawing the border
        Returns
        -------
        int
            Index at Qt.PenStyle enum
        """
        return self._pen_style
    @penStyle.setter
    def penStyle(self, new_style):
        """
        PyQT Property for the pen style to be used when drawing the border
        Parameters
        ----------
        new_style : int
            Index at Qt.PenStyle enum
        """
        # Remember user-configured style only outside of alarm state.
        if self._alarm_state == PyDMWidget.ALARM_NONE:
            self._original_pen_style = new_style
        if new_style != self._pen_style:
            self._pen_style = new_style
            self._pen.setStyle(new_style)
            self.update()
    @Property(Qt.PenCapStyle)
    def penCapStyle(self):
        """
        PyQT Property for the pen cap to be used when drawing the border
        Returns
        -------
        int
            Index at Qt.PenCapStyle enum
        """
        return self._pen_cap_style
    @penCapStyle.setter
    def penCapStyle(self, new_style):
        """
        PyQT Property for the pen cap style to be used when drawing the border
        Parameters
        ----------
        new_style : int
            Index at Qt.PenStyle enum
        """
        if new_style != self._pen_cap_style:
            self._pen_cap_style = new_style
            self._pen.setCapStyle(new_style)
            self.update()
    @Property(Qt.PenJoinStyle)
    def penJoinStyle(self):
        """
        PyQT Property for the pen join style to be used when drawing the border
        Returns
        -------
        int
            Index at Qt.PenJoinStyle enum
        """
        return self._pen_join_style
    @penJoinStyle.setter
    def penJoinStyle(self, new_style):
        """
        PyQT Property for the pen join style to be used when drawing the border
        Parameters
        ----------
        new_style : int
            Index at Qt.PenStyle enum
        """
        if new_style != self._pen_join_style:
            self._pen_join_style = new_style
            self._pen.setJoinStyle(new_style)
            self.update()
    @Property(QColor)
    def penColor(self):
        """
        PyQT Property for the pen color to be used when drawing the border
        Returns
        -------
        QColor
        """
        return self._pen_color
    @penColor.setter
    def penColor(self, new_color):
        """
        PyQT Property for the pen color to be used when drawing the border
        Parameters
        ----------
        new_color : QColor
        """
        # Remember user-configured color only outside of alarm state.
        if self._alarm_state == PyDMWidget.ALARM_NONE:
            self._original_pen_color = new_color
        if new_color != self._pen_color:
            self._pen_color = new_color
            self._pen.setColor(new_color)
            self.update()
    @Property(float)
    def penWidth(self):
        """
        PyQT Property for the pen width to be used when drawing the border
        Returns
        -------
        float
        """
        return self._pen_width
    @penWidth.setter
    def penWidth(self, new_width):
        """
        PyQT Property for the pen width to be used when drawing the border
        Parameters
        ----------
        new_width : float
        """
        # Negative widths are silently ignored.
        if new_width < 0:
            return
        if new_width != self._pen_width:
            self._pen_width = new_width
            self._pen.setWidth(self._pen_width)
            self.update()
    @Property(float)
    def rotation(self):
        """
        PyQT Property for the counter-clockwise rotation in degrees
        to be applied to the drawing.
        Returns
        -------
        float
        """
        return self._rotation
    @rotation.setter
    def rotation(self, new_angle):
        """
        PyQT Property for the counter-clockwise rotation in degrees
        to be applied to the drawing.
        Parameters
        ----------
        new_angle : float
        """
        if new_angle != self._rotation:
            self._rotation = new_angle
            self.update()
    def alarm_severity_changed(self, new_alarm_severity):
        """Restore the user-configured appearance when the alarm clears."""
        PyDMWidget.alarm_severity_changed(self, new_alarm_severity)
        if new_alarm_severity == PyDMWidget.ALARM_NONE:
            if self._original_brush is not None:
                self.brush = self._original_brush
            if self._original_pen_color is not None:
                self.penColor = self._original_pen_color
            if self._original_pen_style is not None:
                self.penStyle = self._original_pen_style
class PyDMDrawingLine(PyDMDrawing):
    """
    A widget with a line drawn in it.
    This class inherits from PyDMDrawing.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        super(PyDMDrawingLine, self).__init__(parent, init_channel)
        # Default appearance: a thin solid line rotated 45 degrees.
        self.rotation = 45
        self.penStyle = Qt.SolidLine
        self.penWidth = 1
    def draw_item(self, painter):
        """
        Draws the line after setting up the canvas with a call to
        ```PyDMDrawing.draw_item```.
        """
        super(PyDMDrawingLine, self).draw_item(painter)
        bounds = self.get_bounds()
        painter.drawLine(bounds[0], bounds[1], bounds[2], bounds[3])
class PyDMDrawingImage(PyDMDrawing):
    """
    Renders an image given by the ``filename`` property.
    This class inherits from PyDMDrawing.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    Attributes
    ----------
    null_color : Qt.Color
        QColor to fill the image if the filename is not found.
    """
    null_color = Qt.gray
    def __init__(self, parent=None, init_channel=None, filename=""):
        super(PyDMDrawingImage, self).__init__(parent, init_channel)
        # Start with a solid placeholder pixmap until a file is loaded.
        hint = super(PyDMDrawingImage, self).sizeHint()
        self._pixmap = QPixmap(hint)
        self._pixmap.fill(self.null_color)
        self._aspect_ratio_mode = Qt.KeepAspectRatio
        self._movie = None
        self._file = None
        # Make sure we don't set a non-existant file
        if filename:
            self.filename = filename
        # But we always have an internal value to reference
        else:
            self._file = filename
        if is_qt_designer():  # pragma: no cover
            # Reload the image when the designer form is saved, since the
            # relative base path may change with the form's location.
            designer_window = self.get_designer_window()
            if designer_window is not None:
                designer_window.fileNameChanged.connect(self.designer_form_saved)
            QTimer.singleShot(200, self.reload_image)
    def get_designer_window(self):  # pragma: no cover
        # Internal function to find the designer window that owns this widget.
        p = self.parent()
        while p is not None:
            if isinstance(p, QDesignerFormWindowInterface):
                return p
            p = p.parent()
        return None
    @Slot(str)
    def designer_form_saved(self, filename):  # pragma: no cover
        # Re-run the filename setter so relative paths resolve again.
        self.filename = self._file
    def reload_image(self):
        # Re-run the filename setter with the stored path.
        self.filename = self._file
    @Property(str)
    def filename(self):
        """
        The filename of the image to be displayed.
        This can be an absolute or relative path to the display file.
        Returns
        -------
        str
            The filename configured.
        """
        return self._file
    @filename.setter
    def filename(self, new_file):
        """
        The filename of the image to be displayed.
        This file can be either relative to the ``.ui`` file or absolute. If
        the path does not exist, a shape of ``.null_color`` will be displayed
        instead.
        Parameters
        -------
        new_file : str
            The filename to be used
        """
        # Expand user (~ or ~user) and environment variables.
        pixmap = None
        self._file = new_file
        abs_path = os.path.expanduser(os.path.expandvars(self._file))
        # Find the absolute path relative to UI
        if not os.path.isabs(abs_path):
            parent_display = self.find_parent_display()
            base_path = None
            if parent_display:
                base_path = os.path.dirname(parent_display.loaded_file())
            abs_path = find_file(abs_path, base_path=base_path)
            if not abs_path:
                # NOTE(review): logger.exception outside an except block
                # logs "NoneType: None" as the traceback — logger.error may
                # be intended here.
                logger.exception("Unable to find full filepath for %s",
                                 self._file)
                return
        # Check that the path exists
        if os.path.isfile(abs_path):
            # Stop and discard any previous animation before loading.
            if self._movie is not None:
                self._movie.stop()
                self._movie.deleteLater()
                self._movie = None
            if not abs_path.endswith(".gif"):
                pixmap = QPixmap(abs_path)
            else:
                # Animated GIF: frames are pushed via movie_frame_changed.
                self._movie = QMovie(abs_path, parent=self)
                self._movie.setCacheMode(QMovie.CacheAll)
                self._movie.frameChanged.connect(self.movie_frame_changed)
                if self._movie.frameCount() > 1:
                    self._movie.finished.connect(self.movie_finished)
                self._movie.start()
        # Return a blank image if we don't have a valid path
        else:
            # Warn the user loudly if their file does not exist, but avoid
            # doing this in Designer as this spams the user as they are typing
            if not is_qt_designer():  # pragma: no cover
                logger.error("Image file %r does not exist", abs_path)
            pixmap = QPixmap(self.sizeHint())
            pixmap.fill(self.null_color)
        # Update the display
        if pixmap is not None:
            self._pixmap = pixmap
            self.update()
    def sizeHint(self):
        # Prefer the natural image size when one is loaded.
        if self._pixmap.size().isEmpty():
            return super(PyDMDrawingImage, self).sizeHint()
        return self._pixmap.size()
    @Property(Qt.AspectRatioMode)
    def aspectRatioMode(self):
        """
        PyQT Property for aspect ratio mode to be used when rendering
        the image
        Returns
        -------
        int
            Index at Qt.AspectRatioMode enum
        """
        return self._aspect_ratio_mode
    @aspectRatioMode.setter
    def aspectRatioMode(self, new_mode):
        """
        PyQT Property for aspect ratio mode to be used when rendering
        the image
        Parameters
        ----------
        new_mode : int
            Index at Qt.AspectRatioMode enum
        """
        if new_mode != self._aspect_ratio_mode:
            self._aspect_ratio_mode = new_mode
            self.update()
    def draw_item(self, painter):
        """
        Draws the image after setting up the canvas with a call to
        ```PyDMDrawing.draw_item```.
        """
        super(PyDMDrawingImage, self).draw_item(painter)
        x, y, w, h = self.get_bounds(maxsize=True, force_no_pen=True)
        if not isinstance(self._pixmap, QMovie):
            _scaled = self._pixmap.scaled(w, h, self._aspect_ratio_mode,
                                          Qt.SmoothTransformation)
            # Make sure the image is centered if smaller than the widget itself
            if w > _scaled.width():
                logger.debug("Centering image horizontally ...")
                x += (w-_scaled.width())/2
            if h > _scaled.height():
                logger.debug("Centering image vertically ...")
                y += (h - _scaled.height())/2
            painter.drawPixmap(x, y, _scaled)
    def movie_frame_changed(self, frame_no):
        """
        Callback executed when a new frame is available at the QMovie.
        Parameters
        ----------
        frame_no : int
            The new frame index
        Returns
        -------
        None
        """
        if self._movie is None:
            return
        # Display the current animation frame as the widget's pixmap.
        curr_pixmap = self._movie.currentPixmap()
        self._pixmap = curr_pixmap
        self.update()
    def movie_finished(self):
        """
        Callback executed when the movie is finished.
        Returns
        -------
        None
        """
        if self._movie is None:
            return
        # Loop the animation.
        self._movie.start()
class PyDMDrawingRectangle(PyDMDrawing):
    """
    A widget with a rectangle drawn in it.
    This class inherits from PyDMDrawing.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        super(PyDMDrawingRectangle, self).__init__(parent, init_channel)
    def draw_item(self, painter):
        """
        Draws the rectangle after setting up the canvas with a call to
        ```PyDMDrawing.draw_item```.
        """
        super(PyDMDrawingRectangle, self).draw_item(painter)
        rect = self.get_bounds(maxsize=True)
        painter.drawRect(rect[0], rect[1], rect[2], rect[3])
class PyDMDrawingTriangle(PyDMDrawing):
    """
    A widget with a triangle drawn in it.
    This class inherits from PyDMDrawing.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        super(PyDMDrawingTriangle, self).__init__(parent, init_channel)
    def _calculate_drawing_points(self, x, y, w, h):
        # Vertices of the triangle inside the bounding box (x, y, w, h).
        # NOTE(review): QPoint takes integer coordinates; h / 2.0 and
        # w / 2.0 are floats — confirm the Qt binding in use accepts or
        # truncates them (QPolygonF/QPointF would avoid the issue).
        return [
            QPoint(x, h / 2.0),
            QPoint(x, y),
            QPoint(w / 2.0, y)
        ]
    def draw_item(self, painter):
        """
        Draws the triangle after setting up the canvas with a call to
        ```PyDMDrawing.draw_item```.
        """
        super(PyDMDrawingTriangle, self).draw_item(painter)
        x, y, w, h = self.get_bounds(maxsize=True)
        points = self._calculate_drawing_points(x, y, w, h)
        painter.drawPolygon(QPolygon(points))
class PyDMDrawingEllipse(PyDMDrawing):
    """
    A widget with an ellipse drawn in it.
    This class inherits from PyDMDrawing.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        super(PyDMDrawingEllipse, self).__init__(parent, init_channel)
    def draw_item(self, painter):
        """
        Draws the ellipse after setting up the canvas with a call to
        ```PyDMDrawing.draw_item```.
        """
        super(PyDMDrawingEllipse, self).draw_item(painter)
        # Use the maximum inner rectangle only for non-square widgets.
        use_max = not self.is_square()
        _, _, width, height = self.get_bounds(maxsize=use_max)
        # Centered at the translated origin.
        painter.drawEllipse(QPoint(0, 0), width / 2.0, height / 2.0)
class PyDMDrawingCircle(PyDMDrawing):
    """
    A widget with a circle drawn in it.
    This class inherits from PyDMDrawing.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        super(PyDMDrawingCircle, self).__init__(parent, init_channel)
    def _calculate_radius(self, width, height):
        # The circle must fit inside the smaller of the two dimensions.
        if width < height:
            return width / 2.0
        return height / 2.0
    def draw_item(self, painter):
        """
        Draws the circle after setting up the canvas with a call to
        ```PyDMDrawing.draw_item```.
        """
        super(PyDMDrawingCircle, self).draw_item(painter)
        _, _, width, height = self.get_bounds()
        radius = self._calculate_radius(width, height)
        # Centered at the translated origin.
        painter.drawEllipse(QPoint(0, 0), radius, radius)
class PyDMDrawingArc(PyDMDrawing):
    """
    A widget with an arc drawn in it.
    This class inherits from PyDMDrawing.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        super(PyDMDrawingArc, self).__init__(parent, init_channel)
        self.penStyle = Qt.SolidLine
        self.penWidth = 1.0
        # Angles are stored internally in Qt units (1/16 of a degree);
        # the properties below convert to/from plain degrees.
        self._start_angle = 0
        self._span_angle = deg_to_qt(90)
    @Property(float)
    def startAngle(self):
        """
        PyQT Property for the start angle in degrees
        Returns
        -------
        float
            Angle in degrees
        """
        return qt_to_deg(self._start_angle)
    @startAngle.setter
    def startAngle(self, new_angle):
        """
        PyQT Property for the start angle in degrees
        Parameters
        ----------
        new_angle : float
            Angle in degrees
        """
        if deg_to_qt(new_angle) != self._start_angle:
            self._start_angle = deg_to_qt(new_angle)
            self.update()
    @Property(float)
    def spanAngle(self):
        """
        PyQT Property for the span angle in degrees
        Returns
        -------
        float
            Angle in degrees
        """
        return qt_to_deg(self._span_angle)
    @spanAngle.setter
    def spanAngle(self, new_angle):
        """
        PyQT Property for the span angle in degrees
        Parameters
        ----------
        new_angle : float
            Angle in degrees
        """
        if deg_to_qt(new_angle) != self._span_angle:
            self._span_angle = deg_to_qt(new_angle)
            self.update()
    def draw_item(self, painter):
        """
        Draws the arc after setting up the canvas with a call to
        ```PyDMDrawing.draw_item```.
        """
        super(PyDMDrawingArc, self).draw_item(painter)
        maxsize = not self.is_square()
        x, y, w, h = self.get_bounds(maxsize=maxsize)
        painter.drawArc(x, y, w, h, self._start_angle, self._span_angle)
class PyDMDrawingPie(PyDMDrawingArc):
    """
    A widget with a pie drawn in it.
    This class inherits from PyDMDrawing.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        super(PyDMDrawingPie, self).__init__(parent, init_channel)
    def draw_item(self, painter):
        """
        Draws the pie after setting up the canvas with a call to
        ```PyDMDrawing.draw_item```.
        """
        super(PyDMDrawingPie, self).draw_item(painter)
        # Use the maximum inner rectangle only for non-square widgets.
        use_max = not self.is_square()
        x, y, w, h = self.get_bounds(maxsize=use_max)
        painter.drawPie(x, y, w, h, self._start_angle, self._span_angle)
class PyDMDrawingChord(PyDMDrawingArc):
    """
    A widget with a chord drawn in it.
    This class inherits from PyDMDrawing.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        super(PyDMDrawingChord, self).__init__(parent, init_channel)
    def draw_item(self, painter):
        """
        Draws the chord after setting up the canvas with a call to
        ```PyDMDrawing.draw_item```.
        """
        super(PyDMDrawingChord, self).draw_item(painter)
        # Use the maximum inner rectangle only for non-square widgets.
        use_max = not self.is_square()
        x, y, w, h = self.get_bounds(maxsize=use_max)
        painter.drawChord(x, y, w, h, self._start_angle, self._span_angle)
class PyDMDrawingPolygon(PyDMDrawing):
    """
    A widget with a polygon drawn in it.
    This class inherits from PyDMDrawing.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        super(PyDMDrawingPolygon, self).__init__(parent, init_channel)
        self._num_points = 3
    @Property(int)
    def numberOfPoints(self):
        """
        PyQT Property for the number of points
        Returns
        -------
        int
            Number of Points
        """
        return self._num_points
    @numberOfPoints.setter
    def numberOfPoints(self, points):
        """
        PyQT Property setter for the number of points.
        Values below 3 (not a polygon) are silently ignored.
        Parameters
        ----------
        points : int
        """
        if points >= 3 and points != self._num_points:
            self._num_points = points
            self.update()
    def _calculate_drawing_points(self, x, y, w, h):
        """
        Return the polygon vertices evenly spaced on the circle inscribed
        in the bounding box, centered on the (translated) origin:
        (r*cos(theta), r*sin(theta)).
        """
        r = min(w, h) / 2.0
        deg_step = 360.0 / self._num_points
        points = []
        for i in range(self._num_points):
            theta = math.radians(deg_step * i)
            points.append(QPointF(r * math.cos(theta), r * math.sin(theta)))
        return points
    def draw_item(self, painter):
        """
        Draws the Polygon after setting up the canvas with a call to
        ```PyDMDrawing.draw_item```.
        """
        super(PyDMDrawingPolygon, self).draw_item(painter)
        # Fix: the previously computed ``maxsize`` local was unused and
        # ``is_square()`` was evaluated a second time in the call below.
        maxsize = not self.is_square()
        x, y, w, h = self.get_bounds(maxsize=maxsize)
        poly = self._calculate_drawing_points(x, y, w, h)
        painter.drawPolygon(QPolygonF(poly))
class PyDMDrawingPolyline(PyDMDrawing):
    """
    A widget with a multi-segment, piecewise-linear line drawn in it.
    This class inherits from PyDMDrawing.
    Parameters
    ----------
    parent : QWidget
        The parent widget for the Label
    init_channel : str, optional
        The channel to be used by the widget.
    """
    def __init__(self, parent=None, init_channel=None):
        super(PyDMDrawingPolyline, self).__init__(parent, init_channel)
        self.penStyle = Qt.SolidLine
        self.penWidth = 1
        # List of "x, y" strings (absolute widget coordinates).
        self._points = []
    def draw_item(self, painter):
        """
        Draws the segmented line after setting up the canvas with a call to
        ```PyDMDrawing.draw_item```.
        """
        super(PyDMDrawingPolyline, self).draw_item(painter)
        x, y, w, h = self.get_bounds()
        def p2d(pt):
            "convert point to drawing coordinates"
            # drawing coordinates are centered: (0,0) is in center
            # our points are absolute: (0,0) is upper-left corner
            u, v = map(int, pt.split(","))
            return QPoint(u+x, v+y)
        if len(self._points) > 1:
            for i, p1 in enumerate(self._points[:-1]):
                painter.drawLine(p2d(p1), p2d(self._points[i+1]))
    def getPoints(self):
        """Return the list of points in standard "x, y" string form."""
        return self._points
    def _validator_(self, value):
        """
        ensure that `value` has correct form
        Parameters
        ----------
        value : [str]
            List of strings representing ordered pairs
            of integer coordinates. Each ordered pair
            is comma-separated (although white-space
            separated is acceptable as input).
        Returns
        ----------
        verified : [str]
            List of strings in standard format
        """
        def isinteger(value):
            # Fix: the previous check used float(), which accepted values
            # like "1.5" that later crash int() inside draw_item's p2d().
            try:
                int(value.strip())
                return True
            except ValueError:
                return False
        verified = []
        for i, pt in enumerate(value):
            point = pt.split(",")
            if len(point) != 2:
                point = pt.split()  # tolerant of space-separated
            if len(point) != 2:
                emsg = "polyline point %d must be two values, comma-separated, received '%s'" % (i+1, pt)
                logger.exception(emsg)
                return
            if not isinteger(point[0]):
                emsg = "polyline point %d content must be integer, received '%s'" % (i+1, point[0])
                logger.exception(emsg)
                return
            if not isinteger(point[1]):
                emsg = "polyline point %d content must be integer, received '%s'" % (i+1, point[1])
                logger.exception(emsg)
                return
            verified.append(", ".join(point))
        return verified
    def setPoints(self, value):
        """Validate and store a new list of points; ignores bad input."""
        if len(value) < 2:
            emsg = "Must have two or more points"
            logger.exception(emsg)
            return
        verified = self._validator_(value)
        if verified is not None:
            self._points = verified
            self.update()
    def resetPoints(self):
        """Clear the point list and repaint."""
        self._points = []
        self.update()
    points = Property("QStringList", getPoints, setPoints, resetPoints)
|
import knight
class Value():
    """Base class for all Knight values.

    Subclasses implement ``parse`` (classmethod returning an instance or
    None) and may override ``run`` and the coercion/operator dunders.
    """
    @classmethod
    def parse(cls, stream):
        """Parse the next value from ``stream``, or return None.

        Strips leading whitespace/comments, then tries each concrete
        subclass in order.
        """
        if not isinstance(stream, knight.Stream):
            stream = knight.Stream(stream)
        # Consume comments, whitespace, and the ignorable punctuation.
        while stream.matches(r'(?:#.*?(\n|\Z)|\A[\s()\[\]{}:])*'):
            pass
        for subcls in [Number, Text, Boolean, Identifier, Null, Ast]:
            if None != (value := subcls.parse(stream)):
                return value
    @classmethod
    def create(cls, data):
        """Wrap a native Python value in the matching Knight type."""
        if isinstance(data, Value):
            return data
        elif isinstance(data, str):
            return Text(data)
        # bool must be checked before int: bool is a subclass of int.
        elif isinstance(data, bool):
            return Boolean(data)
        elif isinstance(data, int):
            return Number(data)
        elif data == None:
            return Null(None)
        else:
            raise TypeError(f"unknown value kind '{type(data)}'")
    def __init__(self, data):
        # Value itself is abstract; only subclasses may be instantiated.
        if type(self) == Value:
            raise RuntimeError("nope")
        self.data = data
    def __repr__(self):
        return f"Value({repr(self.data)})"
    def run(self):
        """Evaluate this value; literals evaluate to themselves."""
        return self
    def __str__(self):
        return str(self.run().data)
    def __int__(self):
        return int(self.run().data)
    def __bool__(self):
        return bool(self.run().data)
    def __add__(self, rhs):
        return Number(int(self) + int(rhs))
    def __sub__(self, rhs):
        return Number(int(self) - int(rhs))
    def __mul__(self, rhs):
        return Number(int(self) * int(rhs))
    def __truediv__(self, rhs):
        # Fix: the original defined the Python 2 dunder ``__div__``, which
        # Python 3 never calls -- the ``/`` operator raised TypeError.
        return Number(int(self) / int(rhs))
    # Backwards-compatible alias for any code calling __div__ directly.
    __div__ = __truediv__
    def __mod__(self, rhs):
        return Number(int(self) % int(rhs))
    def __pow__(self, rhs):
        return Number(int(self) ** int(rhs))
    def __lt__(self, rhs):
        return int(self) < (int(rhs))
    def __eq__(self, rhs):
        # Equal only when both the concrete type and the payload match.
        return type(self) == type(rhs) and self.data == rhs.data
class Number(Value):
    """A Knight integer literal."""
    @classmethod
    def parse(cls, stream):
        """Parse a run of digits into a Number, or return None."""
        digits = stream.matches(r'\d+')
        if digits:
            return Number(int(digits))
class Text(Value):
    """A Knight string value (single- or double-quoted)."""
    @classmethod
    def parse(cls, stream):
        """Parse a quoted string into a Text, or return None.

        Raises
        ------
        ValueError
            If the literal is unterminated (closing quote missing).
        """
        if match := stream.matches(r'(["\'])((?:.|\n)*?)(\1|\Z)'):
            if match[0] not in ['"', '\''] or match[0] != match[-1]:
                # note that the stream is still advanced...
                # Fix: ``ArgumentError`` does not exist in Python (raising
                # it produced a NameError); use ValueError instead.
                raise ValueError("unterminated string encountered: " + match)
            else:
                return Text(match[1:-1])
    def __add__(self, rhs):
        # String concatenation (both sides coerced to str).
        return Text(str(self) + str(rhs))
    def __mul__(self, rhs):
        # String repetition.
        return Text(str(self) * int(rhs))
    def __lt__(self, rhs):
        # Lexicographic comparison.
        return str(self) < str(rhs)
class Boolean(Value):
    """A Knight boolean, written as a TRUE/FALSE keyword."""
    @classmethod
    def parse(cls, stream):
        """Parse a T.../F... keyword into a Boolean, or return None."""
        word = stream.matches(r'[TF][A-Z]*')
        if word:
            return Boolean(word.startswith('T'))
    def __str__(self):
        if self.data:
            return "true"
        return "false"
class Null(Value):
    """The Knight null value, written as an N... keyword."""
    @classmethod
    def parse(cls, stream):
        """Parse an N... keyword into Null, or return None."""
        if stream.matches(r'N[A-Z]*'):
            return Null(None)
    def __str__(self):
        return "null"
class Identifier(Value):
    """A Knight variable name; running it looks the name up."""
    @classmethod
    def parse(cls, stream):
        """Parse a lowercase identifier, or return None."""
        name = stream.matches(r'[a-z_][a-z0-9_]*')
        if name:
            return Identifier(name)
    def run(self):
        # Resolve the variable in the global environment; a missing name
        # propagates the underlying KeyError.
        return knight.ENVIRONMENT[self.data]
class Ast(Value):
    """A function-invocation node: a Knight function plus its arguments."""
    @classmethod
    def parse(cls, stream):
        """Parse a known function name and its arguments, or return None."""
        source = str(stream)
        # Fix: peeking ``str(stream)[0]`` raised IndexError when the
        # stream was empty; bail out gracefully instead.
        if not source:
            return None
        if func := knight.Function.known.get(source[0]):
            # Word functions consume their whole uppercase name; symbol
            # functions consume a single character.
            stream.matches(r'[A-Z]+|.')
            return Ast(func, [Value.parse(stream) for _ in range(func.arity)])
    def __init__(self, func, args):
        self.func = func
        self.args = args
    def __repr__(self):
        # Fix: repr previously mislabelled this class as ``Value``.
        return f"Ast({repr(self.func)}, {repr(self.args)})"
    def run(self):
        """Evaluate by applying the function to its (unevaluated) args."""
        return self.func(*self.args)
|
#!/usr/bin/python
from topo_base.fabric_to_vm_inter_vn import FabricToVmInterVn
from topo_base.fabric_to_vm_intra_vn import FabricToVmIntraVn
from topo_base.vm_to_fabric_inter_vn import VmToFabricInterVn
from topo_base.vm_to_fabric_intra_vn import VmToFabricIntraVn
from topo_base.vm_to_vm_inter_vn import VmToVmInterVn
from topo_base.vm_to_vm_intra_vn import VmToVmIntraVn
import os
import sys
sys.path.append(os.getcwd())
sys.path.append(os.getcwd() + '/lib/')
from imports import * # noqa
class TestHbsFabricToVmInterVn(FabricToVmInterVn):
    """Verify HBS (host-based services) redirection for fabric-to-VM
    traffic across VNs: packets must detour through the hbs-l/hbs-r
    interfaces before reaching the tenant VM."""
    def test_hbs_fabric_to_vm_inter_vn(self):
        # Add hbs-l vif
        hbs_l_vif = VirtualVif(
            name="tap1589a2b3-22",
            ipv4_str="172.16.17.32",
            mac_str="00:00:5e:00:01:00",
            idx=3,
            vrf=3,
            flags=constants.VIF_FLAG_HBS_LEFT)
        hbs_l_vif.sync()
        # Add hbs-r vif
        hbs_r_vif = VirtualVif(
            name="tap8b05a86b-36",
            ipv4_str="192.168.127.12",
            mac_str="00:00:5e:00:01:00",
            idx=4,
            vrf=4,
            flags=constants.VIF_FLAG_HBS_RIGHT)
        hbs_r_vif.sync()
        # Add hbs-l and hbs-r in the vrf table
        vrf = Vrf(
            vrf_rid=0,
            vrf_idx=5,
            vrf_flags=constants.VRF_FLAG_VALID |
            constants.VRF_FLAG_HBS_L_VALID |
            constants.VRF_FLAG_HBS_R_VALID,
            vrf_hbfl_vif_idx=3,
            vrf_hbfr_vif_idx=4)
        vrf.sync()
        # Mark the flow pair (created by the base class) for HBS
        # redirection in both directions.
        self.f_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_LEFT
        self.r_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_RIGHT
        self.f_flow.sync()
        self.r_flow.sync()
        # send mplsudp packet from fabric
        icmp_inner = IcmpPacket(
            sip='2.2.2.3',
            dip='1.1.1.3',
            icmp_type=constants.ECHO_REPLY,
            id=4145)
        pkt = icmp_inner.get_packet()
        pkt.show()
        self.assertIsNotNone(pkt)
        # Wrap the ICMP payload in an MPLS-over-UDP tunnel header as it
        # would arrive on the fabric interface.
        mpls = MplsoUdpPacket(
            label=42,
            sip='8.0.0.3',
            dip='8.0.0.2',
            smac='00:1b:21:bb:f9:46',
            dmac='00:1b:21:bb:f9:48',
            sport=53363,
            dport=6635,
            id=10,
            inner_pkt=pkt)
        pkt = mpls.get_packet()
        pkt.show()
        self.assertIsNotNone(pkt)
        # Make sure the packet comes goes to hbs-r (tap8b05a86b-36)
        rcv_pkt = self.fabric_interface.send_and_receive_packet(
            pkt, hbs_r_vif)
        # TODO: Send the rcv_pkt to the next call instead of
        # forming a new packet
        # Inject the packet from hbs-l to vrouter
        # Encode the flow id in the dst mac of the packet
        icmp = IcmpPacket(
            sip='1.0.0.5',
            dip='1.0.0.3',
            smac='00:00:5e:00:01:00',
            dmac='c0:d2:00:06:08:f0',
            icmp_type=0,
            id=4145)
        pkt = icmp.get_packet()
        pkt.show()
        self.assertIsNotNone(pkt)
        # Send it to hbs-l
        rcv_pkt = hbs_l_vif.send_and_receive_packet(pkt, self.tenant_vif)
        # Check if the packet was sent to vrouter (by vtest) on fabric
        # and received at tenant_vif (by vtest)
        self.assertEqual(1, self.fabric_interface.get_vif_ipackets())
        self.assertEqual(1, self.tenant_vif.get_vif_opackets())
        # Check if the packet was sent to hbs-r (by vrouter)
        # and received at hbs-l (by vtest)
        self.assertEqual(1, hbs_r_vif.get_vif_opackets())
        self.assertEqual(1, hbs_l_vif.get_vif_ipackets())
class TestHbsFabricToVmIntraVn(FabricToVmIntraVn):
"""Host-based-services (HBS) tests for fabric -> VM traffic inside one VN.

Each test wires up an hbs-l / hbs-r vif pair, registers them in the vrf
table, flags the flows for HBS, and then verifies (via vtest packet
injection and vif counters) that traffic detours through the service
interfaces before delivery.
"""
def test_hbs_fabric_to_vm_intra_vn(self):
"""A fabric MPLSoUDP packet must be steered to hbs-r, re-injected on
hbs-l, and finally delivered unmodified to the tenant vif."""
# Add hbs-l vif
hbs_l_vif = VirtualVif(
name="tap1589a2b3-22",
ipv4_str="172.16.17.32",
mac_str="00:00:5e:00:01:00",
idx=3,
vrf=3,
flags=constants.VIF_FLAG_HBS_LEFT)
hbs_l_vif.sync()
# Add hbs-r vif
hbs_r_vif = VirtualVif(
name="tap8b05a86b-36",
ipv4_str="192.168.127.12",
mac_str="00:00:5e:00:01:00",
idx=4,
vrf=4,
flags=constants.VIF_FLAG_HBS_RIGHT)
hbs_r_vif.sync()
# Add Bridge Route
bridge_route = BridgeRoute(
vrf=5,
mac_str="02:c2:23:4c:d0:55",
nh_idx=44)
bridge_route.sync()
# Add hbs-l and hbs-r in the vrf table
vrf = Vrf(
vrf_rid=0,
vrf_idx=5,
vrf_flags=constants.VRF_FLAG_VALID |
constants.VRF_FLAG_HBS_L_VALID |
constants.VRF_FLAG_HBS_R_VALID,
vrf_hbfl_vif_idx=3,
vrf_hbfr_vif_idx=4)
vrf.sync()
# Mark both flow directions for HBS processing.
self.f_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_LEFT
self.r_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_RIGHT
self.f_flow.sync()
self.r_flow.sync()
# send mplsudp packet from fabric
icmp_inner = IcmpPacket(
sip='1.1.1.5',
dip='1.1.1.3',
smac='02:e7:03:ea:67:f1',
dmac='02:c2:23:4c:d0:55',
icmp_type=constants.ECHO_REPLY,
id=4145)
pkt = icmp_inner.get_packet()
pkt.show()
self.assertIsNotNone(pkt)
mpls = MplsoUdpPacket(
label=42,
sip='8.0.0.3',
dip='8.0.0.2',
smac='00:1b:21:bb:f9:46',
dmac='00:1b:21:bb:f9:48',
sport=53363,
dport=6635,
id=10,
inner_pkt=pkt)
pkt = mpls.get_packet()
pkt.show()
self.assertIsNotNone(pkt)
# Make sure the packet comes goes to hbs-r (tap8b05a86b-36)
hbsr_pkt = self.fabric_interface.send_and_receive_packet(
pkt, hbs_r_vif)
# Send it to hbs-l
tenant_pkt = hbs_l_vif.send_and_receive_packet(
hbsr_pkt, self.tenant_vif)
# The inner ICMP frame must arrive at the tenant vif with the original
# L2/L3 addressing intact.
self.assertIsNotNone(tenant_pkt)
self.assertTrue(ICMP in tenant_pkt)
self.assertEqual("1.1.1.5", tenant_pkt[IP].src)
self.assertEqual("1.1.1.3", tenant_pkt[IP].dst)
self.assertEqual("02:c2:23:4c:d0:55", tenant_pkt[Ether].dst)
self.assertEqual("02:e7:03:ea:67:f1", tenant_pkt[Ether].src)
# Check if the packet was sent to vrouter (by vtest) on fabric
# and received at tenant_vif (by vtest)
self.assertEqual(1, self.fabric_interface.get_vif_ipackets())
self.assertEqual(1, self.tenant_vif.get_vif_opackets())
# Check if the packet was sent to hbs-r (by vrouter)
# and received at hbs-l (by vtest)
self.assertEqual(1, hbs_r_vif.get_vif_opackets())
self.assertEqual(1, hbs_l_vif.get_vif_ipackets())
def test_hbs_cem_11144(self):
"""Regression test (CEM-11144): a packet held on a flow in HOLD state
must be flushed out on hbs-r once the reverse flow is activated."""
# Add hbs-l vif
hbs_l_vif = VirtualVif(
name="tap1589a2b3-22",
ipv4_str="172.16.17.32",
mac_str="00:00:5e:00:01:00",
idx=3,
vrf=3,
flags=constants.VIF_FLAG_HBS_LEFT)
hbs_l_vif.sync()
# Add hbs-r vif
hbs_r_vif = VirtualVif(
name="tap8b05a86b-36",
ipv4_str="192.168.127.12",
mac_str="00:00:5e:00:01:00",
idx=4,
vrf=4,
flags=constants.VIF_FLAG_HBS_RIGHT)
hbs_r_vif.sync()
# Add Bridge Route
bridge_route = BridgeRoute(
vrf=5,
mac_str="02:c2:23:4c:d0:55",
nh_idx=44)
bridge_route.sync()
# Add hbs-l and hbs-r in the vrf table
vrf = Vrf(
vrf_rid=0,
vrf_idx=5,
vrf_flags=constants.VRF_FLAG_VALID |
constants.VRF_FLAG_HBS_L_VALID |
constants.VRF_FLAG_HBS_R_VALID,
vrf_hbfl_vif_idx=3,
vrf_hbfr_vif_idx=4)
vrf.sync()
# Delete the pre-created flows so the incoming packet creates a
# brand-new flow in HOLD state.
self.f_flow.delete()
self.r_flow.delete()
# send mplsudp packet from fabric
# This creates a flow in hold state
icmp_inner = IcmpPacket(
sip='1.1.1.5',
dip='1.1.1.3',
smac='02:e7:03:ea:67:f1',
dmac='02:c2:23:4c:d0:55',
icmp_type=constants.ECHO_REPLY,
id=4145)
pkt = icmp_inner.get_packet()
pkt.show()
self.assertIsNotNone(pkt)
mpls = MplsoUdpPacket(
label=42,
sip='8.0.0.3',
dip='8.0.0.2',
smac='00:1b:21:bb:f9:46',
dmac='00:1b:21:bb:f9:48',
sport=53363,
dport=6635,
id=10,
inner_pkt=pkt)
pkt = mpls.get_packet()
pkt.show()
self.assertIsNotNone(pkt)
# Send the packet from fabric
rcv_pkt = self.fabric_interface.send_packet(
pkt)
# Flow is created but in Hold state
# Set forwarding action for rflow now
# Bump the generation id so vrouter treats this as a fresh flow entry.
self.r_flow.fr_gen_id = self.r_flow.fr_gen_id + 1
self.r_flow.fr_flags = constants.VR_FLOW_FLAG_ACTIVE
self.r_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_RIGHT
self.r_flow.sync(resp_required=True)
# Wait for some time for the held packet to be flushed by vrouter
time.sleep(2)
# Check if the flushed packet was sent by vrouter on hbs-r
self.assertEqual(1, hbs_r_vif.get_vif_opackets())
class TestHbsVmToFabricInterVn(VmToFabricInterVn):
"""HBS test for VM -> fabric traffic across virtual networks."""
def test_hbs_vm_to_fabric_inter_vn(self):
"""A tenant packet must detour through hbs-l then hbs-r and leave the
fabric encapsulated as MPLSoUDP (dport 6635)."""
# Add hbs-l vif
hbs_l_vif = VirtualVif(
name="tap1589a2b3-22",
ipv4_str="172.16.17.32",
mac_str="00:00:5e:00:01:00",
idx=4,
vrf=3,
flags=constants.VIF_FLAG_HBS_LEFT)
hbs_l_vif.sync()
# Add hbs-r vif
hbs_r_vif = VirtualVif(
name="tap8b05a86b-36",
ipv4_str="192.168.127.12",
mac_str="00:00:5e:00:01:00",
idx=5,
vrf=4,
flags=constants.VIF_FLAG_HBS_RIGHT)
hbs_r_vif.sync()
# Add hbs-l and hbs-r in vrf table
vrf = Vrf(
vrf_rid=0,
vrf_idx=2,
vrf_flags=constants.VRF_FLAG_VALID |
constants.VRF_FLAG_HBS_L_VALID |
constants.VRF_FLAG_HBS_R_VALID,
vrf_hbfl_vif_idx=4,
vrf_hbfr_vif_idx=5)
vrf.sync()
# Mark both flow directions for HBS processing.
self.f_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_LEFT
self.r_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_RIGHT
self.f_flow.sync()
self.r_flow.sync()
icmp_pkt = IcmpPacket(
sip='1.1.1.3',
dip='2.2.2.3',
smac='02:03:eb:4b:e8:d8',
dmac='00:00:5e:00:01:00',
id=1418)
pkt = icmp_pkt.get_packet()
pkt.show()
self.assertIsNotNone(pkt)
# send packet
hbfl_pkt = self.tenant_vif.send_and_receive_packet(pkt, hbs_l_vif)
self.assertIsNotNone(hbfl_pkt)
hbfl_pkt.show()
# Send it to hbs-r and expect response on fabric
fab_pkt = hbs_r_vif.send_and_receive_packet(
hbfl_pkt, self.fabric_interface)
self.assertIsNotNone(fab_pkt)
fab_pkt.show()
# Check if fabric got a MPLSoUDP packet
self.assertTrue((UDP in fab_pkt) and (fab_pkt[UDP].dport == 6635))
# Check if the packet was sent to vrouter (by vtest) on tenant_vif
# and received at fabric (by vtest)
self.assertEqual(1, self.tenant_vif.get_vif_ipackets())
self.assertEqual(1, self.fabric_interface.get_vif_opackets())
# Check if the packet was sent to hbs-l (by vrouter)
# and received at hbs-r (by vtest)
self.assertEqual(1, hbs_l_vif.get_vif_opackets())
self.assertEqual(1, hbs_r_vif.get_vif_ipackets())
class TestHbsVmToFabricIntraVn(VmToFabricIntraVn):
"""HBS test for VM -> fabric traffic within a single virtual network."""
def test_hbs_vm_to_fabric_intra_vn(self):
"""A tenant ping must be received on hbs-l, and after re-injection on
hbs-r must leave the fabric vif as MPLSoUDP."""
# Add hbs-l vif
hbs_l_vif = VirtualVif(
name="tap1589a2b3-22",
ipv4_str="172.16.17.32",
mac_str="00:00:5e:00:01:00",
idx=3,
vrf=3,
flags=constants.VIF_FLAG_HBS_LEFT)
hbs_l_vif.sync()
# Add hbs-r vif
hbs_r_vif = VirtualVif(
name="tap8b05a86b-36",
ipv4_str="192.168.127.12",
mac_str="00:00:5e:00:01:00",
idx=4,
vrf=4,
flags=constants.VIF_FLAG_HBS_RIGHT)
hbs_r_vif.sync()
# Add hbs-l and hbs-r in the vrf table
vrf = Vrf(
vrf_rid=0,
vrf_idx=5,
vrf_flags=constants.VRF_FLAG_VALID |
constants.VRF_FLAG_HBS_L_VALID |
constants.VRF_FLAG_HBS_R_VALID,
vrf_hbfl_vif_idx=3,
vrf_hbfr_vif_idx=4)
vrf.sync()
# Mark both flow directions for HBS processing.
self.f_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_LEFT
self.r_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_RIGHT
self.f_flow.sync()
self.r_flow.sync()
# send ping request from tenant_vif
icmp = IcmpPacket(
sip='1.0.0.3',
dip='1.0.0.5',
smac='02:c2:23:4c:d0:55',
dmac='02:e7:03:ea:67:f1',
id=4145)
pkt = icmp.get_packet()
pkt.show()
# send packet and receive on hbs-l
hbsl_pkt = self.tenant_vif.send_and_receive_packet(pkt, hbs_l_vif)
# Inject the packet from hbs-r to vrouter
# Encode the flow id in the src mac of the packet
# NOTE(review): this hand-built packet is never sent -- hbsl_pkt is
# injected below instead. It documents the expected flow-id-encoded
# smac; candidate for removal once confirmed unused.
icmp = IcmpPacket(
sip='1.0.0.3',
dip='1.0.0.5',
smac='ca:f1:00:00:d9:d4',
dmac='02:e7:03:ea:67:f1',
id=4145)
pkt = icmp.get_packet()
pkt.show()
self.assertIsNotNone(pkt)
# Send it to hbs-r and expect response on fabric vif
fabric_pkt = hbs_r_vif.send_and_receive_packet(
hbsl_pkt, self.fabric_vif)
self.assertIsNotNone(fabric_pkt)
self.assertTrue(UDP in fabric_pkt)
self.assertEqual(6635, fabric_pkt[UDP].dport)
self.assertEqual("172.16.17.32", fabric_pkt[IP].src)
self.assertEqual("8.0.0.3", fabric_pkt[IP].dst)
# Check if the packet was sent to vrouter (by vtest) on tenant_vif
# and received at fabric (by test)
self.assertEqual(1, self.tenant_vif.get_vif_ipackets())
self.assertEqual(1, self.fabric_vif.get_vif_opackets())
# Check if the packet was sent to hbs-l (by vrouter)
# and received at hbs-r (by vtest)
self.assertEqual(1, hbs_l_vif.get_vif_opackets())
self.assertEqual(1, hbs_r_vif.get_vif_ipackets())
class TestHbsVmToVmInterVn(VmToVmInterVn):
    """Host-based-services (HBS) test for VM-to-VM traffic across two VNs."""

    def test_hbs_vm_to_vm_inter_vn(self):
        """Ping request (vif3 -> vif4) and reply (vif4 -> vif3) must each
        detour through the hbs-l / hbs-r service interfaces.

        Bug fix versus the original: the final ICMP presence check
        inspected ``vif4_pkt`` instead of the just-received ``vif3_pkt``.
        """
        # Add hbs-l vif
        hbs_l_vif = VirtualVif(
            name="tap3",
            ipv4_str="172.16.17.32",
            mac_str="00:00:5e:00:01:00",
            idx=5,
            vrf=3,
            flags=constants.VIF_FLAG_HBS_LEFT)
        hbs_l_vif.sync()
        # Add hbs-r vif
        hbs_r_vif = VirtualVif(
            name="tap4",
            ipv4_str="192.168.127.12",
            mac_str="00:00:5e:00:01:00",
            idx=6,
            vrf=4,
            flags=constants.VIF_FLAG_HBS_RIGHT)
        hbs_r_vif.sync()
        # Add vif3 Nexthop (bridge).
        # A packet re-injected from hbs-r towards vif3 needs a dst-mac
        # lookup in the bridge table, because its dmac was encoded with
        # the flow id on the way out.
        vif3_nhb = EncapNextHop(
            encap_oif_id=self.vif3.idx(),
            encap="02 88 67 0c 2e 11 00 00 5e 00 01 00 08 00",
            nh_idx=27,
            nh_family=constants.AF_BRIDGE,
            nh_vrf=3,
            nh_flags=constants.NH_FLAG_POLICY_ENABLED |
            constants.NH_FLAG_ETREE_ROOT)
        vif3_nhb.sync()
        # Add bridge Route
        bridge_route = BridgeRoute(
            vrf=3,
            mac_str="02:88:67:0c:2e:11",
            nh_idx=27)
        bridge_route.sync()
        # Register hbs-l / hbs-r in the vrf table for both VRFs (3 and 4).
        vrf = Vrf(
            vrf_rid=0,
            vrf_idx=3,
            vrf_flags=constants.VRF_FLAG_VALID |
            constants.VRF_FLAG_HBS_L_VALID |
            constants.VRF_FLAG_HBS_R_VALID,
            vrf_hbfl_vif_idx=5,
            vrf_hbfr_vif_idx=6)
        vrf.sync()
        vrf = Vrf(
            vrf_rid=0,
            vrf_idx=4,
            vrf_flags=constants.VRF_FLAG_VALID |
            constants.VRF_FLAG_HBS_L_VALID |
            constants.VRF_FLAG_HBS_R_VALID,
            vrf_hbfl_vif_idx=5,
            vrf_hbfr_vif_idx=6)
        vrf.sync()
        # Mark both flow directions for HBS processing.
        self.f_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_LEFT
        self.r_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_RIGHT
        self.f_flow.sync()
        self.r_flow.sync()
        # send ping request from vif3 and receive in hbs-l
        icmp = IcmpPacket(
            sip='1.1.1.4',
            dip='2.2.2.4',
            smac='02:88:67:0c:2e:11',
            dmac='00:00:5e:00:01:00',
            id=1136)
        pkt = icmp.get_packet()
        pkt.show()
        # send packet
        hbsl_pkt = self.vif3.send_and_receive_packet(pkt, hbs_l_vif)
        # send hbsl packet to hbs-r
        vif4_pkt = hbs_r_vif.send_and_receive_packet(hbsl_pkt, self.vif4)
        # check if we got ICMP packet
        self.assertTrue(ICMP in vif4_pkt)
        self.assertEqual('1.1.1.4', vif4_pkt[IP].src)
        self.assertEqual('2.2.2.4', vif4_pkt[IP].dst)
        # send ping response from tenant_vif4 and receive in hbs-r
        icmp = IcmpPacket(
            sip='2.2.2.4',
            dip='1.1.1.4',
            smac='02:e7:03:ea:67:f1',
            dmac='00:00:5e:00:01:00',
            icmp_type=constants.ECHO_REPLY,
            id=1136)
        pkt = icmp.get_packet()
        pkt.show()
        self.assertIsNotNone(pkt)
        # send packet
        hbsr_pkt = self.vif4.send_and_receive_packet(pkt, hbs_r_vif)
        hbsr_pkt.show()
        # TODO: Use hbsr_pkt instead of this
        #
        # send ping response from hbs-r and receive in tenant_vif3
        icmp = IcmpPacket(
            sip='2.2.2.4',
            dip='1.1.1.4',
            smac='00:00:5e:00:01:00',
            dmac='c0:d1:00:04:05:8c',
            icmp_type=constants.ECHO_REPLY,
            id=1136)
        pkt = icmp.get_packet()
        pkt.show()
        self.assertIsNotNone(pkt)
        # send packet
        vif3_pkt = hbs_l_vif.send_and_receive_packet(pkt, self.vif3)
        # check if we got ICMP packet
        # BUG FIX: the original asserted on vif4_pkt here; the packet
        # under test is vif3_pkt, the reply just received at vif3.
        self.assertTrue(ICMP in vif3_pkt)
        self.assertEqual('2.2.2.4', vif3_pkt[IP].src)
        self.assertEqual('1.1.1.4', vif3_pkt[IP].dst)
        # Check if the packet was sent on tenant_vif3 and received at hbs-l
        self.assertEqual(1, self.vif3.get_vif_ipackets())
        self.assertEqual(1, hbs_l_vif.get_vif_opackets())
        # Check if the packet was sent to hbs-r and received from tenant_vif4
        self.assertEqual(1, hbs_r_vif.get_vif_opackets())
        self.assertEqual(1, self.vif4.get_vif_ipackets())
        # Check if the packet was sent on tenant_vif4 and received at hbs-r
        self.assertEqual(1, self.vif4.get_vif_opackets())
        self.assertEqual(1, hbs_r_vif.get_vif_ipackets())
        # Check if the packet was sent to hbs-l and received from tenant_vif3
        self.assertEqual(1, self.vif3.get_vif_opackets())
        self.assertEqual(1, hbs_l_vif.get_vif_ipackets())
class TestHbsVmToVmIntraVn(VmToVmIntraVn):
"""HBS tests for VM-to-VM traffic within a single virtual network,
one test per traffic direction."""
def test_hbs_left_vm_to_right_vm_intra_vm(self):
"""Ping request vif3 -> vif4 must detour hbs-l then hbs-r."""
# Add hbs-l vif
hbs_l_vif = VirtualVif(
name="tap1589a2b3-22",
ipv4_str="172.16.17.32",
mac_str="00:00:5e:00:01:00",
idx=5,
vrf=3,
flags=constants.VIF_FLAG_HBS_LEFT)
hbs_l_vif.sync()
# Add hbs-r vif
hbs_r_vif = VirtualVif(
name="tap8b05a86b-36",
ipv4_str="192.168.127.12",
mac_str="00:00:5e:00:01:00",
idx=6,
vrf=4,
flags=constants.VIF_FLAG_HBS_RIGHT)
hbs_r_vif.sync()
# Add hbs-l and hbs-r in the vrf table
vrf = Vrf(
vrf_rid=0,
vrf_idx=2,
vrf_flags=constants.VRF_FLAG_VALID |
constants.VRF_FLAG_HBS_L_VALID |
constants.VRF_FLAG_HBS_R_VALID,
vrf_hbfl_vif_idx=5,
vrf_hbfr_vif_idx=6)
vrf.sync()
# Mark both flow directions for HBS processing.
self.f_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_LEFT
self.r_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_RIGHT
self.f_flow.sync()
self.r_flow.sync()
# send ping request from tenant_vif3
icmp = IcmpPacket(
sip='1.1.1.4',
dip='1.1.1.5',
smac='02:88:67:0c:2e:11',
dmac='02:e7:03:ea:67:f1',
id=1136)
pkt = icmp.get_packet()
pkt.show()
# send packet and receive on hbs-l
hbsl_pkt = self.vif3.send_and_receive_packet(pkt, hbs_l_vif)
# send the packet on hbs-r and receive in vif4
vif4_pkt = hbs_r_vif.send_and_receive_packet(hbsl_pkt, self.vif4)
self.assertIsNotNone(vif4_pkt)
self.assertTrue(ICMP in vif4_pkt)
self.assertEqual("1.1.1.4", vif4_pkt[IP].src)
self.assertEqual("1.1.1.5", vif4_pkt[IP].dst)
# Check if the packet was sent on tenant_vif3 and received at
# tenant_vif4
self.assertEqual(1, self.vif3.get_vif_ipackets())
self.assertEqual(1, self.vif4.get_vif_opackets())
# Check if the packet was sent to hbs-l and received from hbs-r
self.assertEqual(1, hbs_l_vif.get_vif_opackets())
self.assertEqual(1, hbs_r_vif.get_vif_ipackets())
def test_hbs_right_vm_to_left_vm_intra_vn(self):
"""Ping reply vif4 -> vif3 must detour hbs-r then hbs-l."""
# Add hbs-l vif
hbs_l_vif = VirtualVif(
name="tap1589a2b3-22",
ipv4_str="172.16.17.32",
mac_str="00:00:5e:00:01:00",
idx=5,
vrf=3,
flags=constants.VIF_FLAG_HBS_LEFT)
hbs_l_vif.sync()
# Add hbs-r vif
hbs_r_vif = VirtualVif(
name="tap8b05a86b-36",
ipv4_str="192.168.127.12",
mac_str="00:00:5e:00:01:00",
idx=6,
vrf=4,
flags=constants.VIF_FLAG_HBS_RIGHT)
hbs_r_vif.sync()
# Add hbs-l and hbs-r in the vrf table
vrf = Vrf(
vrf_rid=0,
vrf_idx=2,
vrf_flags=constants.VRF_FLAG_VALID |
constants.VRF_FLAG_HBS_L_VALID |
constants.VRF_FLAG_HBS_R_VALID,
vrf_hbfl_vif_idx=5,
vrf_hbfr_vif_idx=6)
vrf.sync()
# Mark both flow directions for HBS processing.
self.f_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_LEFT
self.r_flow.fr_flags1 = constants.VR_FLOW_FLAG1_HBS_RIGHT
self.f_flow.sync()
self.r_flow.sync()
# send ping request from vif4
icmp = IcmpPacket(
sip='1.1.1.5',
dip='1.1.1.4',
smac='02:e7:03:ea:67:f1',
dmac='02:88:67:0c:2e:11',
icmp_type=constants.ECHO_REPLY,
id=1136)
pkt = icmp.get_packet()
pkt.show()
# send packet and receive on hbs-r
hbsr_pkt = self.vif4.send_and_receive_packet(pkt, hbs_r_vif)
# send packet in hbsl and receive on vif3
vif3_pkt = hbs_l_vif.send_and_receive_packet(hbsr_pkt, self.vif3)
self.assertIsNotNone(vif3_pkt)
self.assertTrue(ICMP in vif3_pkt)
self.assertEqual("1.1.1.5", vif3_pkt[IP].src)
self.assertEqual("1.1.1.4", vif3_pkt[IP].dst)
# Check if the packet was sent on vif4 and received at
# vif3
self.assertEqual(1, self.vif4.get_vif_ipackets())
self.assertEqual(1, self.vif3.get_vif_opackets())
# Check if the packet was sent to hbs-r and received from hbs-l
self.assertEqual(1, hbs_r_vif.get_vif_opackets())
self.assertEqual(1, hbs_l_vif.get_vif_ipackets())
|
<filename>getmovielens.py<gh_stars>0
import pandas as pd
import numpy as np
from sklearn.model_selection import KFold
from utils.preprocessing import preprocess
from utils.kfold import get_kfold
import argparse
import os
# Fixed, date-stamped RNG seed so preprocessing and k-fold splits are
# reproducible across runs (also reused for KFold below).
random_state = 20191109
np.random.seed(random_state)
def main(args):
"""Preprocess a raw MovieLens dump and write k-fold train/val/test splits.

Reads the ratings/tags/movies csv files from ``args.dir``, renames the
MovieLens columns to the project's ``itemId``/``name`` convention,
filters tags via ``preprocess``, then writes per-fold csv files
(``tr_``/``val_``/``te_`` ratings and tags, 1-indexed by fold) plus the
tag-id and movie tables into ``args.save_dir``.

:param args: parsed argparse namespace (see the __main__ block).
"""
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
rpath = os.path.join(args.dir,args.rname)
tpath = os.path.join(args.dir,args.tname)
mpath = os.path.join(args.dir,args.mname)
ratings = pd.read_csv(rpath)
tags = pd.read_csv(tpath)
movies = pd.read_csv(mpath)
# Normalize MovieLens column names to the project convention.
ratings = ratings.rename(columns={'movieId':'itemId'})
tags = tags.rename(columns={'movieId':'itemId'})
tags['tag'] = tags['tag'].astype(str)
# Drop the 'BD-R' tag -- presumably a known junk/format tag; confirm.
tags = tags[tags.tag != 'BD-R']
movies = movies.rename(columns={'movieId':'itemId','title':'name'})
ratings, tags, interactions, movies, tag_tagId = preprocess(ratings = ratings, tags = tags, \
items = movies, tag_user_threshold = args.tag_user_threshold, tag_item_threshold = args.tag_item_threshold)
kf = KFold(n_splits = args.k, shuffle = True, random_state = random_state)
# 1-based fold counter used in the output filenames.
k = 1
tag_tagId.to_csv(os.path.join(args.save_dir, 'tag_tagId.csv'), index=False)
movies.sort_values(by='itemId').to_csv(os.path.join(args.save_dir, 'movies.csv'), index=False)
for train_index, test_index in kf.split(interactions):
# Each element of train/valid/test is a (ratings, tags) pair.
train, valid, test = \
get_kfold(ratings, tags, interactions, train_index, test_index, args.val_ratio)
train[0].sort_values(by='userId').to_csv(os.path.join(args.save_dir, 'tr_ratings'+str(k)+'.csv'), index=False)
train[1].sort_values(by='userId').to_csv(os.path.join(args.save_dir, 'tr_tags'+str(k)+'.csv'), index=False)
valid[0].sort_values(by='userId').to_csv(os.path.join(args.save_dir, 'val_ratings'+str(k)+'.csv'), index=False)
valid[1].sort_values(by='userId').to_csv(os.path.join(args.save_dir, 'val_tags'+str(k)+'.csv'), index=False)
test[0].sort_values(by='userId').to_csv(os.path.join(args.save_dir, 'te_ratings'+str(k)+'.csv'), index=False)
test[1].sort_values(by='userId').to_csv(os.path.join(args.save_dir, 'te_tags'+str(k)+'.csv'), index=False)
k+= 1
if __name__ == "__main__":
    # Commandline arguments
    parser = argparse.ArgumentParser(description="Data Preprocess")
    parser.add_argument('--dir', dest='dir', default="ml-latest/",
                        help="directory containing the raw MovieLens csv files")
    parser.add_argument('--save', dest='save_dir', default='data/movielens',
                        help="directory the processed splits are written to")
    parser.add_argument('--rating', dest='rname', default='ratings.csv')
    parser.add_argument('--tag', dest='tname', default='tags.csv')
    parser.add_argument('--movie', dest='mname', default='movies.csv')
    # BUG FIX: the numeric options had no explicit type, so values passed
    # on the command line arrived as strings (e.g. KFold(n_splits='5')
    # raises); the int/float defaults masked the problem.
    parser.add_argument('--tu', dest='tag_user_threshold', type=int, default=10)
    parser.add_argument('--ti', dest='tag_item_threshold', type=int, default=10)
    parser.add_argument('--k', dest='k', type=int, default=5)
    parser.add_argument('--ratio', dest='val_ratio', type=float, default=0.3)
    # parser.add_argument('--rm', dest='minrating', default=0)
    args = parser.parse_args()
    main(args)
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - <NAME>, <EMAIL>, 2017
import os
import re
import glob
from time import sleep
from pilot.common.errorcodes import ErrorCodes
from pilot.util.container import execute
from pilot.util.filehandling import copy
import logging
logger = logging.getLogger(__name__)
errors = ErrorCodes()
def get_analysis_trf(transform, workdir):
"""
Prepare to download the user analysis transform with curl.
The function will verify the download location from a known list of hosts.
:param transform: full trf path (url) (string).
:param workdir: work directory (string).
:return: exit code (int), diagnostics (string), transform_name (string)
"""
ec = 0
diagnostics = ""
# test if $HARVESTER_WORKDIR is set
harvester_workdir = os.environ.get('HARVESTER_WORKDIR')
if harvester_workdir is not None:
# Harvester pre-stages job-option tarballs; copy them into the
# work directory before attempting any download.
search_pattern = "%s/jobO.*.tar.gz" % harvester_workdir
logger.debug("search_pattern - %s" % search_pattern)
jobopt_files = glob.glob(search_pattern)
for jobopt_file in jobopt_files:
logger.debug("jobopt_file = %s workdir = %s" % (jobopt_file, workdir))
try:
copy(jobopt_file, workdir)
except Exception as e:
# Best-effort: log and continue with the remaining files.
logger.error("could not copy file %s to %s : %s" % (jobopt_file, workdir, e))
# The transform name is the last component of the URL path.
if '/' in transform:
transform_name = transform.split('/')[-1]
else:
logger.warning('did not detect any / in %s (using full transform name)' % transform)
transform_name = transform
# is the command already available? (e.g. if already downloaded by a preprocess/main process step)
if os.path.exists(os.path.join(workdir, transform_name)):
logger.info('script %s is already available - no need to download again' % transform_name)
return ec, diagnostics, transform_name
original_base_url = ""
# verify the base URL
for base_url in get_valid_base_urls():
if transform.startswith(base_url):
original_base_url = base_url
break
if original_base_url == "":
diagnostics = "invalid base URL: %s" % transform
return errors.TRFDOWNLOADFAILURE, diagnostics, ""
# try to download from the required location, if not - switch to backup
status = False
for base_url in get_valid_base_urls(order=original_base_url):
# Substitute the base URL so the same path is tried on each mirror.
trf = re.sub(original_base_url, base_url, transform)
logger.debug("attempting to download script: %s" % trf)
status, diagnostics = download_transform(trf, transform_name, workdir)
if status:
break
if not status:
return errors.TRFDOWNLOADFAILURE, diagnostics, ""
logger.info("successfully downloaded script")
path = os.path.join(workdir, transform_name)
logger.debug("changing permission of %s to 0o755" % path)
try:
os.chmod(path, 0o755)  # Python 2/3
except Exception as e:
diagnostics = "failed to chmod %s: %s" % (transform_name, e)
return errors.CHMODTRF, diagnostics, ""
return ec, diagnostics, transform_name
def get_valid_base_urls(order=None):
    """
    Return a list of valid base URLs from where the user analysis transform may be downloaded from.
    If order is defined, return given item first.
    E.g. order=http://atlpan.web.cern.ch/atlpan -> ['http://atlpan.web.cern.ch/atlpan', ...]
    NOTE: the URL list may be out of date.
    :param order: order (string).
    :return: valid base URLs (list).
    """
    known_urls = ["https://storage.googleapis.com/drp-us-central1-containers",
                  "http://pandaserver-doma.cern.ch:25080/trf/user"]
    if not order:
        return known_urls
    # Put the preferred URL first, then the remaining known mirrors.
    return [order] + [url for url in known_urls if url != order]
def download_transform(url, transform_name, workdir):
"""
Download the transform from the given url
:param url: download URL with path to transform (string).
:param transform_name: trf name (string).
:param workdir: work directory (string).
:return: status (Boolean), diagnostics (string).
"""
status = False
diagnostics = ""
path = os.path.join(workdir, transform_name)
cmd = 'curl -sS \"%s\" > %s' % (url, path)
trial = 1
max_trials = 3
# test if $HARVESTER_WORKDIR is set
harvester_workdir = os.environ.get('HARVESTER_WORKDIR')
if harvester_workdir is not None:
# skip curl by setting max_trials = 0
# (the transform was pre-staged by Harvester; copy it instead)
max_trials = 0
source_path = os.path.join(harvester_workdir, transform_name)
try:
copy(source_path, path)
status = True
except Exception as error:
status = False
diagnostics = "Failed to copy file %s to %s : %s" % (source_path, path, error)
logger.error(diagnostics)
# try to download the trf a maximum of 3 times
while trial <= max_trials:
logger.info("executing command [trial %d/%d]: %s" % (trial, max_trials, cmd))
exit_code, stdout, stderr = execute(cmd, mute=True)
if not stdout:
stdout = "(None)"
if exit_code != 0:
# Analyze exit code / output
diagnostics = "curl command failed: %d, %s, %s" % (exit_code, stdout, stderr)
logger.warning(diagnostics)
if trial == max_trials:
logger.fatal('could not download transform: %s' % stdout)
status = False
break
else:
logger.info("will try again after 60 s")
sleep(60)
else:
logger.info("curl command returned: %s" % stdout)
status = True
break
trial += 1
return status, diagnostics
|
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sympy.solvers import solve
from sympy import Symbol
from matplotlib import patches
import matplotlib.patches as mpatches
import scipy.io as sio
# plotting configuration
ratio = 1.5
figure_len, figure_width = 15*ratio, 12*ratio
font_size_1, font_size_2 = 36*ratio, 36*ratio
legend_size = 18*ratio
line_width, tick_len = 3*ratio, 10*ratio
marker_size = 15*ratio
plot_line_width = 5*ratio
hfont = {'fontname': 'Arial'}
# simulation setup
# dt = 0.1 ms; T covers 9 s of simulated time, so index i maps to
# time i*dt seconds (e.g. step 50000 == t = 5 s).
dt = 0.0001
T = int(9/dt)
# neuronal parameters
tau_e, tau_i = 0.020, 0.010
alpha_e, alpha_i = 2, 2
# adaptation
U, U_max = 1, 6
tau_x = 0.20
# network connectivity
Jee = 1.8
Jie = 1.0
Jei = 1.0
Jii = 0.6
# Two passes: inhibitory perturbation applied before (True) vs during
# (False) the elevated-excitation stimulation window.
l_b_before_stimulation = [True, False]
for b_before_stimulation in l_b_before_stimulation:
x = 1
r_e, r_i = 0, 0
z_e, z_i = 0, 0
l_r_e, l_r_i = [], []
for i in range(T):
# Excitatory drive stepped up between steps 50000 and 70000.
if 50000 <= i < 70000:
g_e, g_i = 3.0, 2
else:
g_e, g_i = 1.55, 2
# Brief inhibitory input increase (paradoxical-effect probe),
# placed either before or during the stimulation window.
if b_before_stimulation:
if 42000 < i <= 49000:
g_i = 2.1
else:
pass
else:
if 62000 < i <= 69000:
g_i = 2.1
else:
pass
g_e = g_e * (g_e > 0)
g_i = g_i * (g_i > 0)
# SSN part
z_e = Jee * r_e - Jei * r_i + g_e
z_i = Jie * x * r_e - Jii * r_i + g_i
z_e = z_e * (z_e > 0)
z_i = z_i * (z_i > 0)
# Supralinear (power-law) rate dynamics, Euler-integrated.
r_e = r_e + (-r_e + np.power(z_e, alpha_e)) / tau_e * dt
r_i = r_i + (-r_i + np.power(z_i, alpha_i)) / tau_i * dt
r_e = r_e * (r_e > 0)
r_i = r_i * (r_i > 0)
# adaptation of excitatory neurons
x = x + ((U - x) / tau_x + U * (U_max - x) * r_e) * dt
x = np.clip(x, 0, U_max)
l_r_e.append(r_e)
l_r_i.append(r_i)
l_r_e = np.asarray(l_r_e)
l_r_i = np.asarray(l_r_i)
if b_before_stimulation:
plt.figure(figsize=(figure_len, figure_width))
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(line_width)
plt.tick_params(width=line_width, length=tick_len)
# Normalize rates to the pre-perturbation baseline window.
mean_e = l_r_e / np.mean(l_r_e[40000:42000])
mean_i = l_r_i / np.mean(l_r_i[40000:42000])
plt.plot(mean_e, color='blue', linewidth=plot_line_width)
plt.plot(mean_i, color='red', linewidth=plot_line_width)
plt.xticks([40000, 42000, 44000, 46000, 48000], [1.0, 1.2, 1.4, 1.6, 1.8], fontsize=font_size_1, **hfont)
plt.yticks([0, 0.2, 0.4, 0.6, 0.8, 1.0, 1.2], fontsize=font_size_1, **hfont)
plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)
plt.ylabel('Normalized firing rate', fontsize=font_size_1, **hfont)
plt.xlim([40000, 48000])
plt.ylim([0, 1.2])
plt.legend(['Exc', 'Inh'], prop={"family": "Arial", 'size': font_size_1})
plt.hlines(y=1, xmin=42000, xmax=50000, colors='k', linestyles=[(0, (6, 6, 6, 6))], linewidth=line_width)
plt.savefig(
'paper_figures/png/Fig_4S_Supralinear_network_2D_EI_STP_normalized_activity_paradoxical_effect_before_stimulation.png')
plt.savefig(
'paper_figures/pdf/Fig_4S_Supralinear_network_2D_EI_STP_normalized_activity_paradoxical_effect_before_stimulation.pdf')
else:
plt.figure(figsize=(figure_len, figure_width))
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(True)
ax.spines['left'].set_visible(True)
for axis in ['top', 'bottom', 'left', 'right']:
ax.spines[axis].set_linewidth(line_width)
plt.tick_params(width=line_width, length=tick_len)
# Normalize rates to the during-stimulation baseline window.
mean_e = l_r_e / np.mean(l_r_e[60000:62000])
mean_i = l_r_i / np.mean(l_r_i[60000:62000])
plt.plot(mean_e, color='blue', linewidth=plot_line_width)
plt.plot(mean_i, color='red', linewidth=plot_line_width)
plt.xticks([60000, 62000, 64000, 66000, 68000], [3.0, 3.2, 3.4, 3.6, 3.8], fontsize=font_size_1,
**hfont)
plt.yticks([0.85, 0.9, 0.95, 1.0, 1.05], fontsize=font_size_1, **hfont)
plt.xlabel('Time (s)', fontsize=font_size_1, **hfont)
plt.ylabel('Normalized firing rate', fontsize=font_size_1, **hfont)
plt.xlim([60000, 68000])
plt.ylim([0.85, 1.05])
plt.legend(['Exc', 'Inh'], prop={"family": "Arial", 'size': font_size_1})
plt.hlines(y=1, xmin=62000, xmax=70000, colors='k', linestyles=[(0, (6, 6, 6, 6))], linewidth=line_width)
plt.savefig(
'paper_figures/png/Fig_4S_Supralinear_network_2D_EI_STP_normalized_activity_paradoxical_effect_during_stimulation.png')
plt.savefig(
'paper_figures/pdf/Fig_4S_Supralinear_network_2D_EI_STP_normalized_activity_paradoxical_effect_during_stimulation.pdf')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.