max_stars_repo_path (string, 4–286 chars) | max_stars_repo_name (string, 5–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.03M chars) | language (111 classes) | language_score (float64, 0.03–1) | comments (string, 0–556k chars) | edu_score (float64, 0.32–5.03) | edu_int_score (int64, 0–5) |
|---|---|---|---|---|---|---|---|---|---|
espider/espider/config_default.py | MeteorsHub/espider | 1 | 6615351 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
espider.conf.config_default.py
---------------------------------------------------------------------------------------
Set up the default config. If you want to change some configs, please do so in
config_override.py in your project path.
:Copyright (c) 2016 MeteorKepler
:license: MIT, see LICENSE for more details.
"""
__author__ = 'MeteorKepler'
__all__ = [
'configs',
]
configs = {
'logging':{
'level':'INFO',
'filelog':True,
'formatter_style':0,
'filename':'resources/espider.log',
'filemode':'a'
},
'mysql':{
'host':'localhost',
'port':3316,
'user':'root',
'password':'<PASSWORD>',
'db':'espider',
'table':'default'
},
'proxy':{
'rescrab':False,
'retest':False,
'srcname':'resources/proxy.pro',
'mode':1,
'timeout':3,
'proxysrc':2,
'srcpage':1
},
'urlrequest':{
'User-Agent':['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5',
]
},
'http':{
'sleeptime':0.4,
'timeout':5,
'retry':3,
'proxy':False,
'selephan':False
},
'selephan':{
'timeout':5,
'loadimages':False
},
'spider':{
'retry':5,
'catalogueLimit':'inf',
'contentLimit':'inf',
'pipelinepath':'pipeline/',
'cataloguefilename':'pipeline/catalogueUrl.txt',
'contentfilename':'pipeline/contentUrl.txt',
'contentupdatefilename':'pipeline/contentUpdateList.txt',
'contentbackuppath':'pipeline/backup/',
'contentdatapath':'pipeline/data/',
'uncatchableurlfilename':'pipeline/uncatchable.txt',
'loadurllistfromfile':False,
'mode':'override'
},
'parse':{
'file':True,
'contentpath':'pipeline/parsedData/',
'contentfile':'dataDict.txt',
'savemethod':'w+',
'mysql':False
}
} | en | 0.444204 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- espider.conf.config_default.py --------------------------------------------------------------------------------------- Set up the default config. If you want to change some configs, please do so in config_override.py in your project path. :Copyright (c) 2016 MeteorKepler :license: MIT, see LICENSE for more details. | 1.58461 | 2 |
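The config_override.py mechanism mentioned in the docstring above is not part of this record; a minimal sketch of how a project-level override could be merged into these defaults (the deep_update helper is our assumption, not espider's actual loader):

import copy

def deep_update(base: dict, override: dict) -> dict:
    # Recursively copy `override` entries on top of `base`, preserving
    # untouched default keys such as configs['logging']['filemode'].
    merged = copy.deepcopy(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = deep_update(merged[key], value)
        else:
            merged[key] = value
    return merged

# e.g. raise the log level and point MySQL at the standard port:
overrides = {'logging': {'level': 'DEBUG'}, 'mysql': {'port': 3306}}
effective = deep_update(configs, overrides)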
CODE/Text_to_CSV.py | saicharan637/Geo-Mapping-of-Entities-Using-BERT | 0 | 6615352 | <gh_stars>0
import csv
import os
import pandas as pd
from pandas.io.common import EmptyDataError
os.chdir('C:/Users/<NAME>/Desktop/data')
from pathlib import Path
with open('data.csv', 'w', encoding='utf-8') as out_file:
    csv_out = csv.writer(out_file)
    # one CSV row per .txt file in the working directory
    for fileName in Path('.').glob('*.txt'):
        lines = []
        with open(str(fileName.absolute()), 'rb') as one_text:
            for line in one_text.readlines():
                lines.append(line.decode(encoding='utf-8', errors='ignore').strip())
        # note: str.join() puts the file name *between* the decoded lines
        csv_out.writerow([str(fileName).join(lines)])
df = pd.read_csv('C:/Users/<NAME>/Desktop/data/data.csv')
df.dropna(axis=0, how='all',inplace=True)
df.to_csv('new.csv', index=False) | none | 1 | 3.144723 | 3 |
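A worked illustration of the row-building step above (the file name and contents are made up): because the script calls str.join() with the file name as the separator, a file a.txt holding the lines "foo" and "bar" produces the single cell "fooa.txtbar".

assert "a.txt".join(["foo", "bar"]) == "fooa.txtbar"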
hoomd/deprecated/test-py/test_analyze_msd.py | PetersResearchGroup/PCND | 2 | 6615353 | <gh_stars>1-10
# -*- coding: iso-8859-1 -*-
# Maintainer: joaander
import hoomd;
hoomd.context.initialize()
from hoomd import deprecated
import unittest
import os
import tempfile
# unit tests for deprecated.analyze.msd
class analyze_msd_tests (unittest.TestCase):
def setUp(self):
print
self.s = deprecated.init.create_random(N=100, phi_p=0.05);
hoomd.context.current.sorter.set_params(grid=8)
if hoomd.comm.get_rank() == 0:
tmp = tempfile.mkstemp(suffix='.test.log');
self.tmp_file = tmp[1];
else:
self.tmp_file = "invalid";
# tests basic creation of the analyzer
def test(self):
deprecated.analyze.msd(period = 10, filename=self.tmp_file, groups=[hoomd.group.all()]);
hoomd.run(100);
# tests with phase
def test_phase(self):
deprecated.analyze.msd(period = 10, filename=self.tmp_file, groups=[hoomd.group.all()], phase=0);
hoomd.run(100);
# test variable period
def test_variable(self):
deprecated.analyze.msd(period = lambda n: n*10, filename=self.tmp_file, groups=[hoomd.group.all()]);
hoomd.run(100);
# test error if no groups defined
    def test_no_groups(self):
self.assertRaises(RuntimeError, deprecated.analyze.msd, period=10, filename=self.tmp_file, groups=[]);
# test set_params
def test_set_params(self):
ana = deprecated.analyze.msd(period = 10, filename=self.tmp_file, groups=[hoomd.group.all()]);
ana.set_params(delimiter = ' ');
hoomd.run(100);
# test behavior upon changing number of particles
def test_change_num_ptls(self):
self.s.particles.types.add('B')
self.s.particles.add('B')
groupA = hoomd.group.type('A',update=True)
groupB = hoomd.group.type('B',update=True)
self.assertEqual(len(groupA),100)
self.assertEqual(len(groupB),1)
ana_A_ = deprecated.analyze.msd(period = 10, filename=self.tmp_file, groups=[groupA]);
self.s.particles.add('B')
ana_B = deprecated.analyze.msd(period = 10, filename=self.tmp_file+'_B', groups=[groupB]);
self.assertRaises(RuntimeError,self.s.particles.add, type='B')
if hoomd.comm.get_rank() == 0:
os.remove(self.tmp_file+'_B');
def tearDown(self):
hoomd.context.initialize();
if hoomd.comm.get_rank() == 0:
os.remove(self.tmp_file);
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| en | 0.561726 | # -*- coding: iso-8859-1 -*- # Maintainer: joaander # unit tests for deprecated.analyze.msd # tests basic creation of the analyzer # tests with phase # test variable period # test error if no groups defined # test set_params # test behavior upon changing number of particles | 2.379346 | 2 |
exchange_parsers/kraken.py | kelbykleinsasser/simple_taxes | 39 | 6615354 | <gh_stars>10-100
from decimal import Decimal
from datetime import datetime
from document_parser import DocumentParser
class KrakenParser(DocumentParser):
def __init__(self, *args, **kwargs):
kwargs['exchange_name'] = 'kraken'
kwargs['header'] = {
'created_at': 'time',
'amount': 'vol',
'fill_amount': 'cost',
'currency_pair': 'pair',
'type': 'type',
'price': 'price',
}
kwargs['header_rows'] = 0
super(KrakenParser, self).__init__(*args, **kwargs)
def process_row(self, row):
row['created_at'] = datetime.strptime(row['created_at'], '%Y-%m-%d %H:%M:%S.%f')
row['type'] = row['type'].lower()
row['currency_pair'] = self.process_currency_pair(row)
for key in ('amount', 'fill_amount', 'price'):
row[key] = abs(Decimal(row[key]))
return row
def process_currency_pair(self, row):
first = row['currency_pair'].split("X")[1].replace('BT', 'BTC')
second = row['currency_pair'].split("X")[-1].replace('BT', 'BTC')
return first + '-' + second
| none | 1 | 2.676096 | 3 |
src/archtive_code/test3.py | Musyue/mobile_robot | 6 | 6615355 | #! /usr/bin/env python
# coding=utf-8
from ctypes import *
import time
VCI_USBCAN2A = 4
STATUS_OK = 1
# class VCI_INIT_CONFIG(Structure):
# _fields_ = [("AccCode", c_ulong),
# ("AccMask", c_ulong),
# ("Reserved", c_ulong),
# ("Filter", c_ubyte),
# ("Timing0", c_ubyte),
# ("Timing1", c_ubyte),
# ("Mode", c_ubyte)
# ]
# class VCI_CAN_OBJ(Structure):
# _fields_ = [("ID", c_uint),
# ("TimeStamp", c_uint),
# ("TimeFlag", c_ubyte),
# ("SendType", c_ubyte),
# ("RemoteFlag", c_ubyte),
# ("ExternFlag", c_ubyte),
# ("DataLen", c_ubyte),
# ("Data", c_ubyte*8),
# ("Reserved", c_ubyte*3)
# ]
class VCI_BOARD_INFO(Structure):
_fields_ = [('hw_Version',c_ushort),
('fw_Version',c_ushort),
('dr_Version',c_ushort),
('in_Version',c_ushort),
('irq_Num',c_ushort),
('can_Num',c_byte),
('str_Serial_Num',c_char*20),
('str_hw_Type',c_char*40),
('Reserved',c_ushort*4)
]
class VCI_CAN_OBJ(Structure):
_fields_ = [('ID',c_uint),
('TimeStamp',c_uint),
('TimeFlag',c_byte),
('SendType',c_byte),
('RemoteFlag',c_byte),
('ExternFlag',c_byte),
('DataLen',c_byte),
('Data',c_ubyte*8),
('Reserved',c_ubyte*3)
]
class VCI_INIT_CONFIG(Structure):
_fields_ = [('AccCode',c_uint),
('AccMask',c_uint),
('Reserved',c_uint),
('Filter',c_ubyte),
('Timing0',c_ubyte),
('Timing1',c_ubyte),
('Mode',c_ubyte)
]
canDLL = cdll.LoadLibrary('../lib/libcontrolcan.so')
# canDLL = windll.LoadLibrary(CanDLLName)
# print(CanDLLName)
ret = canDLL.VCI_OpenDevice(VCI_USBCAN2A, 0, 0)
print(ret)
if ret != STATUS_OK:
    print('Error calling VCI_OpenDevice\r\n')
# initialize channel 0
vci_initconfig = VCI_INIT_CONFIG(0x00000000, 0xFFFFFFFF, 0, 0, 0x00, 0x1C, 0)
ret = canDLL.VCI_InitCAN(4, 0, 0, byref(vci_initconfig))
if ret != STATUS_OK:
    print('Error calling VCI_InitCAN\r\n')
ret = canDLL.VCI_StartCAN(VCI_USBCAN2A, 0, 0)
if ret != STATUS_OK:
    print('Error calling VCI_StartCAN\r\n')
# # initialize channel 1
# ret = canDLL.VCI_InitCAN(VCI_USBCAN2A, 0, 1, byref(vci_initconfig))
# if ret != STATUS_OK:
#     print('Error calling VCI_InitCAN 1\r\n')
# ret = canDLL.VCI_StartCAN(VCI_USBCAN2A, 0, 1)
# if ret != STATUS_OK:
#     print('Error calling VCI_StartCAN 1\r\n')
# send data on channel 0
ubyte_array = c_ubyte*8
a = ubyte_array(0x04,0x01,0x01,0x00)
ubyte_3array = c_ubyte*3
b = ubyte_3array(0, 0 , 0)
vci_can_obj_1 = VCI_CAN_OBJ(1, 0, 0, 0, 0, 0, 8, a, b)
ret = canDLL.VCI_Transmit(4, 0, 0, byref(vci_can_obj_1), 1)
time.sleep(0.3)
print("i send data 1",ret)
#
# send data on channel 0
ubyte_array = c_ubyte*8
a = ubyte_array(0x04,0x02,0x01,0x00)
ubyte_3array = c_ubyte*3
b = ubyte_3array(0, 0 , 0)
vci_can_obj_2 = VCI_CAN_OBJ(2, 0, 0, 0, 0, 0, 8, a, b)
ret = canDLL.VCI_Transmit(4, 0, 0, byref(vci_can_obj_2), 1)
time.sleep(0.3)
print("i send data 2",ret)
####
# send data on channel 0
ubyte_array = c_ubyte*8
a = ubyte_array(0x04,0x03,0x01,0x00)
ubyte_3array = c_ubyte*3
b = ubyte_3array(0, 0 , 0)
vci_can_obj_3 = VCI_CAN_OBJ(3, 0, 0, 0, 0, 0, 8, a, b)
ret = canDLL.VCI_Transmit(4, 0, 0, byref(vci_can_obj_3), 1)
time.sleep(0.3)
print("i send data 3",ret)
###
# send data on channel 0
ubyte_array = c_ubyte*8
a = ubyte_array(0x04,0x04,0x01,0x00)
ubyte_3array = c_ubyte*3
b = ubyte_3array(0, 0 , 0)
vci_can_obj_4 = VCI_CAN_OBJ(4, 0, 0, 0, 0, 0, 8, a, b)
ret = canDLL.VCI_Transmit(4, 0, 0, byref(vci_can_obj_4), 1)
print("i send data 4",ret)
if ret != STATUS_OK:
    print('Error calling VCI_Transmit\r\n')
time.sleep(0.3)
# receive data on channel 1
time.sleep(1)
a = ubyte_array(0, 0, 0, 0, 0, 0, 0, 0)
vci_can_obj_01 = VCI_CAN_OBJ(0x0, 0, 0, 0, 0, 0, 8, a, b)
vci_can_obj_02=VCI_CAN_OBJ()
vci_can_obj_03=VCI_CAN_OBJ()
vci_can_obj_04=VCI_CAN_OBJ()
# elems = (POINTER(VCI_CAN_OBJ) * 2500)()
# vci_can_obj_arrar=cast(elems,POINTER(POINTER(VCI_CAN_OBJ)))
# ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_arrar), 2500, 0)
# print("loop out data",ret)
ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_01), 1, 0)
time.sleep(0.1)
# ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_02), 1, 0)
# time.sleep(0.1)
# ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_03), 1, 0)
# time.sleep(0.1)
# ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_04), 1, 0)
# time.sleep(0.1)
while ret <= 0:
    print('Error calling VCI_Receive\r\n')
    ret = canDLL.VCI_Transmit(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_1), 1)
    print("i send data 1", ret)
    # ret = canDLL.VCI_Transmit(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_2), 1)
    # print("i send data 2",ret)
    # ret = canDLL.VCI_Transmit(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_3), 1)
    # print("i send data 3",ret)
    # ret = canDLL.VCI_Transmit(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_4), 1)
    # print("i send data 4",ret)
    # if ret != STATUS_OK:
    #     print('Error calling VCI_Transmit\r\n')
ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_01), 1, 0)
time.sleep(0.1)
# ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_02), 1, 0)
# time.sleep(0.1)
# ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_03), 1, 0)
# time.sleep(0.1)
# ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_04), 1, 0)
# time.sleep(0.1)
# time.sleep(0.3)
# a = ubyte_array(0, 0, 0, 0, 0, 0, 0, 0)
# vci_can_obj = VCI_CAN_OBJ(0x0, 0, 0, 0, 0, 0, 8, a, b)
while 1:
if ret > 0:
print("I receive data la",ret)
# print("I receive",len(vci_can_obj_arrar))
# for i in len(vci_can_obj_arrar):
# print("my data",list(vci_can_obj_arrar[0].Data))
print(vci_can_obj_01.DataLen)
print('my data',list(vci_can_obj_01.Data))
# print(vci_can_obj_02.DataLen)
# print('my data',list(vci_can_obj_02.Data))
# print(vci_can_obj_03.DataLen)
# print('my data',list(vci_can_obj_03.Data))
# print(vci_can_obj_04.DataLen)
# print('my data',list(vci_can_obj_04.Data))
# for i in list(vci_can_obj.Data):
# print "i",hex(i)
time.sleep(0.5)
# close the device
canDLL.VCI_CloseDevice(VCI_USBCAN2A, 0) | en | 0.279537 | #! /usr/bin/env python # coding=utf-8 # class VCI_INIT_CONFIG(Structure): # _fields_ = [("AccCode", c_ulong), # ("AccMask", c_ulong), # ("Reserved", c_ulong), # ("Filter", c_ubyte), # ("Timing0", c_ubyte), # ("Timing1", c_ubyte), # ("Mode", c_ubyte) # ] # class VCI_CAN_OBJ(Structure): # _fields_ = [("ID", c_uint), # ("TimeStamp", c_uint), # ("TimeFlag", c_ubyte), # ("SendType", c_ubyte), # ("RemoteFlag", c_ubyte), # ("ExternFlag", c_ubyte), # ("DataLen", c_ubyte), # ("Data", c_ubyte*8), # ("Reserved", c_ubyte*3) # ] # canDLL = windll.LoadLibrary(CanDLLName) # print(CanDLLName) # initialize channel 0 # # initialize channel 1 # ret = canDLL.VCI_InitCAN(VCI_USBCAN2A, 0, 1, byref(vci_initconfig)) # if ret != STATUS_OK: # print('Error calling VCI_InitCAN 1\r\n') # ret = canDLL.VCI_StartCAN(VCI_USBCAN2A, 0, 1) # if ret != STATUS_OK: # print('Error calling VCI_StartCAN 1\r\n') # send data on channel 0 # # send data on channel 0 #### # send data on channel 0 ### # send data on channel 0 # receive data on channel 1 # elems = (POINTER(VCI_CAN_OBJ) * 2500)() # vci_can_obj_arrar=cast(elems,POINTER(POINTER(VCI_CAN_OBJ))) # ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_arrar), 2500, 0) # print("loop out data",ret) # ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_02), 1, 0) # time.sleep(0.1) # ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_03), 1, 0) # time.sleep(0.1) # ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_04), 1, 0) # time.sleep(0.1) # ret = canDLL.VCI_Transmit(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_2), 1) # print("i send data 2",ret) # ret = canDLL.VCI_Transmit(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_3), 1) # print("i send data 3",ret) # ret = canDLL.VCI_Transmit(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_4), 1) # print("i send data 4",ret) # if ret != STATUS_OK: # print('Error calling VCI_Transmit\r\n') # ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_02), 1, 0) # time.sleep(0.1) # ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_03), 1, 0) # time.sleep(0.1) # ret = canDLL.VCI_Receive(VCI_USBCAN2A, 0, 0, byref(vci_can_obj_04), 1, 0) # time.sleep(0.1) # time.sleep(0.3) # a = ubyte_array(0, 0, 0, 0, 0, 0, 0, 0) # vci_can_obj = VCI_CAN_OBJ(0x0, 0, 0, 0, 0, 0, 8, a, b) # print("I receive",len(vci_can_obj_arrar)) # for i in len(vci_can_obj_arrar): # print("my data",list(vci_can_obj_arrar[0].Data)) # print(vci_can_obj_02.DataLen) # print('my data',list(vci_can_obj_02.Data)) # print(vci_can_obj_03.DataLen) # print('my data',list(vci_can_obj_03.Data)) # print(vci_can_obj_04.DataLen) # print('my data',list(vci_can_obj_04.Data)) # for i in list(vci_can_obj.Data): # print "i",hex(i) # close the device | 1.963969 | 2 |
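The four copy-pasted transmit blocks in the script above differ only in CAN ID and payload; a sketch of a helper that folds them together, reusing the ctypes structures defined in the file (the send_frame name is ours, not the script's):

from ctypes import c_ubyte, byref

def send_frame(can_id, payload):
    # ctypes zero-fills the unused tail of the 8-byte CAN data field
    data = (c_ubyte * 8)(*payload)
    reserved = (c_ubyte * 3)(0, 0, 0)
    frame = VCI_CAN_OBJ(can_id, 0, 0, 0, 0, 0, 8, data, reserved)
    return canDLL.VCI_Transmit(VCI_USBCAN2A, 0, 0, byref(frame), 1)

# equivalent to the first transmit block above:
# send_frame(1, [0x04, 0x01, 0x01, 0x00])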
inheritance.py | kmarcini/Learn-Python---Full-Course-for-Beginners-Tutorial- | 0 | 6615356 | from Chef import Chef
from Chinese_Chef import ChineseChef
myChef = Chef()
myChef.make_chicken()
myChef.make_special_dish()
myChef2 = ChineseChef()
myChef2.make_special_dish()
| none | 1 | 1.56749 | 2 |
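The Chef and Chinese_Chef modules imported above are not part of this record; a minimal sketch of what they plausibly define, so the snippet runs (the method bodies are illustrative guesses):

class Chef:
    def make_chicken(self):
        print("The chef makes chicken")

    def make_special_dish(self):
        print("The chef makes the house special")

class ChineseChef(Chef):
    def make_special_dish(self):
        # overrides the parent's special dish
        print("The chef makes orange chicken")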
django_gumroad/users/migrations/0003_auto_20210505_1527.py | romien94/django_gumroad | 0 | 6615357 | <reponame>romien94/django_gumroad<filename>django_gumroad/users/migrations/0003_auto_20210505_1527.py
# Generated by Django 3.1.9 on 2021-05-05 12:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0002_userlibrary'),
]
operations = [
migrations.AlterModelOptions(
name='userlibrary',
options={'verbose_name_plural': 'User libraries'},
),
]
| en | 0.804231 | # Generated by Django 3.1.9 on 2021-05-05 12:27 | 1.434478 | 1 |
load_data.py | KnightZhang625/DARTS_NAS_TextClassification | 2 | 6615358 | <gh_stars>1-10
# encoding:utf-8
import copy
import codecs
import pickle
import random
import numpy as np
from pathlib import Path
cur_path = Path(__file__).absolute().parent
with codecs.open(cur_path / 'data/embedding.pt', 'rb') as file:
EMBEDDING = pickle.load(file)
MAX_LEN = 20
class Data(object):
def __init__(self, data_path, label_path):
self.train_X, self.train_y = self._read(data_path), self._read(label_path)
assert len(self.train_X) == len(self.train_y)
def _read(self, path):
with codecs.open(path, 'r', 'utf-8') as file:
lines = file.read().split('\n')[:-1]
return lines
def get(self, batch_size):
"""do not use test data here"""
self._shuffle()
data_size = len(self.train_X)
if data_size % batch_size == 0:
batch_number = int(data_size / batch_size)
else:
batch_number = int(data_size // batch_size + 1)
for bn in range(batch_number):
if bn < batch_number - 1:
start = bn * batch_size
end = start + batch_size
else:
start = bn * batch_size
end = None
train_X_batch = self.train_X[start : end]
train_y_batch = self.train_y[start : end]
train_X_processed = np.expand_dims(np.array(list(map(self._convert_to_embedding, train_X_batch))), axis=1)
train_y_processed = np.array(list(map(self._convert_to_int, train_y_batch)))
yield(train_X_processed, train_y_processed)
def _convert_to_embedding(self, sentence):
if len(sentence) < MAX_LEN:
padding_str = ''.join(['*' for _ in range(MAX_LEN - len(sentence))])
sentence += padding_str
else:
sentence = sentence[:MAX_LEN]
assert len(sentence) == MAX_LEN
array = []
for vocab in sentence:
if vocab != '*' and vocab in EMBEDDING:
array.append(EMBEDDING[vocab])
elif vocab == '*':
array.append(EMBEDDING['<pad>'])
else:
array.append(EMBEDDING['<unk>'])
return np.array(array)
def _convert_to_int(self, label):
assert type(label) == str
assert len(label) <= 2
return int(label)
def _shuffle(self):
data_label = list(zip(self.train_X, self.train_y))
random.shuffle(data_label)
train_X_temp, train_y_temp = zip(*data_label)
self.train_X = copy.deepcopy(list(train_X_temp))
self.train_y = copy.deepcopy(list(train_y_temp))
def __len__(self):
return len(self.train_X)
if __name__ == '__main__':
data = Data('train_x', 'train_y')
for (X, y) in data.get(3):
print(X[0:2, :].shape)
print(type(y))
input() | en | 0.62124 | # encoding:utf-8 do not use test data here | 2.451476 | 2 |
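data/embedding.pt is read at import time above but never produced in this record; a hedged sketch of a compatible file — a pickled dict mapping each character to a fixed-size vector, plus the '<pad>' and '<unk>' entries the loader expects (the 300-dim size and the toy vocabulary are assumptions):

import pickle
import numpy as np

vocab = ['我', '你', '好']                               # toy vocabulary
emb = {ch: np.random.randn(300).astype(np.float32) for ch in vocab}
emb['<pad>'] = np.zeros(300, dtype=np.float32)           # used for padded positions
emb['<unk>'] = np.random.randn(300).astype(np.float32)   # out-of-vocabulary chars
with open('data/embedding.pt', 'wb') as f:
    pickle.dump(emb, f)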
nets/NetOneLayer.py | vkhachatryan/CNM | 6 | 6615359 | import torch
import torch.nn as nn
import torch.nn.functional as F
class NetOneLayer(nn.Module):
def __init__(self, n_hidden=2**8):
# n_hidden - number of hidden units
super(NetOneLayer, self).__init__()
self.n_hidden = n_hidden
self.W1 = nn.Parameter(torch.randn(784, self.n_hidden, requires_grad=True))
self.W2 = nn.Parameter(torch.randn(self.n_hidden, 10, requires_grad=True))
def forward(self, x):
# x has shape (n_samples, 1, 28, 28)
x = x.view(x.size()[0], -1)
x = torch.sigmoid(x.mm(self.W1))
y_pred = F.softmax(x.mm(self.W2), dim=1)
return y_pred | en | 0.7845 | # n_hidden - number of hidden units # x has shape (n_samples, 1, 28, 28) | 3.108112 | 3 |
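A minimal smoke-test of NetOneLayer, assuming MNIST-shaped input; the batch size, hidden width, and random targets are illustrative:

import torch
import torch.nn.functional as F

model = NetOneLayer(n_hidden=256)
x = torch.randn(32, 1, 28, 28)               # dummy batch of 28x28 images
y_pred = model(x)                            # shape (32, 10); each row sums to 1
targets = torch.randint(0, 10, (32,))
# forward() already applies softmax, so take the log before NLL
loss = F.nll_loss(torch.log(y_pred + 1e-12), targets)
loss.backward()                              # gradients land in model.W1 / model.W2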
whendo/core/hooks.py | electronhead/whendo | 1 | 6615360 | <reponame>electronhead/whendo
from datetime import datetime
from typing import Callable, Dict, Set, Optional
from .util import KeyTagMode
class DispatcherHooks:
schedule_program_thunk: Callable
unschedule_program_thunk: Callable
unschedule_active_program_thunk: Callable
schedule_action_thunk: Callable
unschedule_scheduler_action_thunk: Callable
unschedule_scheduler_thunk: Callable
defer_action_thunk: Callable
expire_action_thunk: Callable
clear_all_deferred_actions_thunk: Callable
clear_all_expiring_actions_thunk: Callable
get_server_thunk: Callable
get_servers_thunk: Callable
get_servers_by_tags_thunk: Callable
get_action_thunk: Callable
clear_all_scheduling_thunk: Callable
unschedule_all_schedulers_thunk: Callable
get_scheduling_info_thunk: Callable
get_dispatcher_dump_thunk: Callable
@classmethod
def init(
cls,
schedule_program_thunk: Callable,
unschedule_program_thunk: Callable,
unschedule_active_program_thunk: Callable,
schedule_action_thunk: Callable,
unschedule_scheduler_action_thunk: Callable,
unschedule_scheduler_thunk: Callable,
defer_action_thunk: Callable,
expire_action_thunk: Callable,
clear_all_deferred_actions_thunk: Callable,
clear_all_expiring_actions_thunk: Callable,
get_server_thunk: Callable,
get_servers_thunk: Callable,
get_servers_by_tags_thunk: Callable,
get_action_thunk: Callable,
clear_all_scheduling_thunk: Callable,
unschedule_all_schedulers_thunk: Callable,
get_scheduling_info_thunk: Callable,
get_dispatcher_dump_thunk: Callable,
):
cls.schedule_program_thunk = schedule_program_thunk
cls.unschedule_program_thunk = unschedule_program_thunk
cls.unschedule_active_program_thunk = unschedule_active_program_thunk
cls.schedule_action_thunk = schedule_action_thunk
cls.unschedule_scheduler_action_thunk = unschedule_scheduler_action_thunk
cls.unschedule_scheduler_thunk = unschedule_scheduler_thunk
cls.defer_action_thunk = defer_action_thunk
cls.expire_action_thunk = expire_action_thunk
cls.clear_all_deferred_actions_thunk = clear_all_deferred_actions_thunk
cls.clear_all_expiring_actions_thunk = clear_all_expiring_actions_thunk
cls.get_server_thunk = get_server_thunk
cls.get_servers_thunk = get_servers_thunk
cls.get_servers_by_tags_thunk = get_servers_by_tags_thunk
cls.get_action_thunk = get_action_thunk
cls.clear_all_scheduling_thunk = clear_all_scheduling_thunk
cls.unschedule_all_schedulers_thunk = unschedule_all_schedulers_thunk
cls.get_scheduling_info_thunk = get_scheduling_info_thunk
cls.get_dispatcher_dump_thunk = get_dispatcher_dump_thunk
@classmethod
def schedule_program(cls, program_name: str, start: datetime, stop: datetime):
return cls.schedule_program_thunk(
program_name=program_name, start=start, stop=stop
)
@classmethod
def unschedule_program(cls, program_name: str):
return cls.unschedule_program_thunk(program_name=program_name)
@classmethod
def unschedule_active_program(cls, program_name: str):
return cls.unschedule_active_program_thunk(program_name=program_name)
@classmethod
def schedule_action(cls, scheduler_name: str, action_name: str):
return cls.schedule_action_thunk(
scheduler_name=scheduler_name, action_name=action_name
)
@classmethod
def unschedule_scheduler_action(cls, scheduler_name: str, action_name: str):
return cls.unschedule_scheduler_action_thunk(
scheduler_name=scheduler_name, action_name=action_name
)
@classmethod
def unschedule_scheduler(cls, scheduler_name: str):
return cls.unschedule_scheduler_thunk(scheduler_name=scheduler_name)
@classmethod
def defer_action(cls, scheduler_name: str, action_name: str, wait_until: datetime):
return cls.defer_action_thunk(
scheduler_name=scheduler_name,
action_name=action_name,
wait_until=wait_until,
)
@classmethod
def expire_action(cls, scheduler_name: str, action_name: str, expire_on: datetime):
return cls.expire_action_thunk(
scheduler_name=scheduler_name, action_name=action_name, expire_on=expire_on
)
@classmethod
def clear_all_deferred_actions(cls):
return cls.clear_all_deferred_actions_thunk()
@classmethod
def clear_all_expiring_actions(cls):
return cls.clear_all_expiring_actions_thunk()
@classmethod
def get_server(cls, server_name: str):
return cls.get_server_thunk(server_name=server_name)
@classmethod
def get_servers(cls):
return cls.get_servers_thunk()
@classmethod
def get_servers_by_tags(
cls,
key_tags: Optional[Dict[str, Set[str]]] = None,
key_tag_mode: KeyTagMode = KeyTagMode.ANY,
):
return cls.get_servers_by_tags_thunk(
key_tags=key_tags, key_tag_mode=key_tag_mode
)
@classmethod
def get_action(cls, action_name: str):
return cls.get_action_thunk(action_name=action_name)
@classmethod
def clear_all_scheduling(cls):
return cls.clear_all_scheduling_thunk()
@classmethod
def unschedule_all_schedulers(cls):
return cls.unschedule_all_schedulers_thunk()
@classmethod
def get_scheduling_info(cls):
return cls.get_scheduling_info_thunk()
@classmethod
def get_dispatcher_dump(cls):
return cls.get_dispatcher_dump_thunk()
| none | 1 | 2.240656 | 2 |
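DispatcherHooks is a late-binding registry: a dispatcher hands in one callable per hook at startup, and everything else calls the classmethods. A minimal wiring sketch — FakeDispatcher and the no-op lambdas below are illustrative stand-ins, not whendo's real dispatcher:

class FakeDispatcher:
    def schedule_action(self, scheduler_name, action_name):
        return f"scheduled {action_name} under {scheduler_name}"

_noop = lambda *args, **kwargs: None
fake = FakeDispatcher()
DispatcherHooks.init(
    schedule_program_thunk=_noop,
    unschedule_program_thunk=_noop,
    unschedule_active_program_thunk=_noop,
    schedule_action_thunk=fake.schedule_action,
    unschedule_scheduler_action_thunk=_noop,
    unschedule_scheduler_thunk=_noop,
    defer_action_thunk=_noop,
    expire_action_thunk=_noop,
    clear_all_deferred_actions_thunk=_noop,
    clear_all_expiring_actions_thunk=_noop,
    get_server_thunk=_noop,
    get_servers_thunk=_noop,
    get_servers_by_tags_thunk=_noop,
    get_action_thunk=_noop,
    clear_all_scheduling_thunk=_noop,
    unschedule_all_schedulers_thunk=_noop,
    get_scheduling_info_thunk=_noop,
    get_dispatcher_dump_thunk=_noop,
)
print(DispatcherHooks.schedule_action(scheduler_name="hourly", action_name="ping"))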
sfaira/data/dataloaders/loaders/d10_1126_science_abj4008/homosapiens_blood_2022_10x3v3_schmidt_001.py | johnmous/sfaira | 0 | 6615361 | import os
import pandas as pd
import scanpy as sc
def load(data_dir, sample_fn, **kwargs):
adata = sc.read_10x_mtx(data_dir, prefix="GSE190604_")
fn_meta = os.path.join(data_dir, "GSE190604_cellranger-guidecalls-aggregated-unfiltered.txt.gz")
tab_meta = pd.read_csv(fn_meta, compression="gzip", sep="\t")
tab_meta.index = tab_meta["cell_barcode"].values
del tab_meta["cell_barcode"]
adata.obs = pd.concat([adata.obs, tab_meta], axis=1)
return adata
| none | 1 | 2.296808 | 2 |
third_party/blink/tools/blinkpy/web_tests/try_flag_unittest.py | zealoussnow/chromium | 14,668 | 6615362 | <reponame>zealoussnow/chromium<filename>third_party/blink/tools/blinkpy/web_tests/try_flag_unittest.py
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from blinkpy.common.host_mock import MockHost
from blinkpy.common.net.git_cl import TryJobStatus
from blinkpy.common.net.git_cl_mock import MockGitCL
from blinkpy.common.net.results_fetcher import Build
from blinkpy.common.net.web_test_results import WebTestResults
from blinkpy.common.path_finder import PathFinder
from blinkpy.web_tests.try_flag import TryFlag
class TryFlagTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
self.linux_build = Build('linux-rel', 100)
self.mac_build = Build('mac-rel', 101)
self.win_build = Build('win7-rel', 102)
self.mock_try_results = {
self.linux_build: TryJobStatus('COMPLETED', 'SUCCESS'),
self.win_build: TryJobStatus('COMPLETED', 'SUCCESS'),
self.mac_build: TryJobStatus('COMPLETED', 'SUCCESS')
}
super(TryFlagTest, self).__init__(*args, **kwargs)
def _run_trigger_test(self, regenerate):
host = MockHost()
git = host.git()
git_cl = MockGitCL(host)
finder = PathFinder(host.filesystem)
flag_file = finder.path_from_web_tests(
'additional-driver-flag.setting')
flag_expectations_file = finder.path_from_web_tests(
'FlagExpectations', 'foo')
cmd = ['trigger', '--flag=--foo']
if regenerate:
cmd.append('--regenerate')
TryFlag(cmd, host, git_cl).run()
expected_added_paths = {flag_file}
expected_commits = [[
'Flag try job: force --foo for run_web_tests.py.'
]]
if regenerate:
expected_added_paths.add(flag_expectations_file)
expected_commits.append(
['Flag try job: clear expectations for --foo.'])
self.assertEqual(git.added_paths, expected_added_paths)
self.assertEqual(git.local_commits(), expected_commits)
self.assertEqual(git_cl.calls, [[
'git', 'cl', 'upload', '--bypass-hooks', '-f', '-m',
'Flag try job for --foo.'
], [
'git', 'cl', 'try', '-B', 'luci.chromium.try', '-b', 'linux-rel'
], [
'git', 'cl', 'try', '-B', 'luci.chromium.try', '-b', 'mac-rel'
], ['git', 'cl', 'try', '-B', 'luci.chromium.try', '-b', 'win7-rel']])
def test_trigger(self):
self._run_trigger_test(regenerate=False)
self._run_trigger_test(regenerate=True)
def _setup_mock_results(self, results_fetcher):
results_fetcher.set_results(
self.linux_build,
WebTestResults({
'tests': {
'something': {
'fail-everywhere.html': {
'expected': 'FAIL',
'actual': 'FAIL',
'is_unexpected': True
},
'fail-win-and-linux.html': {
'expected': 'FAIL',
'actual': 'FAIL',
'is_unexpected': True
}
}
}
}))
results_fetcher.set_results(
self.win_build,
WebTestResults({
'tests': {
'something': {
'fail-everywhere.html': {
'expected': 'FAIL',
'actual': 'FAIL',
'is_unexpected': True
},
'fail-win-and-linux.html': {
'expected': 'FAIL',
'actual': 'FAIL',
'is_unexpected': True
}
}
}
}))
results_fetcher.set_results(
self.mac_build,
WebTestResults({
'tests': {
'something': {
'pass-unexpectedly-mac.html': {
'expected': 'FAIL',
'actual': 'PASS',
'is_unexpected': True
},
'fail-everywhere.html': {
'expected': 'FAIL',
'actual': 'FAIL',
'is_unexpected': True
}
}
}
}))
def test_update(self):
host = MockHost()
filesystem = host.filesystem
finder = PathFinder(filesystem)
flag_expectations_file = finder.path_from_web_tests(
'FlagExpectations', 'foo')
filesystem.write_text_file(
flag_expectations_file,
'# results: [ Failure ]\nsomething/pass-unexpectedly-mac.html [ Failure ]'
)
self._setup_mock_results(host.results_fetcher)
cmd = ['update', '--flag=--foo']
TryFlag(cmd, host, MockGitCL(host, self.mock_try_results)).run()
def results_url(build):
return '%s/%s/%s/%s/layout-test-results/results.html' % (
'https://test-results.appspot.com/data/layout_results',
build.builder_name, build.build_number,
'blink_web_tests%20%28with%20patch%29')
self.assertEqual(
host.stdout.getvalue(), '\n'.join([
'Fetching results...',
'-- Linux: %s' % results_url(self.linux_build),
'-- Mac: %s' % results_url(self.mac_build),
'-- Win: %s' % results_url(self.win_build), '',
'### 1 unexpected passes:', '',
'[ Mac ] something/pass-unexpectedly-mac.html [ Pass ]', '',
'### 5 unexpected failures:', '',
'[ Linux ] something/fail-everywhere.html [ Failure ]',
'[ Mac ] something/fail-everywhere.html [ Failure ]',
'[ Win ] something/fail-everywhere.html [ Failure ]',
'[ Linux ] something/fail-win-and-linux.html [ Failure ]',
'[ Win ] something/fail-win-and-linux.html [ Failure ]', ''
]))
def test_update_irrelevant_unexpected_pass(self):
host = MockHost()
filesystem = host.filesystem
finder = PathFinder(filesystem)
flag_expectations_file = finder.path_from_web_tests(
'FlagExpectations', 'foo')
self._setup_mock_results(host.results_fetcher)
cmd = ['update', '--flag=--foo']
# Unexpected passes that don't have flag-specific failure expectations
# should not be reported.
filesystem.write_text_file(flag_expectations_file, '')
TryFlag(cmd, host, MockGitCL(host, self.mock_try_results)).run()
self.assertTrue('### 0 unexpected passes' in host.stdout.getvalue())
def test_invalid_action(self):
host = MockHost()
cmd = ['invalid', '--flag=--foo']
TryFlag(cmd, host, MockGitCL(host)).run()
self.assertEqual(host.stderr.getvalue(),
'specify "trigger" or "update"\n')
| en | 0.856306 | # Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. ## 1 unexpected passes:', '', ## 5 unexpected failures:', '', # Unexpected passes that don't have flag-specific failure expectations # should not be reported. ## 0 unexpected passes' in host.stdout.getvalue()) | 1.728271 | 2 |
SET3/QUEST6.py | w00kie/StellarQuestPythonSDK | 0 | 6615363 | <reponame>w00kie/StellarQuestPythonSDK
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright 2021 mRuggi <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
from stellar_sdk import Keypair,Server,Network,TransactionBuilder
import base64
from math import ceil
import requests
data=open("nameoftheimage.png","rb").read() #open the file (binary read) the image should be of low dimension
b64=base64.b64encode(data) #encode it into base 64 string binary
b64str=str(b64)[2:] #convert into a normal string removing b'
print(b64str) #print it
numdivide=ceil((len(b64str))/128) #every manage data has 2 64bit entry
numop=numdivide+ceil(numdivide*2/128) #foreach manage data op we add 2 indexing chars
keypair=Keypair.from_secret("YOURSECRET")
print(keypair.public_key)
print()
server = Server(horizon_url="https://horizon-testnet.stellar.org")
#JUST TO SHOW OFF THE ACTUAL SPLIT
for i in range(numop):
if(i>=0 and i<=9): print("0"+str(i)+b64str[i*62+i*64:(i+1)*62+i*64])
else: print(str(i)+b64str[i*62+i*64:(i+1)*62+i*64])
print(b64str[(i+1)*62+i*64:(i+1)*62+(i+1)*64])
print()
tx= (
TransactionBuilder(
source_account = server.load_account(account_id=keypair.public_key),
network_passphrase=<PASSWORD>,
base_fee=10000)
)
for i in range(numop):
if(i>=0 and i<=9): tx.append_manage_data_op("0"+str(i)+b64str[i*62+i*64:(i+1)*62+i*64],b64str[(i+1)*62+i*64:(i+1)*62+(i+1)*64])
else: tx.append_manage_data_op(str(i)+b64str[i*62+i*64:(i+1)*62+i*64],b64str[(i+1)*62+i*64:(i+1)*62+(i+1)*64])
txtosign=tx.build()
txtosign.sign(keypair)
response = server.submit_transaction(txtosign)
print("\nTransaction hash: {}".format(response["hash"]))
print("Premi un tasto per continuare")
input()
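
# --- Illustrative reassembly sketch (not part of the original quest solution) ---
# A hedged sketch of the inverse of the split above, assuming each entry key is
# a two-digit index followed by 62 payload chars and each value carries 64
# payload chars; horizon returns data-entry values base64-encoded, so they are
# decoded first. The function name and horizon URL are assumptions.
def reassemble_image(account_id, horizon="https://horizon-testnet.stellar.org"):
    entries = requests.get(horizon + "/accounts/" + account_id).json()["data"]
    payload = ""
    for key in sorted(entries):  # the two-digit prefix keeps the chunk order
        value = base64.b64decode(entries[key]).decode("ascii")
        payload += key[2:] + value  # strip the index prefix, append the value
    return base64.b64decode(payload)  # the original PNG bytes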
test.py | maandree/python-arg | 1 | 6615364
#!/usr/bin/env python
# See LICENSE file for copyright and license details.
# -*- coding: utf-8 -*-
import sys
import arg
parser = arg.Parser()
assert parser.argv == sys.argv[1:]
parser = arg.Parser(argv = ['-a', '-aa', '-aaa'])
n = 0
for c in parser.flags:
assert c == 'a'
n += 1
assert n == 6
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
parser = arg.Parser(argv = ['-abc', '-xyz'])
flags = parser.flags
assert next(flags) == 'a'
assert parser.flag == '-a'
assert parser.symbol == '-'
assert next(flags) == 'b'
assert parser.flag == '-b'
assert next(flags) == 'c'
assert parser.flag == '-c'
assert next(flags) == 'x'
assert parser.flag == '-x'
assert parser.lflag == '-xyz'
assert next(flags) == 'y'
assert parser.flag == '-y'
assert parser.lflag is None
assert next(flags) == 'z'
assert parser.flag == '-z'
assert parser.symbol == '-'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
for mid in ('', '-', 'x'):
parser = arg.Parser(argv = ['-abc', mid, '-xyz'])
flags = parser.flags
assert next(flags) == 'a'
assert parser.flag == '-a'
assert parser.symbol == '-'
assert next(flags) == 'b'
assert parser.flag == '-b'
assert next(flags) == 'c'
assert parser.flag == '-c'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 2
assert type(parser.argc) is int
assert parser.argc == 2
assert parser.argv == [mid, '-xyz']
parser = arg.Parser(argv = ['-abc', '--', '-xyz'])
flags = parser.flags
assert next(flags) == 'a'
assert parser.flag == '-a'
assert parser.symbol == '-'
assert next(flags) == 'b'
assert parser.flag == '-b'
assert next(flags) == 'c'
assert parser.flag == '-c'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 1
assert type(parser.argc) is int
assert parser.argc == 1
assert parser.argv == ['-xyz']
parser = arg.Parser(argv = ['-abc', '--', '-xyz'], keep_dashdash = True)
flags = parser.flags
assert next(flags) == 'a'
assert parser.flag == '-a'
assert parser.symbol == '-'
assert next(flags) == 'b'
assert parser.flag == '-b'
assert next(flags) == 'c'
assert parser.flag == '-c'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 2
assert type(parser.argc) is int
assert parser.argc == 2
assert parser.argv == ['--', '-xyz']
parser = arg.Parser(argv = ['a', '--', 'b'], keep_dashdash = True, store_nonflags = True)
flags = parser.flags
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 3
assert type(parser.argc) is int
assert parser.argc == 3
assert parser.argv == ['a', '--', 'b']
parser = arg.Parser(argv = ['a', '--', 'b'], keep_dashdash = False, store_nonflags = True)
flags = parser.flags
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 2
assert type(parser.argc) is int
assert parser.argc == 2
assert parser.argv == ['a', 'b']
parser = arg.Parser(argv = ['-a-b'])
flags = parser.flags
assert next(flags) == 'a'
assert parser.flag == '-a'
try:
next(flags)
assert False
except arg.UsageError:
pass
parser = arg.Parser(argv = ['-123', '-xyz'])
flags = parser.flags
assert next(flags) == '1'
assert parser.flag == '-1'
assert next(flags) == '2'
assert parser.flag == '-2'
assert next(flags) == '3'
assert parser.flag == '-3'
assert next(flags) == 'x'
assert parser.flag == '-x'
assert next(flags) == 'y'
assert parser.flag == '-y'
assert next(flags) == 'z'
assert parser.flag == '-z'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
parser = arg.Parser(argv = ['-123', '-xyz'])
flags = parser.flags
assert next(flags) == '1'
assert parser.flag == '-1'
assert parser.arghere == '123'
assert parser.isargnum
assert parser.argnum == 123
assert next(flags) == 'x'
assert parser.flag == '-x'
assert next(flags) == 'y'
assert parser.flag == '-y'
assert next(flags) == 'z'
assert parser.flag == '-z'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
parser = arg.Parser(argv = ['-1', '-xyz'])
flags = parser.flags
assert next(flags) == '1'
assert parser.flag == '-1'
assert parser.arghere == '1'
assert parser.isargnum
assert parser.argnum == 1
assert next(flags) == 'x'
assert parser.flag == '-x'
assert next(flags) == 'y'
assert parser.flag == '-y'
assert next(flags) == 'z'
assert parser.flag == '-z'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
parser = arg.Parser(argv = ['-ab', '--', '-xyz'], store_nonflags = True)
flags = parser.flags
assert next(flags) == 'a'
assert parser.flag == '-a'
assert next(flags) == 'b'
assert parser.flag == '-b'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 1
assert type(parser.argc) is int
assert parser.argc == 1
assert parser.argv == ['-xyz']
parser = arg.Parser(argv = ['-ab', '--', '-xyz'], keep_dashdash = True, store_nonflags = True)
flags = parser.flags
assert next(flags) == 'a'
assert parser.flag == '-a'
assert next(flags) == 'b'
assert parser.flag == '-b'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 2
assert type(parser.argc) is int
assert parser.argc == 2
assert parser.argv == ['--', '-xyz']
for mid in ('o', 'oo'):
parser = arg.Parser(argv = ['-ab', mid, '-xyz'], store_nonflags = True)
flags = parser.flags
assert next(flags) == 'a'
assert parser.flag == '-a'
assert next(flags) == 'b'
assert parser.flag == '-b'
assert next(flags) == 'x'
assert parser.flag == '-x'
assert next(flags) == 'y'
assert parser.flag == '-y'
assert next(flags) == 'z'
assert parser.flag == '-z'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 1
assert type(parser.argc) is int
assert parser.argc == 1
assert parser.argv == [mid]
parser = arg.Parser(argv = ['-abc'], symbols = '+')
flags = parser.flags
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 1
assert type(parser.argc) is int
assert parser.argc == 1
assert parser.argv == ['-abc']
parser = arg.Parser(argv = ['+xyz', '-abc'], symbols = '+')
flags = parser.flags
assert next(flags) == 'x'
assert parser.flag == '+x'
assert parser.symbol == '+'
assert next(flags) == 'y'
assert parser.flag == '+y'
assert next(flags) == 'z'
assert parser.flag == '+z'
assert parser.symbol == '+'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 1
assert type(parser.argc) is int
assert parser.argc == 1
assert parser.argv == ['-abc']
parser = arg.Parser(argv = ['+xyz', '-abc'], symbols = '-+')
flags = parser.flags
assert next(flags) == 'x'
assert parser.flag == '+x'
assert parser.symbol == '+'
assert next(flags) == 'y'
assert parser.flag == '+y'
assert next(flags) == 'z'
assert parser.flag == '+z'
assert parser.symbol == '+'
assert next(flags) == 'a'
assert parser.flag == '-a'
assert parser.symbol == '-'
assert next(flags) == 'b'
assert parser.flag == '-b'
assert next(flags) == 'c'
assert parser.flag == '-c'
assert parser.symbol == '-'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
parser = arg.Parser(argv = ['+xyz', '++', '-abc'], symbols = '-+')
flags = parser.flags
assert next(flags) == 'x'
assert parser.flag == '+x'
assert parser.symbol == '+'
assert next(flags) == 'y'
assert parser.flag == '+y'
assert next(flags) == 'z'
assert parser.flag == '+z'
assert parser.symbol == '+'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 2
assert type(parser.argc) is int
assert parser.argc == 2
assert parser.argv == ['++', '-abc']
parser = arg.Parser(argv = ['-123', '-xyz'])
flags = parser.flags
assert next(flags) == '1'
assert parser.arg == '23'
assert next(flags) == 'x'
assert parser.flag == '-x'
assert parser.arg == 'yz'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
for a in ('', '-x'):
parser = arg.Parser(argv = ['-1', a])
flags = parser.flags
assert next(flags) == '1'
assert parser.arg == a
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
parser = arg.Parser(argv = ['-123', '-xyz'])
flags = parser.flags
assert next(flags) == '1'
parser.consume()
assert next(flags) == 'x'
parser.consume()
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
parser = arg.Parser(argv = ['--123', 'x'])
flags = parser.flags
assert next(flags) == '-'
assert not parser.testlong('--abc')
assert parser.testlong('--123', arg.NEED_NO_ARGUMENT)
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 1
assert type(parser.argc) is int
assert parser.argc == 1
assert parser.argv == ['x']
for check_arg in (True, False):
for arg_need in (arg.NEED_ARGUMENT, arg.NEED_DETACHED_ARGUMENT):
parser = arg.Parser(argv = ['--123', 'x'])
flags = parser.flags
assert next(flags) == '-'
assert not parser.testlong('--abc')
assert parser.testlong('--123', arg_need)
if check_arg:
assert parser.arg == 'x'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
for check_arg in (True, False):
parser = arg.Parser(argv = ['--123=x'])
flags = parser.flags
assert next(flags) == '-'
assert not parser.testlong('--abc')
assert parser.testlong('--123', arg.NEED_ATTACHED_ARGUMENT)
if check_arg:
assert parser.arg == 'x'
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
parser = arg.Parser(argv = ['--123', 'x'])
flags = parser.flags
assert next(flags) == '-'
assert not parser.testlong('--abc')
try:
parser.testlong('--123', arg.NEED_ATTACHED_ARGUMENT)
assert False
except arg.UsageError:
pass
for a in ('x', ''):
parser = arg.Parser(argv = ['--123=' + a])
flags = parser.flags
assert next(flags) == '-'
assert not parser.testlong('--abc')
assert parser.testlong('--123', arg.MAY_HAVE_ATTACHED_ARGUMENT)
assert parser.arg == a
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 0
assert type(parser.argc) is int
assert parser.argc == 0
parser = arg.Parser(argv = ['--123', a])
flags = parser.flags
assert next(flags) == '-'
assert not parser.testlong('--abc')
assert parser.testlong('--123', arg.MAY_HAVE_ATTACHED_ARGUMENT)
assert parser.arg == None
try:
next(flags)
assert False
except StopIteration:
pass
assert type(parser.argv) is list
assert len(parser.argv) == 1
assert type(parser.argc) is int
assert parser.argc == 1
assert parser.argv == [a]
parser = arg.Parser(argv = ['-a-'], usage = lambda : sys.exit(0))
flags = parser.flags
assert next(flags) == 'a'
assert parser.flag == '-a'
next(flags)
assert False
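
# --- Illustrative usage sketch (not part of the upstream test suite) ---
# The assertions above exercise the API piecemeal; a program would normally
# drive the parser with a single loop like the one below. The flag names and
# exit codes here are made-up examples, not part of python-arg itself.
def demo(argv):
    parser = arg.Parser(argv=argv, usage=lambda: sys.exit(1))
    verbose, output = False, None
    for flag in parser.flags:
        if flag == 'v':
            verbose = True
        elif flag == 'o':
            output = parser.arg  # the attached rest of the token, or the next argv element
        else:
            sys.exit(1)
    return verbose, output, parser.argv  # remaining non-flag operands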
app/api/auth.py | owahlen/gatecontrol | 0 | 6615365
import secrets
from fastapi import HTTPException
from fastapi.security import HTTPBasicCredentials, HTTPBasic
from starlette import status
from starlette.requests import Request
from app.api.config import config


class ConfigurableHTTPBasic:
    def __init__(self):
        self.http_basic = HTTPBasic()

    async def __call__(self, request: Request):
        if config.is_basic_auth_active():
            return await self.http_basic(request)
        else:
            return None


def authorize_request(credentials: HTTPBasicCredentials):
if credentials is not None and config.is_basic_auth_active():
correct_username = secrets.compare_digest(credentials.username, config.basic_auth_username)
correct_password = secrets.compare_digest(credentials.password, config.basic_auth_password)
if not (correct_username and correct_password):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Incorrect username or password",
headers={"WWW-Authenticate": "Basic"},
)
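

# --- Illustrative usage sketch (assumed wiring, not part of this module) ---
# Shows how the pieces above are typically combined in a FastAPI route: the
# dependency yields credentials only while basic auth is enabled, and
# authorize_request rejects bad credentials with a 401. The app object and
# route path are assumptions for illustration.
if __name__ == "__main__":
    from fastapi import Depends, FastAPI

    app = FastAPI()
    security = ConfigurableHTTPBasic()

    @app.get("/protected")
    async def protected(credentials: HTTPBasicCredentials = Depends(security)):
        authorize_request(credentials)
        return {"detail": "authorized"}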
skgaip/tspdb/tspdb/src/database_module/db_class.py | danielsuo/toy_flood | 43 | 6615366
import abc
class Interface(object):
    __metaclass__ = abc.ABCMeta  # Python 2-style declaration; ignored under Python 3, so abstractness is not enforced
@property
def schema(self):
raise NotImplementedError
@abc.abstractmethod
def get_time_series(self, name, start, end, value_column, index_column, interval = 60, aggregation_method = 'average', desc = False):
"""
        query the time series table to return equally-spaced time series values from a certain range [start to end],
        or all values with timestamp/index greater than start (if end is None)
----------
Parameters
----------
name: string
table (time series) name in database
start: int or timestamp
start index (timestamp) of the range query
end: int, timestamp
last index (timestamp) of the range query
value_column: string
            name of the column that contains the time series values
index_col: string
name of column that contains time series index/timestamp
interval: float optional (default=60)
if time-index type is timestamp, determine the period (in seconds) in which the timestamps are truncated
aggregation_method: str optional (default='average')
            the method used to aggregate values belonging to the same interval. options are: 'average', 'max', 'min', and 'median'
desc: boolean optional (default=false)
if true(false), the returned values are sorted descendingly (ascendingly) according to index_col
----------
Returns
----------
array, shape [(end - start +1) or ceil(end(in seconds) - start(in seconds) +1) / interval ]
Values of time series in the time interval start to end sorted according to index_col
"""
@abc.abstractmethod
def get_U_row(self, table_name, tsrow_range, models_range, k, return_modelno = False):
"""
        query the U matrix from the database tables created via the prediction index. the query depends on the ts_row
range [tsrow_range[0] to tsrow_range[1]] and model range [models_range[0] to models_range[1]] (both inclusive)
----------
Parameters
----------
table_name: string
table name in database
tsrow_range:list of length 2
start and end index of the range query predicate on ts_row
models_range:list of length 2
start and end index of the range query predicate on model_no
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
array
queried values for the selected range
"""
pass
@abc.abstractmethod
def get_coeff_model(self, index_name, model_no):
"""
query the c table to get the coefficients of the (model_no) sub-model
----------
Parameters
----------
index_name: string
pindex_name
        model_no: int
submodel for which we want the coefficients
----------
Returns
----------
array
queried values for the selected range
"""
pass
@abc.abstractmethod
def get_V_row(self, table_name, tscol_range, k, return_modelno ):
"""
        query the V matrix from the database table created via the prediction index. the query depends on the ts_col
range [tscol_range[0] to tscol_range[1]] (inclusive)
----------
Parameters
----------
table_name: string
table name in database
tscol_range:list of length 2
start and end index of the range query predicate on ts_col
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
array
queried values for the selected range
"""
pass
@abc.abstractmethod
def get_S_row(self, table_name, models_range,k, return_modelno = False):
"""
        query the S matrix from the database table created via the prediction index. the query depends on the model
range [models_range[0] to models_range[1]] ( inclusive)
----------
Parameters
----------
table_name: string
table name in database
models_range: list of length 2
start and end index of the range query predicate on model_no
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
array
queried values for the selected range
"""
pass
@abc.abstractmethod
def get_SUV(self, table_name, tscol_range, tsrow_range, models_range, k ,return_modelno):
"""
        query the S, U, and V matrices from the database tables created via the prediction index. the query depends on the model
range, ts_col range, and ts_row range (inclusive ranges)
----------
Parameters
----------
table_name: string
table name in database
tscol_range:list of length 2
start and end index of the range query predicate on ts_col
tsrow_range:list of length 2
start and end index of the range query predicate on ts_row
models_range: list of length 2
start and end index of the range query predicate on model_no
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
S array
queried values for the selected range of S table
U array
queried values for the selected range of U table
V array
queried values for the selected range of V table
"""
pass
@abc.abstractmethod
def get_coeff(self, table_name, column = 'average'):
"""
query the LR coefficients from the database materialized view created via the index.
        the query needs to specify the queried column
----------
Parameters
----------
table_name: string
table name in database
        column: string optional (default='average')
column name, for possible options, refer to ...
----------
Returns
----------
coeffs array
queried coefficients for the selected average
"""
pass
@abc.abstractmethod
def query_table(self, table_name, columns_queried = [],predicate= '' ):
"""
query columns from table_name according to a predicate
----------
Parameters
----------
table_name: string
table name in database
columns_queried: list of strings
list of queries columns e.g. ['age', 'salary']
predicate: string optional (default = '')
predicate written as string e.g. 'age < 1'
----------
Returns
----------
result array
queried tuples
"""
pass
@abc.abstractmethod
def create_table(self, table_name,df, primary_key=None, load_data=True, replace_if_exists = False , include_index=True,
index_label="index"):
"""
Create table in the database with the same columns as the given pandas dataframe. Rows in the df will be written to
the newly created table if load_data.
----------
Parameters
----------
table_name: string
name of the table to be created
df: Pandas dataframe
Dataframe used to determine the schema of the table, as well as the data to be written in the new table (if load_data)
primary_key: str, optional (default None)
            primary key of the table; should be one of the columns of the df
        load_data: boolean optional (default True)
            if true, load data in df to the newly created table via bulk_insert()
        replace_if_exists: boolean optional (default False)
            if true, drop the existing table of the same name (if exists).
        include_index: boolean optional (default True)
if true, include the index column of the df, with its name being index_column_name
index_label: string optional (default "index")
name of the index column of the df in the newly created database table
"""
pass
def drop_table(self, table_name,):
"""
Drop table from database
----------
Parameters
----------
table_name: string
name of the table to be deleted
"""
pass
@abc.abstractmethod
def create_index(self, table_name, column, index_name='',):
"""
Constructs an index on a specified column of the specified table
----------
Parameters
----------
table_name: string
the name of the table to be indexed
column: string
the name of the column to be indexed on
index_name: string optional (Default '' (DB default))
the name of the index
"""
pass
@abc.abstractmethod
def create_table_from_query(self, table_name, query):
"""
Create a new table using the output of a certain query. This is equivalent to a materialized view in
PostgreSQL and Oracle
----------
Parameters
----------
table_name: string
the name of the table to be indexed
query: string
query to create table from
"""
pass
@abc.abstractmethod
def execute_query(self, query):
"""
function that simply passes queries to DB
----------
Parameters
----------
query: string
query to be executed
----------
Returns
----------
array
query output
"""
pass
@abc.abstractmethod
def insert(self,table_name, row):
"""
Insert a new row in table_name
----------
Parameters
----------
table_name: string
name of an existing table to insert the new row to
row: list
data to be inserted
"""
@abc.abstractmethod
def create_coefficients_average_table(self, table_name, created_table_name, average_windows, max_model, refresh = False ):
"""
        Create the materialized view where the coefficient averages are calculated.
----------
Parameters
----------
table_name: string
the name of the coefficient tables
created_table_name: string
            the name of the created materialized view
average_windows: list
windows for averages to be calculated (e.g.: [10,20] calc. last ten and 20 models)
max_model: int
index of the latest submodel
refresh: Boolean
if true, refresh view
"""
pass
@abc.abstractmethod
def bulk_insert(self, table_name, df):
"""
Insert rows in pandas dataframe to table_name
----------
Parameters
----------
table_name: string
name of the table to which we will insert data
        df: pandas dataframe
Dataframe containing the data to be added
"""
pass
@abc.abstractmethod
def table_exists(self, table_name, schema = 'public'):
"""
check if a table exists in a certain database and schema
----------
Parameters
----------
table_name: string
name of the table
schema: string default ('public')
"""
@abc.abstractmethod
def delete(self, table_name, predicate):
"""
        delete from table_name the rows that satisfy the predicate
        ----------
        Parameters
        ----------
        table_name: string
            name of the table containing the rows to be deleted
predicate: string
the condition to determine deleted rows
"""
@abc.abstractmethod
def get_time_diff(self, table_name, time_column, number_of_pts = 100):
"""
        return the median difference between the first number_of_pts consecutive time points
        ----------
        Parameters
        ----------
        table_name: string
            name of the table containing the time series
        time_column: string
            name of the time column
        number_of_pts: int
            number of points used to estimate the difference (default: 100)
"""
| import abc
class Interface(object):
__metaclass__ = abc.ABCMeta
@property
def schema(self):
raise NotImplementedError
@abc.abstractmethod
def get_time_series(self, name, start, end, value_column, index_column, interval = 60, aggregation_method = 'average', desc = False):
"""
query time series table to return equally-spaced time series values from a certain range [start to end]
or all values with time stamp/index greter than start (if end is None)
----------
Parameters
----------
name: string
table (time series) name in database
start: int or timestamp
start index (timestamp) of the range query
end: int, timestamp
last index (timestamp) of the range query
value_column: string
name of column than contain time series value
index_col: string
name of column that contains time series index/timestamp
interval: float optional (default=60)
if time-index type is timestamp, determine the period (in seconds) in which the timestamps are truncated
aggregation_method: str optional (default='average')
the method used to aggragte values belonging to the same interval. options are: 'average', 'max', 'min', and 'median'
desc: boolean optional (default=false)
if true(false), the returned values are sorted descendingly (ascendingly) according to index_col
----------
Returns
----------
array, shape [(end - start +1) or ceil(end(in seconds) - start(in seconds) +1) / interval ]
Values of time series in the time interval start to end sorted according to index_col
"""
@abc.abstractmethod
def get_U_row(self, table_name, tsrow_range, models_range, k, return_modelno = False):
"""
query the U matrix from the database tables created via the predictin index. the query depend on the ts_row
range [tsrow_range[0] to tsrow_range[1]] and model range [models_range[0] to models_range[1]] (both inclusive)
----------
Parameters
----------
table_name: string
table name in database
tsrow_range:list of length 2
start and end index of the range query predicate on ts_row
models_range:list of length 2
start and end index of the range query predicate on model_no
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
array
queried values for the selected range
"""
pass
@abc.abstractmethod
def get_coeff_model(self, index_name, model_no):
"""
query the c table to get the coefficients of the (model_no) sub-model
----------
Parameters
----------
index_name: string
pindex_name
models_no:int
submodel for which we want the coefficients
----------
Returns
----------
array
queried values for the selected range
"""
pass
@abc.abstractmethod
def get_V_row(self, table_name, tscol_range, k, return_modelno ):
"""
query the V matrix from the database table created via the predictin index. the query depend on the ts_col
range [tscol_range[0] to tscol_range[1]] (inclusive)
----------
Parameters
----------
table_name: string
table name in database
tscol_range:list of length 2
start and end index of the range query predicate on ts_col
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
array
queried values for the selected range
"""
pass
@abc.abstractmethod
def get_S_row(self, table_name, models_range,k, return_modelno = False):
"""
query the S matrix from the database table created via the predictin index. the query depend on the model
range [models_range[0] to models_range[1]] ( inclusive)
----------
Parameters
----------
table_name: string
table name in database
models_range: list of length 2
start and end index of the range query predicate on model_no
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
array
queried values for the selected range
"""
pass
@abc.abstractmethod
def get_SUV(self, table_name, tscol_range, tsrow_range, models_range, k ,return_modelno):
"""
query the S, U, V matric from the database tables created via the prediction index. the query depend on the model
range, ts_col range, and ts_row range (inclusive ranges)
----------
Parameters
----------
table_name: string
table name in database
tscol_range:list of length 2
start and end index of the range query predicate on ts_col
tsrow_range:list of length 2
start and end index of the range query predicate on ts_row
models_range: list of length 2
start and end index of the range query predicate on model_no
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
S array
queried values for the selected range of S table
U array
queried values for the selected range of U table
V array
queried values for the selected range of V table
"""
pass
@abc.abstractmethod
def get_coeff(self, table_name, column = 'average'):
"""
query the LR coefficients from the database materialized view created via the index.
the query need to determine the queried column
----------
Parameters
----------
table_name: string
table name in database
column: string optioanl (default = 'average' )
column name, for possible options, refer to ...
----------
Returns
----------
coeffs array
queried coefficients for the selected average
"""
pass
@abc.abstractmethod
def query_table(self, table_name, columns_queried = [],predicate= '' ):
"""
query columns from table_name according to a predicate
----------
Parameters
----------
table_name: string
table name in database
columns_queried: list of strings
list of queries columns e.g. ['age', 'salary']
predicate: string optional (default = '')
predicate written as string e.g. 'age < 1'
----------
Returns
----------
result array
queried tuples
"""
pass
@abc.abstractmethod
def create_table(self, table_name,df, primary_key=None, load_data=True, replace_if_exists = False , include_index=True,
index_label="index"):
"""
Create table in the database with the same columns as the given pandas dataframe. Rows in the df will be written to
the newly created table if load_data.
----------
Parameters
----------
table_name: string
name of the table to be created
df: Pandas dataframe
Dataframe used to determine the schema of the table, as well as the data to be written in the new table (if load_data)
primary_key: str, optional (default None)
primary key of the table, should be one of the columns od the df
load_data: boolean optioanl (default True)
if true, load data in df to the newly created table via bulk_inset()
replace_if_exists: boolean optioanl (default False)
if true, drop the existing table of the same name (if exists).
include_index: boolean optioanl (default True)
if true, include the index column of the df, with its name being index_column_name
index_label: string optional (default "index")
name of the index column of the df in the newly created database table
"""
pass
def drop_table(self, table_name,):
"""
Drop table from database
----------
Parameters
----------
table_name: string
name of the table to be deleted
"""
pass
@abc.abstractmethod
def create_index(self, table_name, column, index_name='',):
"""
Constructs an index on a specified column of the specified table
----------
Parameters
----------
table_name: string
the name of the table to be indexed
column: string
the name of the column to be indexed on
index_name: string optional (Default '' (DB default))
the name of the index
"""
pass
@abc.abstractmethod
def create_table_from_query(self, table_name, query):
"""
Create a new table using the output of a certain query. This is equivalent to a materialized view in
PostgreSQL and Oracle
----------
Parameters
----------
table_name: string
the name of the table to be indexed
query: string
query to create table from
"""
pass
@abc.abstractmethod
def execute_query(self, query):
"""
function that simply passes queries to DB
----------
Parameters
----------
query: string
query to be executed
----------
Returns
----------
array
query output
"""
pass
@abc.abstractmethod
def insert(self,table_name, row):
"""
Insert a new row in table_name
----------
Parameters
----------
table_name: string
name of an existing table to insert the new row to
row: list
data to be inserted
"""
@abc.abstractmethod
def create_coefficients_average_table(self, table_name, created_table_name, average_windows, max_model, refresh = False ):
"""
Create the matrilized view where the coefficient averages are calculated.
----------
Parameters
----------
table_name: string
the name of the coefficient tables
created_table_name: string
the name of the created matrilized view
average_windows: list
windows for averages to be calculated (e.g.: [10,20] calc. last ten and 20 models)
max_model: int
index of the latest submodel
average_windows: list
windows for averages to be calculated (e.g.: [10,20] calc. last ten and 20 models)
refresh: Boolean
if true, refresh view
"""
pass
@abc.abstractmethod
def bulk_insert(self, table_name, df):
"""
Insert rows in pandas dataframe to table_name
----------
Parameters
----------
table_name: string
name of the table to which we will insert data
df pandas dataframe
Dataframe containing the data to be added
"""
pass
@abc.abstractmethod
def table_exists(self, table_name, schema = 'public'):
"""
check if a table exists in a certain database and schema
----------
Parameters
----------
table_name: string
name of the table
schema: string default ('public')
"""
@abc.abstractmethod
def delete(self, table_name, predicate):
"""
check if a table exists in a certain database and schema
----------
Parameters
----------
table_name: string
name of the table contating the row to be deleted
predicate: string
the condition to determine deleted rows
"""
@abc.abstractmethod
def get_time_diff(self, table_name, time_column, number_of_pts = 100):
"""
return the median in the difference between first 100 consecutive time points
----------
Parameters
----------
table_name: string
name of the table contating the row to be deleted
time_column: string
tname of time column
number_of_pts: int
Number of point to estimate the differnce Default:100
""" | en | 0.524287 | query time series table to return equally-spaced time series values from a certain range [start to end]
or all values with time stamp/index greter than start (if end is None)
----------
Parameters
----------
name: string
table (time series) name in database
start: int or timestamp
start index (timestamp) of the range query
end: int, timestamp
last index (timestamp) of the range query
value_column: string
name of column than contain time series value
index_col: string
name of column that contains time series index/timestamp
interval: float optional (default=60)
if time-index type is timestamp, determine the period (in seconds) in which the timestamps are truncated
aggregation_method: str optional (default='average')
the method used to aggragte values belonging to the same interval. options are: 'average', 'max', 'min', and 'median'
desc: boolean optional (default=false)
if true(false), the returned values are sorted descendingly (ascendingly) according to index_col
----------
Returns
----------
array, shape [(end - start +1) or ceil(end(in seconds) - start(in seconds) +1) / interval ]
Values of time series in the time interval start to end sorted according to index_col query the U matrix from the database tables created via the predictin index. the query depend on the ts_row
range [tsrow_range[0] to tsrow_range[1]] and model range [models_range[0] to models_range[1]] (both inclusive)
----------
Parameters
----------
table_name: string
table name in database
tsrow_range:list of length 2
start and end index of the range query predicate on ts_row
models_range:list of length 2
start and end index of the range query predicate on model_no
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
array
queried values for the selected range query the c table to get the coefficients of the (model_no) sub-model
----------
Parameters
----------
index_name: string
pindex_name
models_no:int
submodel for which we want the coefficients
----------
Returns
----------
array
queried values for the selected range query the V matrix from the database table created via the predictin index. the query depend on the ts_col
range [tscol_range[0] to tscol_range[1]] (inclusive)
----------
Parameters
----------
table_name: string
table name in database
tscol_range:list of length 2
start and end index of the range query predicate on ts_col
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
array
queried values for the selected range query the S matrix from the database table created via the predictin index. the query depend on the model
range [models_range[0] to models_range[1]] ( inclusive)
----------
Parameters
----------
table_name: string
table name in database
models_range: list of length 2
start and end index of the range query predicate on model_no
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
array
queried values for the selected range query the S, U, V matric from the database tables created via the prediction index. the query depend on the model
range, ts_col range, and ts_row range (inclusive ranges)
----------
Parameters
----------
table_name: string
table name in database
tscol_range:list of length 2
start and end index of the range query predicate on ts_col
tsrow_range:list of length 2
start and end index of the range query predicate on ts_row
models_range: list of length 2
start and end index of the range query predicate on model_no
k: int
number of singular values retained in the prediction index
return_modelno: boolean optional (default=false)
if true, submodel numbers are returned in the first column
----------
Returns
----------
S array
queried values for the selected range of S table
U array
queried values for the selected range of U table
V array
queried values for the selected range of V table query the LR coefficients from the database materialized view created via the index.
the query need to determine the queried column
----------
Parameters
----------
table_name: string
table name in database
column: string optioanl (default = 'average' )
column name, for possible options, refer to ...
----------
Returns
----------
coeffs array
queried coefficients for the selected average query columns from table_name according to a predicate
----------
Parameters
----------
table_name: string
table name in database
columns_queried: list of strings
list of queries columns e.g. ['age', 'salary']
predicate: string optional (default = '')
predicate written as string e.g. 'age < 1'
----------
Returns
----------
result array
queried tuples Create table in the database with the same columns as the given pandas dataframe. Rows in the df will be written to
the newly created table if load_data.
----------
Parameters
----------
table_name: string
name of the table to be created
df: Pandas dataframe
Dataframe used to determine the schema of the table, as well as the data to be written in the new table (if load_data)
primary_key: str, optional (default None)
primary key of the table, should be one of the columns od the df
load_data: boolean optioanl (default True)
if true, load data in df to the newly created table via bulk_inset()
replace_if_exists: boolean optioanl (default False)
if true, drop the existing table of the same name (if exists).
include_index: boolean optioanl (default True)
if true, include the index column of the df, with its name being index_column_name
index_label: string optional (default "index")
name of the index column of the df in the newly created database table Drop table from database
----------
Parameters
----------
table_name: string
name of the table to be deleted. Constructs an index on a specified column of the specified table
----------
Parameters
----------
table_name: string
the name of the table to be indexed
column: string
the name of the column to be indexed on
index_name: string optional (Default '' (DB default))
the name of the index. Create a new table using the output of a certain query. This is equivalent to a materialized view in
PostgreSQL and Oracle
----------
Parameters
----------
table_name: string
the name of the table to be created
query: string
query to create the table from. Function that simply passes queries to the DB
----------
Parameters
----------
query: string
query to be executed
----------
Returns
----------
array
query output. Insert a new row into table_name
----------
Parameters
----------
table_name: string
name of an existing table to insert the new row to
row: list
data to be inserted. Create the materialized view where the coefficient averages are calculated.
----------
Parameters
----------
table_name: string
the name of the coefficient tables
created_table_name: string
the name of the created materialized view
average_windows: list
windows over which averages are calculated (e.g. [10,20] averages the last 10 and 20 models)
max_model: int
index of the latest submodel
refresh: Boolean
if true, refresh the view. Insert rows of a pandas dataframe into table_name
----------
Parameters
----------
table_name: string
name of the table to which we will insert data
df: pandas dataframe
Dataframe containing the data to be added. Check whether a table exists in a certain database and schema
----------
Parameters
----------
table_name: string
name of the table
schema: string, default 'public'. Delete rows from a table according to a predicate
----------
Parameters
----------
table_name: string
name of the table containing the rows to be deleted
predicate: string
the condition to determine the deleted rows. Return the median of the differences between the first 100 consecutive time points
----------
Parameters
----------
table_name: string
name of the table containing the time column
time_column: string
name of the time column
number_of_pts: int
Number of points used to estimate the difference. Default: 100 | 3.133533 | 3
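Taken together, the concatenated docstrings above describe a small database-interface layer: range queries over the S, U, and V tables of a prediction index, generic predicated selects, table creation from a pandas dataframe, bulk inserts, and a materialized view of coefficient averages. The sketch below shows how such an interface might be driven; the object name db and every method name and signature are assumptions reconstructed from the parameter lists, not a confirmed API.

import pandas as pd

def demo(db):
    # Hypothetical driver for the interface documented above; all method
    # names (create_table, insert, query, execute) are assumptions.
    df = pd.DataFrame({"age": [25, 31], "salary": [50000, 62000]})
    # Create a table with df's schema and load its rows via bulk insert.
    db.create_table("employees", df, primary_key=None, load_data=True,
                    include_index=True, index_label="index")
    # Append one row, then run a predicated projection.
    db.insert("employees", [2, 40, 70000])
    rows = db.query("employees", columns_queried=["age", "salary"],
                    predicate="age < 35")
    # Pass an arbitrary statement straight through to the backend.
    db.execute("DROP TABLE IF EXISTS employees;")
    return rows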
JPS_Chatbot/UI/source/JPS_Query/OntoCompChem_Queries.py | mdhillmancmcl/TheWorldAvatar-CMCL-Fork | 21 | 6615367 | <gh_stars>10-100
# template has one slot: species
ROTATIONAL_CONSTANT_QUERY = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?rotational_constants_value ?unit_short
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation gc:isCalculationOn ?rotational_constants .
?rotational_constants ontocompchem:hasRotationalConstants ?rotational_constants_value .
OPTIONAL {
?rotational_constants gc:hasUnit ?unit .
BIND(REPLACE(STR(?unit),"http://data.nasa.gov/qudt/owl/unit#","") AS ?unit_short) .
}
} LIMIT 1
'''
VIBRATION_FREQUENCY_QUERY = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?frequency ?name ?unit_short
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation gc:isCalculationOn ?VibrationalAnalysis .
?VibrationalAnalysis rdf:type gc:VibrationalAnalysis .
?VibrationalAnalysis gc:hasResult ?result .
?result ontocompchem:hasFrequencies ?frequency .
OPTIONAL {
?result gc:hasUnit ?unit .
BIND(REPLACE(STR(?unit),"http://purl.org/gc/","") AS ?unit_short) .
}
} LIMIT 1
'''
ROTATIONAL_SYMMETRY_NUMBER = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?symmetry_number
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation gc:isCalculationOn ?RotationalSymmetry .
?RotationalSymmetry rdf:type ontocompchem:RotationalSymmetry .
?RotationalSymmetry ontocompchem:hasRotationalSymmetryNumber ?symmetry_number .
} LIMIT 1
'''
GAUSSIAN_FILE = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?File
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation ontocompchem:hasEnvironment ?Environment .
?Environment gc:hasOutputFile ?File .
} LIMIT 1
'''
SPIN_MULTIPLICITY = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?SpinMultiplicity
WHERE {
?Molecule ontocompchem:hasSpinMultiplicity ?SpinMultiplicity .
?GeometryOptimization gc:hasMolecule ?Molecule .
?g_calculation gc:isCalculationOn ?GeometryOptimization .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
} LIMIT 1
'''
FORMAL_CHARGE = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?FormalCharge_value
WHERE {
?Molecule gc:hasFormalCharge ?FormalCharge .
?FormalCharge gc:hasValue ?FormalCharge_value .
?GeometryOptimization gc:hasMolecule ?Molecule .
?g_calculation gc:isCalculationOn ?GeometryOptimization .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
OPTIONAL {
?FormalCharge gc:hasUnit ?unit .
BIND(REPLACE(STR(?unit),"http://purl.org/gc/","") AS ?unit_short) .
}
} LIMIT 1
'''
ELECTRONIC_ENERGY = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?Electronic_energy ?unit_short
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation gc:isCalculationOn ?ScfEnergy .
?ScfEnergy gc:hasElectronicEnergy ?x .
?x gc:hasValue ?Electronic_energy .
OPTIONAL {
?x gc:hasUnit ?unit .
BIND(REPLACE(STR(?unit),"http://data.nasa.gov/qudt/owl/unit#","") AS ?unit_short) .
} # http://data.nasa.gov/qudt/owl/unit#Hartree
} LIMIT 1
'''
GEOMETRY_TYPE = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?GeometryTypeValue
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation gc:isCalculationOn ?GeometryType .
?GeometryType ontocompchem:hasGeometryType ?GeometryTypeValue .
} LIMIT 1
'''
ontocompchem_simple_intents = ['symmetry_number',
'rotational_constants',
'vibration_frequency',
'guassian_file',
'spin_multiplicity',
'formal_charge',
'electronic_energy',
'geometry_type']
intent_to_template_mapping = {'rotational_constants': ROTATIONAL_CONSTANT_QUERY,
'symmetry_number': ROTATIONAL_SYMMETRY_NUMBER,
'vibration_frequency': VIBRATION_FREQUENCY_QUERY, 'guassian_file': GAUSSIAN_FILE,
'formal_charge': FORMAL_CHARGE, 'electronic_energy': ELECTRONIC_ENERGY,
'geometry_type': GEOMETRY_TYPE, 'spin_multiplicity': SPIN_MULTIPLICITY}
| # template has one slot: species
ROTATIONAL_CONSTANT_QUERY = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?rotational_constants_value ?unit_short
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation gc:isCalculationOn ?rotational_constants .
?rotational_constants ontocompchem:hasRotationalConstants ?rotational_constants_value .
OPTIONAL {
?rotational_constants gc:hasUnit ?unit .
BIND(REPLACE(STR(?unit),"http://data.nasa.gov/qudt/owl/unit#","") AS ?unit_short) .
}
} LIMIT 1
'''
VIBRATION_FREQUENCY_QUERY = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?frequency ?name ?unit_short
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation gc:isCalculationOn ?VibrationalAnalysis .
?VibrationalAnalysis rdf:type gc:VibrationalAnalysis .
?VibrationalAnalysis gc:hasResult ?result .
?result ontocompchem:hasFrequencies ?frequency .
OPTIONAL {
?result gc:hasUnit ?unit .
BIND(REPLACE(STR(?unit),"http://purl.org/gc/","") AS ?unit_short) .
}
} LIMIT 1
'''
ROTATIONAL_SYMMETRY_NUMBER = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?symmetry_number
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation gc:isCalculationOn ?RotationalSymmetry .
?RotationalSymmetry rdf:type ontocompchem:RotationalSymmetry .
?RotationalSymmetry ontocompchem:hasRotationalSymmetryNumber ?symmetry_number .
} LIMIT 1
'''
GAUSSIAN_FILE = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?File
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation ontocompchem:hasEnvironment ?Environment .
?Environment gc:hasOutputFile ?File .
} LIMIT 1
'''
SPIN_MULTIPLICITY = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?SpinMultiplicity
WHERE {
?Molecule ontocompchem:hasSpinMultiplicity ?SpinMultiplicity .
?GeometryOptimization gc:hasMolecule ?Molecule .
?g_calculation gc:isCalculationOn ?GeometryOptimization .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
} LIMIT 1
'''
FORMAL_CHARGE = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?FormalCharge_value
WHERE {
?Molecule gc:hasFormalCharge ?FormalCharge .
?FormalCharge gc:hasValue ?FormalCharge_value .
?GeometryOptimization gc:hasMolecule ?Molecule .
?g_calculation gc:isCalculationOn ?GeometryOptimization .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
OPTIONAL {
?FormalCharge gc:hasUnit ?unit .
BIND(REPLACE(STR(?unit),"http://purl.org/gc/","") AS ?unit_short) .
}
} LIMIT 1
'''
ELECTRONIC_ENERGY = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?Electronic_energy ?unit_short
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation gc:isCalculationOn ?ScfEnergy .
?ScfEnergy gc:hasElectronicEnergy ?x .
?x gc:hasValue ?Electronic_energy .
OPTIONAL {
?x gc:hasUnit ?unit .
BIND(REPLACE(STR(?unit),"http://data.nasa.gov/qudt/owl/unit#","") AS ?unit_short) .
} # http://data.nasa.gov/qudt/owl/unit#Hartree
} LIMIT 1
'''
GEOMETRY_TYPE = '''
PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#>
PREFIX gc: <http://purl.org/gc/>
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#>
SELECT DISTINCT ?name ?GeometryTypeValue
WHERE {
?g_calculation rdf:type ontocompchem:G09 .
?g_calculation ontocompchem:hasInitialization ?initialization .
?initialization gc:hasMoleculeProperty ?molecule_property .
?molecule_property gc:hasName ?name .
?molecule_property gc:hasName "%s" .
# ============ to match molecule =========================
?g_calculation gc:isCalculationOn ?GeometryType .
?GeometryType ontocompchem:hasGeometryType ?GeometryTypeValue .
} LIMIT 1
'''
ontocompchem_simple_intents = ['symmetry_number',
'rotational_constants',
'vibration_frequency',
'guassian_file',
'spin_multiplicity',
'formal_charge',
'electronic_energy',
'geometry_type']
intent_to_template_mapping = {'rotational_constants': ROTATIONAL_CONSTANT_QUERY,
'symmetry_number': ROTATIONAL_SYMMETRY_NUMBER,
'vibration_frequency': VIBRATION_FREQUENCY_QUERY, 'guassian_file': GAUSSIAN_FILE,
'formal_charge': FORMAL_CHARGE, 'electronic_energy': ELECTRONIC_ENERGY,
'geometry_type': GEOMETRY_TYPE, 'spin_multiplicity': SPIN_MULTIPLICITY} | en | 0.404962 | # template has one slot: species PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#> PREFIX gc: <http://purl.org/gc/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#> SELECT DISTINCT ?name ?rotational_constants_value ?unit_short WHERE { ?g_calculation rdf:type ontocompchem:G09 . ?g_calculation ontocompchem:hasInitialization ?initialization . ?initialization gc:hasMoleculeProperty ?molecule_property . ?molecule_property gc:hasName ?name . ?molecule_property gc:hasName "%s" . # ============ to match molecule ========================= ?g_calculation gc:isCalculationOn ?rotational_constants . ?rotational_constants ontocompchem:hasRotationalConstants ?rotational_constants_value . OPTIONAL { ?rotational_constants gc:hasUnit ?unit . BIND(REPLACE(STR(?unit),"http://data.nasa.gov/qudt/owl/unit#","") AS ?unit_short) . } } LIMIT 1 PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#> PREFIX gc: <http://purl.org/gc/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#> SELECT DISTINCT ?frequency ?name ?unit_short WHERE { ?g_calculation rdf:type ontocompchem:G09 . ?g_calculation ontocompchem:hasInitialization ?initialization . ?initialization gc:hasMoleculeProperty ?molecule_property . ?molecule_property gc:hasName ?name . ?molecule_property gc:hasName "%s" . # ============ to match molecule ========================= ?g_calculation gc:isCalculationOn ?VibrationalAnalysis . ?VibrationalAnalysis rdf:type gc:VibrationalAnalysis . ?VibrationalAnalysis gc:hasResult ?result . ?result ontocompchem:hasFrequencies ?frequency . OPTIONAL { ?result gc:hasUnit ?unit . BIND(REPLACE(STR(?unit),"http://purl.org/gc/","") AS ?unit_short) . } } LIMIT 1 PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#> PREFIX gc: <http://purl.org/gc/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#> SELECT DISTINCT ?name ?symmetry_number WHERE { ?g_calculation rdf:type ontocompchem:G09 . ?g_calculation ontocompchem:hasInitialization ?initialization . ?initialization gc:hasMoleculeProperty ?molecule_property . ?molecule_property gc:hasName ?name . ?molecule_property gc:hasName "%s" . # ============ to match molecule ========================= ?g_calculation gc:isCalculationOn ?RotationalSymmetry . ?RotationalSymmetry rdf:type ontocompchem:RotationalSymmetry . ?RotationalSymmetry ontocompchem:hasRotationalSymmetryNumber ?symmetry_number . } LIMIT 1 PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#> PREFIX gc: <http://purl.org/gc/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#> SELECT DISTINCT ?name ?File WHERE { ?g_calculation rdf:type ontocompchem:G09 . ?g_calculation ontocompchem:hasInitialization ?initialization . ?initialization gc:hasMoleculeProperty ?molecule_property . ?molecule_property gc:hasName ?name . ?molecule_property gc:hasName "%s" . 
# ============ to match molecule ========================= ?g_calculation ontocompchem:hasEnvironment ?Environment . ?Environment gc:hasOutputFile ?File . } LIMIT 1 PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#> PREFIX gc: <http://purl.org/gc/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#> SELECT DISTINCT ?name ?SpinMultiplicity WHERE { ?Molecule ontocompchem:hasSpinMultiplicity ?SpinMultiplicity . ?GeometryOptimization gc:hasMolecule ?Molecule . ?g_calculation gc:isCalculationOn ?GeometryOptimization . ?g_calculation ontocompchem:hasInitialization ?initialization . ?initialization gc:hasMoleculeProperty ?molecule_property . ?molecule_property gc:hasName ?name . ?molecule_property gc:hasName "%s" . } LIMIT 1 PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#> PREFIX gc: <http://purl.org/gc/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#> SELECT DISTINCT ?name ?FormalCharge_value WHERE { ?Molecule gc:hasFormalCharge ?FormalCharge . ?FormalCharge gc:hasValue ?FormalCharge_value . ?GeometryOptimization gc:hasMolecule ?Molecule . ?g_calculation gc:isCalculationOn ?GeometryOptimization . ?g_calculation ontocompchem:hasInitialization ?initialization . ?initialization gc:hasMoleculeProperty ?molecule_property . ?molecule_property gc:hasName ?name . ?molecule_property gc:hasName "%s" . OPTIONAL { ?FormalCharge gc:hasUnit ?unit . BIND(REPLACE(STR(?unit),"http://purl.org/gc/","") AS ?unit_short) . } } LIMIT 1 PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#> PREFIX gc: <http://purl.org/gc/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#> SELECT DISTINCT ?name ?Electronic_energy ?unit_short WHERE { ?g_calculation rdf:type ontocompchem:G09 . ?g_calculation ontocompchem:hasInitialization ?initialization . ?initialization gc:hasMoleculeProperty ?molecule_property . ?molecule_property gc:hasName ?name . ?molecule_property gc:hasName "%s" . # ============ to match molecule ========================= ?g_calculation gc:isCalculationOn ?ScfEnergy . ?ScfEnergy gc:hasElectronicEnergy ?x . ?x gc:hasValue ?Electronic_energy . OPTIONAL { ?x gc:hasUnit ?unit . BIND(REPLACE(STR(?unit),"http://data.nasa.gov/qudt/owl/unit#","") AS ?unit_short) . } # http://data.nasa.gov/qudt/owl/unit#Hartree } LIMIT 1 PREFIX compchemkb: <https://como.cheng.cam.ac.uk/kb/compchem.owl#> PREFIX gc: <http://purl.org/gc/> PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#> PREFIX ontocompchem:<http://www.theworldavatar.com/ontology/ontocompchem/ontocompchem.owl#> SELECT DISTINCT ?name ?GeometryTypeValue WHERE { ?g_calculation rdf:type ontocompchem:G09 . ?g_calculation ontocompchem:hasInitialization ?initialization . ?initialization gc:hasMoleculeProperty ?molecule_property . ?molecule_property gc:hasName ?name . ?molecule_property gc:hasName "%s" . # ============ to match molecule ========================= ?g_calculation gc:isCalculationOn ?GeometryType . ?GeometryType ontocompchem:hasGeometryType ?GeometryTypeValue . OPTIONAL { ?x gc:hasUnit ?unit . 
BIND(REPLACE(STR(?unit),"http://data.nasa.gov/qudt/owl/unit#","") AS ?unit_short) . } # http://data.nasa.gov/qudt/owl/unit#Hartree } LIMIT 1 | 1.770705 | 2 |
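Each template in OntoCompChem_Queries.py carries exactly one %s slot for the species name (as its opening comment notes), so answering a recognised intent reduces to picking the template from intent_to_template_mapping and formatting it. A sketch, assuming a SPARQLWrapper client and a placeholder endpoint URL, neither of which is confirmed by this file:

from SPARQLWrapper import SPARQLWrapper, JSON

def query_species_property(intent, species, endpoint="http://localhost:8080/sparql"):
    # Resolve the intent to its SPARQL template and fill the species slot.
    template = intent_to_template_mapping[intent]
    query = template % species
    client = SPARQLWrapper(endpoint)  # endpoint URL is a placeholder
    client.setQuery(query)
    client.setReturnFormat(JSON)
    return client.query().convert()["results"]["bindings"]

# e.g. query_species_property("electronic_energy", "C2H2O2")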
api/tests/test_views.py | Festorah/datapeace | 0 | 6615368 | <reponame>Festorah/datapeace<filename>api/tests/test_views.py
from .test_setup import TestSetUp
class TestViews(TestSetUp):
def test_users_view_list(self):
res = self.client.get(self.users_url)
self.assertEqual(res.status_code, 200)
def test_create_user(self):
res = self.client.post(self.users_url, self.user_data, format="json")
self.assertEqual(res.status_code, 200)
def test_get_user_detail(self):
res = self.client.post(self.users_url, self.user_data, format="json")
res = self.client.get(self.users_detail)
self.assertEqual(res.status_code, 200)
def test_update_user(self):
res = self.client.post(self.users_url, self.user_data, format="json")
res = self.client.put(self.users_detail, self.user_data, format="json")
self.assertEqual(res.status_code, 200)
def test_delete_user(self):
res = self.client.post(self.users_url, self.user_data, format="json")
res = self.client.delete(self.users_detail)
self.assertEqual(res.status_code, 200) | from.test_setup import TestSetUp
class TestViews(TestSetUp):
def test_users_view_list(self):
res = self.client.get(self.users_url)
self.assertEqual(res.status_code, 200)
def test_create_user(self):
res = self.client.post(self.users_url, self.user_data, format="json")
self.assertEqual(res.status_code, 200)
def test_get_user_detail(self):
res = self.client.post(self.users_url, self.user_data, format="json")
res = self.client.get(self.users_detail)
self.assertEqual(res.status_code, 200)
def test_update_user(self):
res = self.client.post(self.users_url, self.user_data, format="json")
res = self.client.put(self.users_detail, self.user_data, format="json")
self.assertEqual(res.status_code, 200)
def test_delete_user(self):
res = self.client.post(self.users_url, self.user_data, format="json")
res = self.client.delete(self.users_detail)
self.assertEqual(res.status_code, 200) | none | 1 | 2.533701 | 3 | |
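The tests above rely entirely on a TestSetUp base class that is not part of this row. A plausible minimal version, assuming Django REST Framework's APITestCase and URL names users-list / users-detail (all inferred from the attribute names the tests use, none confirmed by the repository), would look like:

from django.urls import reverse
from rest_framework.test import APITestCase

class TestSetUp(APITestCase):
    def setUp(self):
        # Attribute names mirror those used by TestViews; the route names
        # and user fields below are assumptions.
        self.users_url = reverse("users-list")
        self.users_detail = reverse("users-detail", kwargs={"pk": 1})
        self.user_data = {"first_name": "Ada", "last_name": "Lovelace", "age": 28}
        return super().setUp()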
programs/bonus.py | VishalAgr11/CSE-programs | 1 | 6615369 | '''
Calculate Bonus marks of a student
moodle
'''
m=float(input())
if m==0:
print("Enter the appropriate mark")
else:
c=input()
if m>=80:
if c=='T':
print(0.08*m+m)
elif c=='L':
print(0.06*m+m)
elif 60<=m and m<80:
if c=='T':
print(0.06*m+m)
elif c=='L':
print(0.04*m+m)
elif 40<=m and m<60:
if c=='T':
print(0.04*m+m)
elif c=='L':
print(0.02*m+m)
else:
print(0)
| '''
Calculate Bonus marks of a student
moodle
'''
m=float(input())
if m==0:
print("Enter the appropriate mark")
else:
c=input()
if m>=80:
if c=='T':
print(0.08*m+m)
elif c=='L':
print(0.06*m+m)
elif 60<=m and m<80:
if c=='T':
print(0.06*m+m)
elif c=='L':
print(0.04*m+m)
elif 40<=m and m<60:
if c=='T':
print(0.04*m+m)
elif c=='L':
print(0.02*m+m)
else:
print(0)
| en | 0.701594 | Calculate Bonus marks of a student moodle | 3.609684 | 4 |
setup.py | mhuertascompany/sfh-inference | 0 | 6615370 | from setuptools import find_packages
from setuptools import setup
setup(name='sfh',
description='sfh package',
author='AstroInfo',
packages=find_packages(),
install_requires=['astropy', 'tensorflow_datasets']
) | from setuptools import find_packages
from setuptools import setup
setup(name='sfh',
description='sfh package',
author='AstroInfo',
packages=find_packages(),
install_requires=['astropy', 'tensorflow_datasets']
) | none | 1 | 1.328047 | 1 | |
PallindromeTwo.py | seidenstein/CC | 0 | 6615371 | # Palindrome Two
# Have the function PalindromeTwo(str) take the str parameter being passed and return the string
# true if the parameter is a palindrome, (the string is the same forward as it is backward)
# otherwise return the string false. The parameter entered may have punctuation and symbols
# but they should not affect whether the string is in fact a palindrome. For example:
# "Anne, I vote more cars race Rome-to-Vienna" should return true.
def PalindromeTwo(strParam):
# code goes here
#first get rid of all spaces/specialchars
strParam = ''.join(e for e in strParam if e.isalnum())
#make it all uppercase
strParam = strParam.upper()
chars = len(strParam)
half = chars//2
pal = True
for x in range(half):
if strParam[x] != strParam[chars-1-x]:
pal = False
break
return pal
# keep this function call here
print(PalindromeTwo(input())) | # Palindrome Two
# Have the function PalindromeTwo(str) take the str parameter being passed and return the string
# true if the parameter is a palindrome, (the string is the same forward as it is backward)
# otherwise return the string false. The parameter entered may have punctuation and symbols
# but they should not affect whether the string is in fact a palindrome. For example:
# "Anne, I vote more cars race Rome-to-Vienna" should return true.
def PalindromeTwo(strParam):
# code goes here
#first get rid of all spaces/specialchars
strParam = ''.join(e for e in strParam if e.isalnum())
#make it all uppercase
strParam = strParam.upper()
chars = len(strParam)
half = chars//2
pal = True
for x in range(half):
if strParam[x] != strParam[chars-1-x]:
pal = False
break
return pal
# keep this function call here
print(PalindromeTwo(input())) | en | 0.610935 | # Palindrome Two # Have the function PalindromeTwo(str) take the str parameter being passed and return the string # true if the parameter is a palindrome, (the string is the same forward as it is backward) # otherwise return the string false. The parameter entered may have punctuation and symbols # but they should not affect whether the string is in fact a palindrome. For example: # "Anne, I vote more cars race Rome-to-Vienna" should return true. # code goes here #first get rid of all spaces/specialchars #make it all uppercase # keep this function call here | 4.192941 | 4 |
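The index loop above can be replaced by a reversed-slice comparison using the same cleaning steps. Note that, like the original, this returns a Python bool, which prints as True/False rather than the lowercase strings the problem statement asks for.

def palindrome_two(s):
    # Keep alphanumerics, normalise case, compare with the reverse.
    s = ''.join(ch for ch in s if ch.isalnum()).upper()
    return s == s[::-1]

# palindrome_two("Anne, I vote more cars race Rome-to-Vienna") -> True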
app/__init__.py | Suspir0n/Programming-CEP-Search | 1 | 6615372 | <reponame>Suspir0n/Programming-CEP-Search
from flask import Flask, render_template, request
from .views import validation_cep, address_view
from .settings.connection import connect_db
from .settings.config import config_db, config_ma, secret_key
import requests
import json
def create_app():
app = Flask(__name__)
connect_db(app)
config_db(app)
config_ma(app)
secret_key(app)
return app
_app = create_app()
@_app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@_app.route('/search_cep', methods=['GET'])
def response(data):
return render_template('response.html', data=data)
@_app.route('/search_cep', methods=['POST'])
def search_cep():
cep_expected = str(request.form['cep'])
issave = str(request.form['save'])
got = validation_cep(cep_expected, issave)
return response(got)
@_app.route('/address', methods=['GET'])
def get_addresss():
return address_view.get_all()
@_app.route('/address/<uid>', methods=['GET'])
def get_address(uid):
return address_view.get_one(uid)
@_app.route('/address', methods=['POST'])
def post_address():
return address_view.post()
@_app.route('/address/<uid>', methods=['DELETE'])
def delete_adress(uid):
return address_view.delete(uid)
@_app.route('/address/<uid>', methods=['PUT'])
def update_address(uid):
return address_view.update(uid)
| from flask import Flask, render_template, request
from .views import validation_cep, address_view
from .settings.connection import connect_db
from .settings.config import config_db, config_ma, secret_key
import requests
import json
def create_app():
app = Flask(__name__)
connect_db(app)
config_db(app)
config_ma(app)
secret_key(app)
return app
_app = create_app()
@_app.route('/', methods=['GET'])
def index():
return render_template('index.html')
@_app.route('/search_cep', methods=['GET'])
def response(data):
return render_template('response.html', data=data)
@_app.route('/search_cep', methods=['POST'])
def search_cep():
cep_expected = str(request.form['cep'])
issave = str(request.form['save'])
got = validation_cep(cep_expected, issave)
return response(got)
@_app.route('/address', methods=['GET'])
def get_addresss():
return address_view.get_all()
@_app.route('/address/<uid>', methods=['GET'])
def get_address(uid):
return address_view.get_one(uid)
@_app.route('/address', methods=['POST'])
def post_address():
return address_view.post()
@_app.route('/address/<uid>', methods=['DELETE'])
def delete_adress(uid):
return address_view.delete(uid)
@_app.route('/address/<uid>', methods=['PUT'])
def update_address(uid):
return address_view.update(uid) | none | 1 | 2.175884 | 2 | |
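The settings helpers imported at the top of app/__init__.py (connect_db, config_db, config_ma, secret_key) are not included in this row. Under a common Flask-SQLAlchemy layout they would be thin wrappers like the sketch below; the configuration keys shown are standard Flask/SQLAlchemy keys, but the database URI and defaults are assumptions, not this project's values.

import os

def config_db(app):
    # Hypothetical settings/config.py helper; the URI default is an assumption.
    app.config["SQLALCHEMY_DATABASE_URI"] = os.getenv("DATABASE_URL", "sqlite:///cep.db")
    app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False

def secret_key(app):
    app.config["SECRET_KEY"] = os.getenv("SECRET_KEY", "dev-only-key")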
analytical_code/clct-MSD.py | yizhang-code/dynamics-simulations-of-human-movement | 0 | 6615373 | <reponame>yizhang-code/dynamics-simulations-of-human-movement
import os
import sys
# import random
from math import sqrt
import numpy as np
# import matplotlib.pyplot as plt
cur_dir = os.getcwd()
dt = float(sys.argv[1])
max_t = int(sys.argv[2])  # maximum lag in frames; assumed to be the second CLI argument
class LoadAllxy(object):
"""docstring for LoadAllxy"""
def __init__(self, particle_indx: int):
self.loc = []
self.particle_indx = particle_indx
self.particle_num = 0
@staticmethod
def exist(x, y):
if x != -1 and y != -1:
return True
else:
return False
@staticmethod
def find_center(lx, ly, rx, ry):
lx, ly, rx, ry = float(lx), float(ly), float(rx), float(ry)
if LoadAllxy.exist(lx, ly) and LoadAllxy.exist(rx, ry):
cx, cy = (lx + rx) / 2, (ly + ry) / 2
dx, dy = rx - lx, ry - ly
ds = sqrt(dx**2 + dy**2)
if ds != 0:
flag = 1
Tx, Ty = -dy / ds, dx / ds
abs_ori = np.arctan2(Ty, Tx)
else:
flag = -1
abs_ori = None
else:
flag = -1
cx, cy = -1, -1
abs_ori = None
return (flag, cx, cy, abs_ori)
def extract(self, sec_loc):
lx, ly, rx, ry = sec_loc[0], sec_loc[1], sec_loc[2], sec_loc[3]
c_xy = LoadAllxy.find_center(lx, ly, rx, ry)
# print(lx, ly, rx, ry, c_xy)
self.loc.append(c_xy)
def load_all_xy(self):
with open('{}/{}'.format(cur_dir, 'all_xy.csv'), 'r') as rf:
for _, line in enumerate(rf):
line = line.split(',')[1:]
self.extract(line[self.particle_indx *
4:self.particle_indx * 4 + 4])
self.particle_num = int(len(line) / 4)
return self.loc
class ClctMSD(object):
"""docstring for ClctMSD"""
def __init__(self, loc, data: dict, particle_num: int):
self.loc = loc
self.max_t = max_t
self.data = data
self.chunk_loc = {}
self.chunk_ori = {}
self.particle_num = particle_num
def chunk_raw(self):
flag = 0
self.chunk_loc[flag], self.chunk_ori[flag] = [], []
for i, item in enumerate(self.loc):
if item[0] != -1:
self.chunk_loc[flag].append((item[1], item[2]))
self.chunk_ori[flag].append(item[3])
if len(self.chunk_loc[flag]) != 0 and item[0] == -1:
flag += 1
self.chunk_loc[flag] = []
self.chunk_ori[flag] = []
else:
pass
@staticmethod
def clct_dis(x1, y1, x2, y2):
return (x1 - x2)**2 + (y1 - y2)**2
@staticmethod
def map2range(ori_list):
if len(ori_list) == 0:
return ori_list
ranged_ori_list = [ori_list[0]]
for i in range(1, len(ori_list)):
increment = ori_list[i] - ori_list[i - 1]
if increment > np.pi:
increment -= 2 * np.pi
if increment < -np.pi:
increment += 2 * np.pi
ranged_ori_list.append(ranged_ori_list[i - 1] + increment)
return ranged_ori_list
def output(self, ori_list, flag):
if not os.path.exists('{}/abs-angles'.format(cur_dir)):
os.mkdir('{}/abs-angles'.format(cur_dir))
if len(ori_list) <= 600:
return True
else:
with open('{}/abs-angles/{}_{}_ori.csv'.format(cur_dir, self.particle_num, flag), 'w') as wf:
# with open('{}/abs-angles/{}_ori.csv'.format(cur_dir, 'abs-agl-test', flag), 'w') as wf:
for ori in ori_list:
wf.write('{}\n'.format(ori))
def clctmsd(self, t):
sum_dx, sum_dy, sum_da = 0, 0, 0
sum_dr2, sum_da2, N = 0, 0, 0
sum_dxda, sum_dyda, sum_dxdy = 0, 0, 0
for flag, loc_list in self.chunk_loc.items():
ori_list = self.map2range(self.chunk_ori[flag])
for i in range(1, len(ori_list)):
if ori_list[i] - ori_list[i - 1] > np.pi:
print(i, ori_list[i], ori_list[i - 1])
self.output(ori_list, flag)
if len(loc_list) > t:
for i in range(len(loc_list) - t):
dx = loc_list[i + t][0] - loc_list[i][0]
dy = loc_list[i + t][1] - loc_list[i][1]
da = ori_list[i + t] - ori_list[i]
sum_dr2 += dx * dx + dy * dy
sum_da2 += da * da
sum_dx += dx
sum_dy += dy
sum_da += da
sum_dxdy += dx * dy
sum_dxda += dx * da
sum_dyda += dy * da
N += 1
if N == 0:
return (0, 0, 0, 0, 0, 0, 0, 0, 0)
else:
return (sum_dr2 / N, sum_da2 / N, sum_dx / N, sum_dy / N, sum_da / N, sum_dxdy / N, sum_dxda / N, sum_dyda / N, 1)
def find_msd_vs_t(self):
self.chunk_raw()
for t in range(self.max_t):
print(self.particle_num, t)
dr2, da2, dx, dy, da, dxdy, dxda, dyda, count = self.clctmsd(t)
if t not in self.data:
self.data[t] = [0, 0, 0, 0, 0, 0, 0, 0, 0]
self.data[t][0] += dr2
self.data[t][1] += da2
self.data[t][2] += dx
self.data[t][3] += dy
self.data[t][4] += da
self.data[t][5] += dxdy
self.data[t][6] += dxda
self.data[t][7] += dyda
self.data[t][8] += count
return self.data
if __name__ == '__main__':
data = {}
particle_0 = LoadAllxy(0)
particle_0_loc = particle_0.load_all_xy()
particle_num = particle_0.particle_num
data = ClctMSD(particle_0_loc, data, 0).find_msd_vs_t()
for num in range(1, particle_num):
print(num)
data = ClctMSD(LoadAllxy(num).load_all_xy(), data, num).find_msd_vs_t()
with open('{}/{}'.format(cur_dir, 'msd_vs_t-test0.csv'), 'w') as wf:
for key in sorted(data.keys()):
dr2, da2, dx, dy, da, dxdy, dxda, dyda, count = data[key][0], data[key][1], data[key][
2], data[key][3], data[key][4], data[key][5], data[key][6], data[key][7], data[key][8]
wf.write('{},{},{},{},{},{},{},{},{}'.format(key * dt, dr2 / count, da2 / count,
dx / count, dy / count, da / count, dxdy / count, dxda / count, dyda / count))
wf.write('\n')
| import os
import sys
# import random
from math import sqrt
import numpy as np
# import matplotlib.pyplot as plt
cur_dir = os.getcwd()
dt = float(sys.argv[1])
max_t = int(sys.argv[2])  # maximum lag in frames; assumed to be the second CLI argument
class LoadAllxy(object):
"""docstring for LoadAllxy"""
def __init__(self, particle_indx: int):
self.loc = []
self.particle_indx = particle_indx
self.particle_num = 0
@staticmethod
def exist(x, y):
if x != -1 and y != -1:
return True
else:
return False
@staticmethod
def find_center(lx, ly, rx, ry):
lx, ly, rx, ry = float(lx), float(ly), float(rx), float(ry)
if LoadAllxy.exist(lx, ly) and LoadAllxy.exist(rx, ry):
cx, cy = (lx + rx) / 2, (ly + ry) / 2
dx, dy = rx - lx, ry - ly
ds = sqrt(dx**2 + dy**2)
if ds != 0:
flag = 1
Tx, Ty = -dy / ds, dx / ds
abs_ori = np.arctan2(Ty, Tx)
else:
flag = -1
abs_ori = None
else:
flag = -1
cx, cy = -1, -1
abs_ori = None
return (flag, cx, cy, abs_ori)
def extract(self, sec_loc):
lx, ly, rx, ry = sec_loc[0], sec_loc[1], sec_loc[2], sec_loc[3]
c_xy = LoadAllxy.find_center(lx, ly, rx, ry)
# print(lx, ly, rx, ry, c_xy)
self.loc.append(c_xy)
def load_all_xy(self):
with open('{}/{}'.format(cur_dir, 'all_xy.csv'), 'r') as rf:
for _, line in enumerate(rf):
line = line.split(',')[1:]
self.extract(line[self.particle_indx *
4:self.particle_indx * 4 + 4])
self.particle_num = int(len(line) / 4)
return self.loc
class ClctMSD(object):
"""docstring for ClctMSD"""
def __init__(self, loc, data: dict, particle_num: int):
self.loc = loc
self.max_t = max_t
self.data = data
self.chunk_loc = {}
self.chunk_ori = {}
self.particle_num = particle_num
def chunk_raw(self):
flag = 0
self.chunk_loc[flag], self.chunk_ori[flag] = [], []
for i, item in enumerate(self.loc):
if item[0] != -1:
self.chunk_loc[flag].append((item[1], item[2]))
self.chunk_ori[flag].append(item[3])
if len(self.chunk_loc[flag]) != 0 and item[0] == -1:
flag += 1
self.chunk_loc[flag] = []
self.chunk_ori[flag] = []
else:
pass
@staticmethod
def clct_dis(x1, y1, x2, y2):
return (x1 - x2)**2 + (y1 - y2)**2
@staticmethod
def map2range(ori_list):
if len(ori_list) == 0:
return ori_list
ranged_ori_list = [ori_list[0]]
for i in range(1, len(ori_list)):
increment = ori_list[i] - ori_list[i - 1]
if increment > np.pi:
increment -= 2 * np.pi
if increment < -np.pi:
increment += 2 * np.pi
ranged_ori_list.append(ranged_ori_list[i - 1] + increment)
return ranged_ori_list
def output(self, ori_list, flag):
if not os.path.exists('{}/abs-angles'.format(cur_dir)):
os.mkdir('{}/abs-angles'.format(cur_dir))
if len(ori_list) <= 600:
return True
else:
with open('{}/abs-angles/{}_{}_ori.csv'.format(cur_dir, self.particle_num, flag), 'w') as wf:
# with open('{}/abs-angles/{}_ori.csv'.format(cur_dir, 'abs-agl-test', flag), 'w') as wf:
for ori in ori_list:
wf.write('{}\n'.format(ori))
def clctmsd(self, t):
sum_dx, sum_dy, sum_da = 0, 0, 0
sum_dr2, sum_da2, N = 0, 0, 0
sum_dxda, sum_dyda, sum_dxdy = 0, 0, 0
for flag, loc_list in self.chunk_loc.items():
ori_list = self.map2range(self.chunk_ori[flag])
for i in range(1, len(ori_list)):
if ori_list[i] - ori_list[i - 1] > np.pi:
print(i, ori_list[i], ori_list[i - 1])
self.output(ori_list, flag)
if len(loc_list) > t:
for i in range(len(loc_list) - t):
dx = loc_list[i + t][0] - loc_list[i][0]
dy = loc_list[i + t][1] - loc_list[i][1]
da = ori_list[i + t] - ori_list[i]
sum_dr2 += dx * dx + dy * dy
sum_da2 += da * da
sum_dx += dx
sum_dy += dy
sum_da += da
sum_dxdy += dx * dy
sum_dxda += dx * da
sum_dyda += dy * da
N += 1
if N == 0:
return (0, 0, 0, 0, 0, 0, 0, 0, 0)
else:
return (sum_dr2 / N, sum_da2 / N, sum_dx / N, sum_dy / N, sum_da / N, sum_dxdy / N, sum_dxda / N, sum_dyda / N, 1)
def find_msd_vs_t(self):
self.chunk_raw()
for t in range(self.max_t):
print(self.particle_num, t)
dr2, da2, dx, dy, da, dxdy, dxda, dyda, count = self.clctmsd(t)
if t not in self.data:
self.data[t] = [0, 0, 0, 0, 0, 0, 0, 0, 0]
self.data[t][0] += dr2
self.data[t][1] += da2
self.data[t][2] += dx
self.data[t][3] += dy
self.data[t][4] += da
self.data[t][5] += dxdy
self.data[t][6] += dxda
self.data[t][7] += dyda
self.data[t][8] += count
return self.data
if __name__ == '__main__':
data = {}
particle_0 = LoadAllxy(0)
particle_0_loc = particle_0.load_all_xy()
particle_num = particle_0.particle_num
data = ClctMSD(particle_0_loc, data, 0).find_msd_vs_t()
for num in range(1, particle_num):
print(num)
data = ClctMSD(LoadAllxy(num).load_all_xy(), data, num).find_msd_vs_t()
with open('{}/{}'.format(cur_dir, 'msd_vs_t-test0.csv'), 'w') as wf:
for key in sorted(data.keys()):
dr2, da2, dx, dy, da, dxdy, dxda, dyda, count = data[key][0], data[key][1], data[key][
2], data[key][3], data[key][4], data[key][5], data[key][6], data[key][7], data[key][8]
wf.write('{},{},{},{},{},{},{},{},{}'.format(key * dt, dr2 / count, da2 / count,
dx / count, dy / count, da / count, dxdy / count, dxda / count, dyda / count))
wf.write('\n') | en | 0.618418 | # import random # import matplotlib.pyplot as plt docstring for LoadAllxy # print(lx, ly, rx, ry, c_xy) docstring for ClctMSD # with open('{}/abs-angles/{}_ori.csv'.format(cur_dir, 'abs-agl-test', flag), 'w') as wf: | 2.427609 | 2 |
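map2range in clct-MSD.py is a phase-unwrapping step: whenever consecutive orientations jump by more than pi, a full turn is added or subtracted so angular displacements are measured on a continuous signal, which is exactly what numpy.unwrap does. A small numeric check:

import numpy as np

# Raw orientations crossing the +/-pi branch cut of arctan2:
angles = [3.0, 3.1, -3.1, -3.0]
print(np.unwrap(angles))  # [3.0, 3.1, ~3.183, ~3.283] -- continuous
# map2range(angles) yields the same sequence, so differences such as
# ori_list[i + t] - ori_list[i] remain physically meaningful lag displacements.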
tests/test_recommend_programs.py | laowantong/paroxython | 31 | 6615374 | from typed_ast.ast3 import literal_eval
import json
from pathlib import Path
import pytest
from make_snapshot import make_snapshot
import context
from paroxython.recommend_programs import Recommendations
def test_recommend_program(capsys):
rec = Recommendations(
db=json.loads(Path("examples/dummy/programs_db.json").read_text()),
base_path=Path("examples/dummy/"),
)
rec.run_pipeline(literal_eval(Path("examples/dummy/pipe.py").read_text()))
print(rec.selected_programs)
assert rec.selected_programs == {
"prg2.py",
# "O/N/P",
# "Y/T/Q",
# "Y",
# "X/S/M/L/R/D",
# "O",
# "O/C/H/B",
# "X/S/M",
# "X/S/M/L/R",
# "Y/T",
# "O/C",
# "X/G",
# "X/S/M/L/V",
# "O/C/H/B/I",
"prg3.py",
# "O/N/P",
# "X/K",
# "Y/T",
# "X/S/M/L/V",
# "O/C/H/B",
# "X/S/M/L/R",
# "O/J",
# "X/S/M",
# "O/C/F/U",
# "O/C/H",
# "X/S",
# "Y",
# "O",
# "X/S/M/L",
# "Y/E",
}
print(rec.result)
assert rec.result == [
(5, "impart", ["prg8.py"]),
(6, "exclude", ["prg7.py", "prg9.py"]),
(7, "exclude", ["prg4.py", "prg5.py", "prg6.py"]),
(8, "include", ["prg1.py"]),
(9, "hide", []),
]
costs = {taxon: rec.assess.taxon_cost(taxon) for taxon in rec.db_programs["prg2.py"]["taxa"]}
print(costs)
assert costs == {
"O/N/P": 0,
"Y/T/Q": 0.375,
"Y": 0,
"X/S/M/L/R/D": 0,
"O": 0,
"O/C/H/B": 0,
"X/S/M": 0,
"X/S/M/L/R": 0,
"Y/T": 0.25,
"O/C": 0,
"X/G": 0.25,
"X/S/M/L/V": 0,
"O/C/H/B/I": 0.03125,
}
text = rec.get_markdown(span_column_width=10)
make_snapshot(Path("examples/dummy/programs_recommendations.md"), text, capsys)
def test_recommend_programming_idioms(capsys):
path = Path("examples/idioms/programs_db.json")
rec = Recommendations(db=json.loads(path.read_text()))
rec.run_pipeline()
output_path = path.parent / "programs_recommendations.md"
rec.get_markdown() # for coverage
make_snapshot(
output_path,
rec.get_markdown(sorting_strategy="lexicographic", grouping_strategy="no_group"),
capsys,
)
def test_recommend_mini_programs():
db = json.loads(Path("examples/mini/programs_db.json").read_text())
proper_taxa = {}
for program in ["assignment.py", "collatz.py", "fizzbuzz.py", "is_even.py"]:
proper_taxa[program] = set(db["programs"][program]["taxa"])
rec = Recommendations(db)
original = proper_taxa["fizzbuzz.py"] | proper_taxa["collatz.py"]
assert all(
taxon.startswith("meta")
for taxon in original.difference(db["programs"]["fizzbuzz.py"]["taxa"])
)
commands = [
{
"operation": "exclude",
"data": [
"assignment.py",
"fizzbuzz.py", # imported by is_even.py, consequently excluded
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"collatz.py"}
assert not rec.imparted_knowledge
# A command excluding a sequence is equivalent to a sequence of excluding commands
commands = [
{"operation": "exclude", "data": ["assignment.py"]},
{"operation": "exclude", "data": ["fizzbuzz.py"]},
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"collatz.py"}
assert not rec.imparted_knowledge
commands = [{"operation": "include", "data": ["this_program_does_not_exist.py"]}]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert not rec.selected_programs
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
"assignment.py",
"fizzbuzz.py", # imported by is_even.py, which nevertheless will not be included
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"assignment.py", "fizzbuzz.py"}
assert not rec.imparted_knowledge
# A command including a sequence is not equivalent to a sequence of including commands
commands = [
{"operation": "include", "data": ["assignment.py"]},
{"operation": "include", "data": ["fizzbuzz.py"]},
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert not rec.selected_programs
assert not rec.imparted_knowledge
commands = [
{
"operation": "impart",
"data": [
"assignment.py", # exclude it, and impart its taxa
"fizzbuzz.py", # idem, but ignore its imports or exports
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"collatz.py", "is_even.py"}
assert proper_taxa["assignment.py"].issubset(rec.imparted_knowledge)
assert proper_taxa["fizzbuzz.py"].issubset(rec.imparted_knowledge)
assert not proper_taxa["is_even.py"].issubset(rec.imparted_knowledge)
assert not proper_taxa["collatz.py"].issubset(rec.imparted_knowledge)
commands = [{"operation": "impart", "data": ["operator/arithmetic"]}]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"assignment.py", "collatz.py", "fizzbuzz.py", "is_even.py"}
print(rec.imparted_knowledge)
assert rec.imparted_knowledge == {
"operator/arithmetic/multiplication",
"operator/arithmetic/modulo",
"operator",
"operator/arithmetic",
"operator/arithmetic/addition",
}
commands = [
{
"operation": "exclude",
"data": [
"var/assignment/explicit/single", # featured directly by assignment.py
# and collatz.py, which is imported by fizzbuzz.py and is_even.py
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert not rec.selected_programs
assert not rec.imparted_knowledge
commands = [
{
"operation": "exclude",
"data": [
"flow/conditional/else/if", # featured directly by fizzbuzz.py,
# which is imported by is_even.py
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"assignment.py", "collatz.py"}
commands = [
{
"operation": "include",
"data": [
("meta/program", "not contains", "flow/conditional/else/if"),
# There is a subtle difference with the previous one, since "exclude" follows the
# importations, while "include" does not.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"assignment.py", "collatz.py", "is_even.py"}
commands = [
{
"operation": "include",
"data": [
"flow/conditional/else/if",
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"fizzbuzz.py"}
commands = [
{
"operation": "exclude",
"data": [
("meta/program", "not contains", "flow/conditional/else/if"),
# "assignment.py", "is_even.py", "collatz.py" are excluded since they don't feature
# an `elif`. "fizzbuzz.py" features an `elif`, but is excluded since it imports an
# excluded program ("collatz.py").
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert not rec.selected_programs
commands = [
{
"operation": "exclude",
"data": [
"flow/conditional/else/if", # Although not recommended, it is possible to mix
"assignment.py" # taxa and programs (ending with ".py") in a same command.
# Crucially, this avoids to specify whether the command should be applied on
# taxa or programs.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"collatz.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
"var/assignment/explicit/single", # featured by assignment.py and collatz.py
# Although the latter is imported by both fizzbuzz.py and is_even.py, they are
# not included in the result
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"assignment.py", "collatz.py"}
assert not rec.imparted_knowledge
commands = [{"operation": "include", "data": ["this_taxon_does_not_exist"]}]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert not rec.selected_programs
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("var/assignment/explicit", "inside", "flow/loop"),
], # featured by collatz.py only
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"collatz.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "exclude",
"data": [
("var/assignment/explicit", "inside", "flow/loop"), # featured by collatz.py,
# and indirectly by fizzbuzz.py and is_even.py
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"assignment.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("var/assignment/explicit", "not inside", "flow/loop"), # Must read as:
# Include all programs featuring an assignment, except those where this assignment
# is inside a loop. Hence, this includes assignment.py, even if it does not feature
# a loop.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"assignment.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("var/assignment/explicit", "inside", "meta/program"), # This comes down to
# including all programs featuring an assignment.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
print(rec.selected_programs)
assert rec.selected_programs == {"assignment.py", "collatz.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("var/assignment/explicit", "not inside", "meta/program"), # This comes down to
# exclude all programs either featuring or not featuring an assignment!
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
print(rec.selected_programs)
assert not rec.selected_programs
assert not rec.imparted_knowledge
commands = [
{
"operation": "impart", # Imparting triples is currently not supported (ignored).
"data": [("var/assignment/explicit", "inside", "flow/loop")],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {
"assignment.py",
"collatz.py",
"fizzbuzz.py",
"is_even.py",
}
assert not rec.imparted_knowledge
commands = [{"operation": "include", "data": 42}] # malformed source => ignored command
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {
"assignment.py",
"collatz.py",
"fizzbuzz.py",
"is_even.py",
}
assert not rec.imparted_knowledge
commands = [{"operation": "include", "data": [42]}] # malformed pattern => ignored pattern
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == set()
assert not rec.imparted_knowledge
commands = [{"data": []}] # a command without operation is ignored
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {
"assignment.py",
"collatz.py",
"fizzbuzz.py",
"is_even.py",
}
assert not rec.imparted_knowledge
commands = [
{
"operation": "undefined_command", # an undefined command is ignored
"data": "assignment.py",
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {
"assignment.py",
"collatz.py",
"fizzbuzz.py",
"is_even.py",
}
assert not rec.imparted_knowledge
commands = [
{
"operation": "exclude",
"data": [
("var/assignment/explicit/single", "after", "call/subroutine/builtin/print"),
# collatz.py and fizzbuzz.py have an assignment after a print.
# is_even.py imports fizzbuzz.py.
# Consequently, these three programs are excluded.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"assignment.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "exclude",
"data": [
("operator/arithmetic/addition", "equals", "operator/arithmetic/multiplication"),
# "operator/arithmetic/addition" and "operator/arithmetic/multiplication" are both
# featured on the same line of collatz.py, and indirectly by fizzbuzz.py and
# is_even.py. Therefore, excluding this taxon keeps only assignment.py.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"assignment.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "exclude",
"data": [
("condition/equality", "inside", "def/subroutine/function"),
# "condition/equality" is inside "def/subroutine/function" in is_even.py, which is not
# imported anywhere.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"collatz.py", "assignment.py", "fizzbuzz.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "exclude",
"data": [
("call/subroutine/builtin/range", "inside", "flow/conditional"),
# "call/subroutine/builtin/range" is not inside "flow/conditional" anywhere.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {
"is_even.py",
"fizzbuzz.py",
"assignment.py",
"collatz.py",
}
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("var/assignment/explicit/single", "after", "call/subroutine/builtin/print"),
# The taxon "var/assignment/explicit/single" is featured by assignment.py and
# collatz.py. In collatz.py, it appears after a taxon "call/subroutine/builtin/print".
# Consequently, it should be included in the results, but not the programs which
# import it: fizzbuzz.py and is_even.py.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"collatz.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("operator/arithmetic/modulo", "equals", "type/number/integer/literal"),
# "operator/arithmetic/modulo" and "type/number/integer/literal" are both featured
# on the same line in all programs except assignment.py
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"collatz.py", "is_even.py", "fizzbuzz.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("operator/arithmetic/modulo", "x == y", "type/number/integer/literal"),
# The same with "x == y" instead of "equals"
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"collatz.py", "is_even.py", "fizzbuzz.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("condition/equality", "inside", "def/subroutine/function"),
# "condition/equality" is inside "def/subroutine/function" in is_even.py, which is not
# imported anywhere.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"is_even.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("condition/equality$", "inside", "def"),
# "condition/equality" (strictly, note the dollar sign) is inside "def/subroutine/function"
# in is_even.py and inside "def/subroutine/procedure" in collatz.py. Both will be
# included.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"collatz.py", "is_even.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("call/subroutine/builtin/range", "inside", "flow/conditional"),
# "call/subroutine/builtin/range" is not inside "flow/conditional" anywhere.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert not rec.selected_programs
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("call/subroutine/builtin/print", "is", "call/subroutine/builtin/print"),
# "call/subroutine/builtin/print" may appear several times in the same program, but
# never on the same line.
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert not rec.selected_programs
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("type/number/integer/literal$", "is", "type/number/integer/literal$"),
# "type/number/integer/literal" appears twice on the same line in fizzbuzz.py and
# collatz.py
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"fizzbuzz.py", "collatz.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "include",
"data": [
("call/subroutine/builtin/print", "not inside", "flow/loop/exit/late"),
# A print statement is featured inside a loop by both collatz.py and fizzbuzz.py.
# However, in collatz.py, there exists a print statement which is not inside the
# loop. This makes it satisfy the predicate. Note that assignment.py and is_even.py
# are not included in the result, since they don't feature (at least directly)
# "call/subroutine/builtin/print".
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
assert rec.selected_programs == {"collatz.py"}
assert not rec.imparted_knowledge
commands = [
{
"operation": "exclude",
"data": [
("call/subroutine/builtin/print", "not inside", "flow/loop"),
# Exclude the programs which feature a print statement outside a loop. This does
# not exclude assignment.py, which does not feature a print statement. This
# excludes collatz, which features a print statement outside a loop, even if it
# also features a print statement inside a loop. fizzbuzz.py and is_even.py are
# excluded too, since they import collatz.py
],
}
]
rec = Recommendations(db)
rec.run_pipeline(commands)
print(rec.selected_programs)
assert rec.selected_programs == {"assignment.py"}
assert not rec.imparted_knowledge
db = json.loads(Path("examples/simple/programs_db.json").read_text())
# extract_1 (start)
a = "flow/conditional"
b = "flow/loop"
_a = ("meta/program", "not contains", "flow/conditional")
_b = ("meta/program", "not contains", "flow/loop")
p0 = "01_hello_world.py" # [ ] loop [ ] conditional
p1 = "03_friends.py" # [X] loop [ ] conditional
p2 = "14_median.py" # [ ] loop [X] conditional
p3 = "06_regex.py" # [X] loop [X] conditional
base_1 = [p0, p1, p2, p3]
# fmt: off
pipelines_1 = [
(set() , [("include", [a]), ("include", [_a])]), # a & ~a
({ p2, p3}, [("include", [a])]), # a
({p0, p1, }, [("include", [_a])]), # ~a
({ p1, p3}, [("include", [b])]), # b
({p0, p2, }, [("include", [_b])]), # ~b
({ p1, p2, p3}, [("include", [a, b])]), # a | b
({p0, }, [("include", [_a]), ("include", [_b])]), # ~a & ~b
({ p3}, [("include", [a]), ("include", [b])]), # a & b
({p0, p1, p2, }, [("include", [_a, _b])]), # ~a | ~b
({p0, p1, p3}, [("include", [_a, b])]), # ~a | b
({ p2, }, [("include", [a]), ("include", [_b])]), # a & ~b
({ p1, }, [("include", [_a]), ("include", [b])]), # ~a & b
({p0, p2, p3}, [("include", [a, _b])]), # a | ~b
({p0, p3}, [("include", [_a, b]), ("include", [a, _b])]), # (~a | b) & (a | ~b)
({ p1, p2, }, [("include", [a, b]), ("include", [_a, _b])]), # (a | b) & (~a | ~b)
({p0, p1, p2, p3}, [("include", [a, _a]), ("include", [b, _b])]), # (a | ~a) & (b | ~b)
]
# fmt: on
# extract_1 (stop)
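# Each expected set assumes the pipeline is run together with a final inclusion of base_1
# (see the test below), which restricts every result to the four programs above.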
@pytest.mark.parametrize("expected_programs, commands", pipelines_1)
def test_recommend_simple_programs_1(expected_programs, commands):
rec = Recommendations(db)
rec.run_pipeline(
[{"operation": operation, "data": data} for (operation, data) in commands]
+ [{"operation": "include", "data": base_1}]
)
print(rec.selected_programs)
assert rec.selected_programs == expected_programs
holds_abstr = "def"
holds_assignment = "var/assignment/explicit"
holds_asg_in_sub = ("var/assignment/explicit", "inside", "def")
lacks_assignment = ("meta/program", "not contains", "var/assignment/explicit")
lacks_abstr = ("meta/program", "not contains", "def")
lacks_asg_or_sub = ("meta/program", "not contains", "def|var/assignment/explicit")
lacks_asg_in_sub = ("def", "not contains", "var/assignment/explicit")
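# Taxon patterns are regular expressions, so "def|var/assignment/explicit" in lacks_asg_or_sub
# matches either taxon: a program satisfies that "not contains" predicate only if it features neither.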
p0 = "01_hello_world.py" # [ ] def [ ] assignment [ ] inside *
p1 = "05_greet.py" # [X] def [ ] assignment [ ] inside
p2 = "02_input_name.py" # [ ] def [X] assignment [ ] inside
p3 = "16_csv.py" # [X] def [X] assignment [ ] inside *
p4 = "12_classes.py" # [X] def [X] assignment [X] inside
base_2 = [p0, p1, p2, p3, p4]
# fmt: off
pipelines_2 = [
({ p1, p2, p3, p4}, [("include", [holds_abstr, holds_assignment])]),
({p0, }, [("exclude", [holds_abstr, holds_assignment])]),
({ p1, }, [("include all", [holds_abstr, lacks_assignment])]),
({p0, p2, p3, p4}, [("exclude all", [holds_abstr, lacks_assignment])]),
({p0, p1, p3, p4}, [("include", [holds_abstr, lacks_assignment])]),
({ p2, }, [("exclude", [holds_abstr, lacks_assignment])]),
({p0, p1, p2, p4}, [("include", [holds_asg_in_sub, lacks_abstr, lacks_assignment])]),
({ p3 }, [("exclude", [holds_asg_in_sub, lacks_abstr, lacks_assignment])]),
({ p4}, [("include", [holds_asg_in_sub])]),
({p0, p1, p2, p3 }, [("exclude", [holds_asg_in_sub])]),
({ p2, p3, p4}, [("include", [holds_assignment])]),
({p0, p1, }, [("exclude", [holds_assignment])]),
({ p1, p3, p4}, [("include", [holds_abstr])]),
({p0, p2, }, [("exclude", [holds_abstr])]),
({p0, p1, p4}, [("include", [holds_asg_in_sub, lacks_assignment])]),
({ p2, p3 }, [("exclude", [holds_asg_in_sub, lacks_assignment])]),
({ p3, p4}, [("include all", [holds_abstr, holds_assignment])]),
({p0, p1, p2, }, [("exclude all", [holds_abstr, holds_assignment])]),
({p0, p4}, [("include", [lacks_asg_or_sub, holds_asg_in_sub])]),
({ p1, p2, p3 }, [("exclude", [lacks_asg_or_sub, holds_asg_in_sub])]),
({p0, p2, p4}, [("include", [holds_asg_in_sub, lacks_abstr])]),
({ p1, p3, }, [("exclude", [holds_asg_in_sub, lacks_abstr])]),
({ p2, p4}, [("include", [holds_asg_in_sub, lacks_abstr]),
("exclude", [lacks_asg_or_sub])]),
({p0, p1, p3 }, [("include", [holds_abstr, lacks_assignment]),
("exclude", [holds_asg_in_sub])]),
({ p1, p4}, [("include", [holds_asg_in_sub, lacks_assignment]),
("exclude", [lacks_asg_or_sub])]),
({p0, p2, p3 }, [("include", [holds_assignment, lacks_abstr]),
("exclude", [holds_asg_in_sub])]),
({ p1, p2, }, [("include", [holds_abstr, holds_assignment]),
("exclude all", [holds_abstr, holds_assignment])]),
({p0, p3, p4}, [("include", [holds_abstr, lacks_assignment]),
("exclude all", [holds_abstr, lacks_assignment])]),
({p0, p3 }, [("include", [holds_abstr, lacks_assignment]),
("exclude all", [holds_abstr, lacks_assignment]),
("exclude", [holds_asg_in_sub])]),
({ p1, p2, p4}, [("include", [holds_asg_in_sub, lacks_abstr, lacks_assignment]),
("exclude", [lacks_asg_or_sub])]),
({p0, p1, p2, p3, p4}, []),
]
# fmt: on
@pytest.mark.parametrize("expected_programs, commands", pipelines_2)
def test_recommend_simple_programs_2(expected_programs, commands):
rec = Recommendations(db)
rec.run_pipeline(
[{"operation": operation, "data": data} for (operation, data) in commands]
+ [{"operation": "include", "data": base_2}]
)
print(rec.selected_programs)
assert rec.selected_programs == expected_programs
# extract_2 (start)
holds_parallel_tuple = "var/assignment/explicit/parallel"
holds_ordinary_tuple = ("type/sequence/tuple", "is not", "var/assignment/explicit/parallel")
lacks_parallel_tuple = ("meta/program", "not contains", "var/assignment/explicit/parallel")
p0 = "01_hello_world.py" # [ ] ordinary tuple [ ] parallel tuple
p1 = "11_bottles.py" # [X] ordinary tuple [ ] parallel tuple
p2 = "04_fibonacci.py" # [ ] ordinary tuple [X] parallel tuple
p3 = "18_queens.py" # [X] ordinary tuple [X] parallel tuple
base_3 = [p0, p1, p2, p3]
# fmt: off
pipelines_3 = [
({ p2, p3}, [("include", [holds_parallel_tuple])]),
({p0, p1, }, [("exclude", [holds_parallel_tuple])]),
({ p1, p3}, [("include", [holds_ordinary_tuple])]),
({p0, p2, }, [("exclude", [holds_ordinary_tuple])]),
({ p1, p2, p3}, [("include", [holds_parallel_tuple, holds_ordinary_tuple])]),
({p0, }, [("exclude", [holds_parallel_tuple, holds_ordinary_tuple])]),
({ p3}, [("include all", [holds_parallel_tuple, holds_ordinary_tuple])]),
({p0, p1, p2, }, [("exclude all", [holds_parallel_tuple, holds_ordinary_tuple])]),
({p0, p1, p3}, [("include", [lacks_parallel_tuple, holds_ordinary_tuple])]),
({ p2, }, [("exclude", [lacks_parallel_tuple, holds_ordinary_tuple])]),
({ p1, }, [("include all", [lacks_parallel_tuple, holds_ordinary_tuple])]),
({p0, p2, p3}, [("exclude all", [lacks_parallel_tuple, holds_ordinary_tuple])]),
({p0, p3}, [("include", [lacks_parallel_tuple, holds_ordinary_tuple]),
("exclude all", [lacks_parallel_tuple, holds_ordinary_tuple])]),
({ p1, p2, }, [("include", [holds_parallel_tuple, holds_ordinary_tuple]),
("exclude all", [holds_parallel_tuple, holds_ordinary_tuple])]),
({p0, p1, p2, p3}, []),
]
# fmt: on
# extract_2 (stop)
@pytest.mark.parametrize("expected_programs, commands", pipelines_3)
def test_recommend_simple_programs_3(expected_programs, commands):
rec = Recommendations(db)
rec.run_pipeline(
[{"operation": operation, "data": data} for (operation, data) in commands]
+ [{"operation": "include", "data": base_3}]
)
print(rec.selected_programs)
assert rec.selected_programs == expected_programs
if __name__ == "__main__":
pytest.main(["-qq", __import__("sys").argv[0]])
mongobox/unittest.py | infinitio/mongobox | 20 | 6615375
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from unittest import TestCase
import os
try:
import pymongo
except ImportError:
raise ImportError('PyMongo is required for MongoTestCase')
class MongoTestCase(TestCase):
'''A base for Mongo DB driven test cases. Provides
:class:`pymongo.MongoClient` instance in :attribute:`mongo_client`
and has a :method:`purge_database` helper method for database cleanup.
It is expected that tests are run from `nose` with `--with-mongobox` flag
that brings up a sandboxed instance of Mongo.
'''
__mongo_client = None
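    # Created lazily on first access by the mongo_client property, then cached.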
@property
def mongo_client(self):
'''Returns an instance of :class:`pymongo.MongoClient` connected
to MongoBox database instance.
'''
if not self.__mongo_client:
try:
port = int(os.getenv('MONGOBOX_PORT'))
self.__mongo_client = pymongo.MongoClient(port=port)
except (TypeError, pymongo.errors.ConnectionFailure):
raise RuntimeError(
'Seems that MongoBox is not running. ' +
'Do you run nosetests with --with-mongobox flag?')
return self.__mongo_client
def purge_database(self, drop=True):
'''Drops all collections in all databases but system ones
(``system.*``) one by one if :param:`drop` is `True` (default),
otherwise removes documents using `remove` method.
Both seem to be faster than dropping databases directly.
        A typical use is to call this method in :func:`unittest.TestCase.tearDown`
to have a clean database for every test case method.
.. code-block:: python
def tearDown(self):
                super(MyTestCase, self).tearDown()
self.purge_database()
'''
# exclude system databases
database_names = (
db_name for db_name in
self.mongo_client.database_names()
if db_name not in ['local']
)
for db_name in database_names:
db = self.mongo_client[db_name]
# exclude system collections
collections = (
db[c] for c in db.collection_names()
if not c.startswith('system.')
)
for collection in collections:
if drop:
db.drop_collection(collection)
else:
try:
collection.remove(None)
except pymongo.errors.OperationFailure:
if collection.options().get('capped', False):
# cannot remove documents from capped collections
# in latest version of Mongo. Dropping instead.
db.drop_collection(collection)
test_scripts/flowcam/background_subtraction.py | Thubaralei/particle-classification | 1 | 6615376
import os
from glob import glob
import cv2
import skimage.io as skio
import matplotlib.pyplot as plt
import numpy as np
from tqdm import tqdm
import skimage.morphology as skm
from miso.utils.flowcam import parse_image_list
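# Exploratory script: estimate a static background from a stack of raw FlowCam frames,
# then segment particles by thresholding each frame's deviation from that background.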
source_dir = r"C:\Users\rossm\OneDrive\Datasets\Plankton\F44 80 micron"
cal_filename = os.path.join(source_dir, "cal_image_000001.tif")
raw_filenames = sorted(glob(os.path.join(source_dir, "rawfile_*.tif")))
lst_filename = sorted(glob(os.path.join(source_dir, "*.lst")))[0]
df = parse_image_list(lst_filename)
print(df)
im_cal = skio.imread(cal_filename).astype(np.float32)
for raw_filename in tqdm(raw_filenames[:4]):
im_raw = skio.imread(raw_filename)
plt.imshow(im_raw)
plt.show()
ims = []
for raw_filename in tqdm(raw_filenames[:40]):
im_raw = skio.imread(raw_filename)
ims.append(im_raw)
ims = np.asarray(ims)
bg = np.median(ims, axis=0)
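# A per-pixel median over many frames suppresses moving particles and keeps the static background.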
plt.imshow(bg/255)
plt.show()
plt.imshow(np.abs(im_raw - bg) / 255)
plt.colorbar()
plt.show()
plt.imshow((np.abs(im_raw - bg) > 5).astype(np.float32))
plt.show()
plt.imshow(im_raw), plt.show()
im_raw = ims[37]
plt.imshow(im_raw), plt.show()
gr = np.max(np.abs(im_raw - bg), axis=-1) > 20
plt.imshow(gr.astype(np.float32))
plt.show()
from scipy import ndimage
grc = skm.binary_closing(gr, skm.disk(5))
grc = skm.area_opening(grc, 256)
grc = ndimage.binary_fill_holes(grc)
plt.imshow(grc.astype(np.float32))
plt.show()
# Group the results by image
# df_grouped = df.groupby("collage_file")
# Extra info to save
# df_filename = [""] * len(df)
# df_cls = [""] * len(df)
# df_campaign = [campaign_name] * len(df)
# df_sample = [run_name] * len(df)
im_save_dir = os.path.join(source_dir, "new_images")
os.makedirs(im_save_dir, exist_ok=True)
mask_save_dir = os.path.join(source_dir, "new_masks")
os.makedirs(mask_save_dir, exist_ok=True)
# Process each image
for fi, filename in tqdm(enumerate(raw_filenames)):
# Load the image
    im_filename = filename  # glob already returned full paths, so no extra join is needed
im = skio.imread(im_filename)
# Calculate mask
gr = np.max(np.abs(im - bg), axis=-1) > 20
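    # Clean up the thresholded mask below: close small gaps, drop blobs under 256 px, fill holes.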
grc = skm.binary_closing(gr, skm.disk(5))
grc = skm.area_opening(grc, 256)
mask = ndimage.binary_fill_holes(grc)
# cv2.imshow("im", mask.astype(np.float32))
# cv2.waitKey(10)
# Find contours
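    # Note: contours[0] below assumes the OpenCV 4 convention, where findContours
    # returns (contours, hierarchy).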
contours = cv2.findContours(mask.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print(contours)
# is_mask = False
# if save_mask:
# if os.path.exists(im_filename):
# try:
# mask = skio.imread(mask_filename)
# is_mask = True
# except:
# print("Error opening {}".format(mask_filename))
# else:
# print("Mask not found {}".format(im_filename))
# Cut each image out
for ci, contour in enumerate(contours[0]):
bb = cv2.boundingRect(contour)
print(bb)
# row_id = row[0]
# row = row[1]
# # Get image coordinates
# id = row['id']
# x = row['image_x']
# y = row['image_y']
# width = row['image_w']
# height = row['image_h']
# Get the segmented mask
x = bb[0]
y = bb[1]
width = bb[2]
height = bb[3]
seg_im = im[y:y + height, x:x + width, ...]
seg_im_filename = os.path.join(im_save_dir, "{:04d}_{:04d}.png".format(fi, ci))
skio.imsave(seg_im_filename, seg_im)
seg_mask = mask[y:y + height, x:x + width, ...]
seg_mask = seg_mask.astype(np.uint8) * 255
seg_mask_filename = os.path.join(mask_save_dir, "{:04d}_{:04d}.png".format(fi, ci))
skio.imsave(seg_mask_filename, seg_mask)
# df_cls[id-1] = cls
# df_filename[id-1] = seg_im_filename
molsysmt/physchem/groups/polarity.py | dprada/molsysmt | 3 | 6615377
#Grantham, R. Science. 185, 862–864 (1974)
#(amino acid side chain composition, polarity and molecular volume)
grantham = {
'ALA': 8.100,
'ARG': 10.500,
'ASN': 11.600,
'ASP': 13.000,
'CYS': 5.500,
'GLN': 10.500,
'GLU': 12.300,
'GLY': 9.000,
'HIS': 10.400,
'ILE': 5.200,
'LEU': 4.900,
'LYS': 11.300,
'MET': 5.700,
'PHE': 5.200,
'PRO': 8.000,
'SER': 9.200,
'THR': 8.600,
'TRP': 5.400,
'TYR': 6.200,
'VAL': 5.900
}
#<NAME>, <NAME>, <NAME>, <NAME>. Biol. 1968, 21, 170–201.
zimmerman = {
'ALA': 0.000,
'ARG': 52.000,
'ASN': 3.380,
'ASP': 49.700,
'CYS': 1.480,
'GLN': 3.530,
'GLU': 49.900,
'GLY': 0.000,
'HIS': 51.600,
'ILE': 0.130,
'LEU': 0.130,
'LYS': 49.500,
'MET': 1.430,
'PHE': 0.350,
'PRO': 1.580,
'SER': 1.670,
'THR': 1.660,
'TRP': 2.100,
'TYR': 1.610,
'VAL': 0.130
}
|
#Grantham, R. Science. 185, 862–864 (1974)
#(amino acid side chain composition, polarity and molecular volume)
grantham = {
'ALA': 8.100,
'ARG': 10.500,
'ASN': 11.600,
'ASP': 13.000,
'CYS': 5.500,
'GLN': 10.500,
'GLU': 12.300,
'GLY': 9.000,
'HIS': 10.400,
'ILE': 5.200,
'LEU': 4.900,
'LYS': 11.300,
'MET': 5.700,
'PHE': 5.200,
'PRO': 8.000,
'SER': 9.200,
'THR': 8.600,
'TRP': 5.400,
'TYR': 6.200,
'VAL': 5.900
}
#<NAME>, <NAME>, <NAME>, <NAME>. Biol. 1968, 21, 170–201.
zimmerman = {
'ALA': 0.000,
'ARG': 52.000,
'ASN': 3.380,
'ASP': 49.700,
'CYS': 1.480,
'GLN': 3.530,
'GLU': 49.900,
'GLY': 0.000,
'HIS': 51.600,
'ILE': 0.130,
'LEU': 0.130,
'LYS': 49.500,
'MET': 1.430,
'PHE': 0.350,
'PRO': 1.580,
'SER': 1.670,
'THR': 1.660,
'TRP': 2.100,
'TYR': 1.610,
'VAL': 0.130
}
| en | 0.506585 | #Grantham, R. Science. 185, 862–864 (1974) #(amino acid side chain composition, polarity and molecular volume) #<NAME>, <NAME>, <NAME>, <NAME>. Biol. 1968, 21, 170–201. | 1.527761 | 2 |
additional/nn_model/code/ner_neural.py | uds-lsv/anea | 14 | 6615378
# Copyright 2020 Saarland University, Spoken Language Systems LSV
# Author: <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS*, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import keras
from keras.models import Model
from keras.callbacks import Callback
from keras.layers import Input, Dense, LSTM, GRU, Bidirectional, Dropout
from ner_datacode import DataCreation, Embedding, Evaluation
from experimentalsettings import ExperimentalSettings
# CHANGE CONFIG FILE HERE FOR DIFFERENT SETUPS
# CONFIG FILES ARE STORED IN THE ../config DIRECTORY
SETTINGS = ExperimentalSettings.load_json("ner_neural_03_et_2xdata_01")
embedding = Embedding()
if SETTINGS["EMBEDDING"] == "glove":
embedding.load_glove_840b()
elif SETTINGS["EMBEDDING"] == "fasttext-et":
embedding.load_fasttext("et")
elif SETTINGS["EMBEDDING"] == "fasttext-en":
embedding.load_fasttext("en")
elif SETTINGS["EMBEDDING"] == "fasttext-fy":
embedding.load_fasttext("fy")
elif SETTINGS["EMBEDDING"] == "fasttext-cc-et":
embedding.load_fasttext("et-cc")
elif SETTINGS["EMBEDDING"] == "fasttext-cc-en":
embedding.load_fasttext("en-cc")
elif SETTINGS["EMBEDDING"] == "fasttext-cc-fy":
embedding.load_fasttext("fy-cc")
elif SETTINGS["EMBEDDING"] == "fasttext-cc-es":
embedding.load_fasttext("es-cc")
elif SETTINGS["EMBEDDING"] == "fasttext-cc-yo":
embedding.load_fasttext("yo-cc")
embedding.use_specific_label_map(SETTINGS["LABEL_MAP"])
def load_and_preprocess_data(path_to_data, embedding, input_separator=SETTINGS["DATA_SEPARATOR"]):
data_creation = DataCreation(input_separator=input_separator)
instances = data_creation.load_connl_dataset(path_to_data, SETTINGS["CONTEXT_LENGTH"], remove_label_prefix=False)
embedding.count_instances_without_target_embedding(instances)
instances_embedded = embedding.instances_to_vectors(instances)
return instances_embedded
def create_data_matrix(instances_embedded):
x, y = embedding.instances_to_numpy_arrays(instances_embedded)
return x, y
train = load_and_preprocess_data(SETTINGS["PATH_TRAIN"], embedding)
if SETTINGS["USE_NOISY"]:
train_distant = load_and_preprocess_data(SETTINGS["PATH_TRAIN_DISTANT"], embedding)
dev = load_and_preprocess_data(SETTINGS["PATH_DEV"], embedding)
test = load_and_preprocess_data(SETTINGS["PATH_TEST"], embedding)
def create_model():
input_shape = (SETTINGS["CONTEXT_LENGTH"]*2+1, embedding.embedding_vector_size)
feature_input_layer = Input(shape=input_shape, name="input_text")
dropout1 = Dropout(SETTINGS["DROPOUT_1"])(feature_input_layer)
if SETTINGS["RNN_TYPE"] == "LSTM":
rnn = LSTM(SETTINGS["RNN_SIZE"])
elif SETTINGS["RNN_TYPE"] == "GRU":
rnn = GRU(SETTINGS["RNN_SIZE"])
bi_rnn_layer = Bidirectional(rnn, merge_mode='concat', name="bi-rnn")(dropout1)
dropout2 = Dropout(SETTINGS["DROPOUT_2"])(bi_rnn_layer)
dense_layer = Dense(SETTINGS["DENSE_SIZE"], activation=SETTINGS["DENSE_ACTIVATION"], name="dense")(dropout2)
dropout3 = Dropout(SETTINGS["DROPOUT_3"])(dense_layer)
softmax_output_layer = Dense(embedding.get_num_labels(), activation='softmax', name="softmax_out")(dropout3)
model = Model(inputs=[feature_input_layer], outputs=softmax_output_layer)
model.compile(loss=keras.losses.categorical_crossentropy, optimizer="adam", metrics=['accuracy'])
return model
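# --- Added shape sketch (illustrative commentary) ---
# Tensors flowing through create_model(), in terms of the config values used above:
# input:  (batch, 2*CONTEXT_LENGTH + 1, embedding_vector_size)
# bi-rnn: (batch, 2*RNN_SIZE)    # forward/backward outputs concatenated
# dense:  (batch, DENSE_SIZE)
# output: (batch, num_labels)    # softmax distribution over the NER label set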
class F1scores(Callback):
def __init__(self, data, model, name, log_file=None, save_best_model_weights_file=None):
super(F1scores, self).__init__()
self.data = data
self.model = model
self.name = name
self.log_file = log_file
self.save_best_model_weights_file = save_best_model_weights_file
self.best_f1 = -1
def on_epoch_end(self, batch, logs={}):
data_x, _ = create_data_matrix(self.data)
predictions = self.model.predict(data_x)
predictions = embedding.predictions_to_labels(predictions)
evaluation = Evaluation(separator=SETTINGS["DATA_SEPARATOR"])
connl_evaluation_string = evaluation.create_conll_evaluation_format(self.data, predictions)
evaluation_output = evaluation.evaluate_with_perl_script(connl_evaluation_string)
f1 = evaluation.extract_f_score(evaluation_output)
logs[self.name] = f1
if f1 > self.best_f1:
if self.save_best_model_weights_file is not None:
print(f"Saving model because on {self.name} new f1 {f1} is better than old f1 {self.best_f1}.")
self.model.save_weights(self.save_best_model_weights_file)
self.best_f1 = f1
else:
print(f"No improvement over f1 on {self.name} (best: {self.best_f1}, current: {f1}).")
return self.best_f1, evaluation_output
class Logger(Callback):
def __init__(self, filename):
super(Logger, self).__init__()
self.filename = filename
self.epoch = 1
self.need_init = True
def on_epoch_end(self, batch, logs={}):
if self.need_init:
with open(self.filename, 'w') as fout:
fout.write('epoch,')
fout.write(','.join(sorted(logs)))
fout.write('\n')
self.need_init = False
s = f"{self.epoch},"
for key in sorted(logs):
s += f'{logs[key]},'
s = s[0:len(s) - 1] + '\n'
with open(self.filename, 'a') as fout:
fout.write(s)
self.epoch += 1
def train_and_evaluate():
model = create_model()
train_data = train
if SETTINGS["USE_NOISY"]:
train_data = list(train_distant)
train_data.extend(train)
train_x, train_y = create_data_matrix(train_data)
storage_directory = "../logs/"
best_model_path = storage_directory + SETTINGS["NAME"] + "_best_model_on_dev_weights.h5"
valid_f1 = F1scores(dev, model, 'dev_f1', save_best_model_weights_file=best_model_path)
log = Logger(storage_directory + SETTINGS["NAME"] + '_training.log')
model.fit(x=train_x,
y=train_y,
batch_size=SETTINGS["BATCH_SIZE"],
epochs=SETTINGS["EPOCHS"],
verbose=0,
shuffle=not SETTINGS["USE_NOISY"],
callbacks=[valid_f1, log])
model.load_weights(best_model_path)
_, test_eval_output = F1scores(test, model,
'test_f1').on_epoch_end(None)
with open(storage_directory + SETTINGS["NAME"] + "_test_f1_on_best_dev_model.txt", "w") as out_file:
out_file.write(f"{test_eval_output}\n")
train_and_evaluate()
| # Copyright 2020 Saarland University, Spoken Language Systems LSV
# Author: <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS*, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import keras
from keras.models import Model
from keras.callbacks import Callback
from keras.layers import Input, Dense, LSTM, GRU, Bidirectional, Dropout
from ner_datacode import DataCreation, Embedding, Evaluation
from experimentalsettings import ExperimentalSettings
# CHANGE CONFIG FILE HERE FOR DIFFERENT SETUPS
# CONFIG FILES ARE STORED IN THE ../config DIRECTORY
SETTINGS = ExperimentalSettings.load_json("ner_neural_03_et_2xdata_01")
embedding = Embedding()
if SETTINGS["EMBEDDING"] == "glove":
embedding.load_glove_840b()
elif SETTINGS["EMBEDDING"] == "fasttext-et":
embedding.load_fasttext("et")
elif SETTINGS["EMBEDDING"] == "fasttext-en":
embedding.load_fasttext("en")
elif SETTINGS["EMBEDDING"] == "fasttext-fy":
embedding.load_fasttext("fy")
elif SETTINGS["EMBEDDING"] == "fasttext-cc-et":
embedding.load_fasttext("et-cc")
elif SETTINGS["EMBEDDING"] == "fasttext-cc-en":
embedding.load_fasttext("en-cc")
elif SETTINGS["EMBEDDING"] == "fasttext-cc-fy":
embedding.load_fasttext("fy-cc")
elif SETTINGS["EMBEDDING"] == "fasttext-cc-es":
embedding.load_fasttext("es-cc")
elif SETTINGS["EMBEDDING"] == "fasttext-cc-yo":
embedding.load_fasttext("yo-cc")
embedding.use_specific_label_map(SETTINGS["LABEL_MAP"])
def load_and_preprocess_data(path_to_data, embedding, input_separator=SETTINGS["DATA_SEPARATOR"]):
data_creation = DataCreation(input_separator=input_separator)
instances = data_creation.load_connl_dataset(path_to_data, SETTINGS["CONTEXT_LENGTH"], remove_label_prefix=False)
embedding.count_instances_without_target_embedding(instances)
instances_embedded = embedding.instances_to_vectors(instances)
return instances_embedded
def create_data_matrix(instances_embedded):
x, y = embedding.instances_to_numpy_arrays(instances_embedded)
return x, y
train = load_and_preprocess_data(SETTINGS["PATH_TRAIN"], embedding)
if SETTINGS["USE_NOISY"]:
train_distant = load_and_preprocess_data(SETTINGS["PATH_TRAIN_DISTANT"], embedding)
dev = load_and_preprocess_data(SETTINGS["PATH_DEV"], embedding)
test = load_and_preprocess_data(SETTINGS["PATH_TEST"], embedding)
def create_model():
input_shape = (SETTINGS["CONTEXT_LENGTH"]*2+1, embedding.embedding_vector_size)
feature_input_layer = Input(shape=input_shape, name="input_text")
dropout1 = Dropout(SETTINGS["DROPOUT_1"])(feature_input_layer)
if SETTINGS["RNN_TYPE"] == "LSTM":
rnn = LSTM(SETTINGS["RNN_SIZE"])
elif SETTINGS["RNN_TYPE"] == "GRU":
rnn = GRU(SETTINGS["RNN_SIZE"])
bi_rnn_layer = Bidirectional(rnn, merge_mode='concat', name="bi-rnn")(dropout1)
dropout2 = Dropout(SETTINGS["DROPOUT_2"])(bi_rnn_layer)
dense_layer = Dense(SETTINGS["DENSE_SIZE"], activation=SETTINGS["DENSE_ACTIVATION"], name="dense")(dropout2)
dropout3 = Dropout(SETTINGS["DROPOUT_3"])(dense_layer)
softmax_output_layer = Dense(embedding.get_num_labels(), activation='softmax', name="softmax_out")(dropout3)
model = Model(inputs=[feature_input_layer], outputs=softmax_output_layer)
model.compile(loss=keras.losses.categorical_crossentropy, optimizer="adam", metrics=['accuracy'])
return model
class F1scores(Callback):
def __init__(self, data, model, name, log_file=None, save_best_model_weights_file=None):
super(F1scores, self).__init__()
self.data = data
self.model = model
self.name = name
self.log_file = log_file
self.save_best_model_weights_file = save_best_model_weights_file
self.best_f1 = -1
def on_epoch_end(self, batch, logs={}):
data_x, _ = create_data_matrix(self.data)
predictions = self.model.predict(data_x)
predictions = embedding.predictions_to_labels(predictions)
evaluation = Evaluation(separator=SETTINGS["DATA_SEPARATOR"])
connl_evaluation_string = evaluation.create_conll_evaluation_format(self.data, predictions)
evaluation_output = evaluation.evaluate_with_perl_script(connl_evaluation_string)
f1 = evaluation.extract_f_score(evaluation_output)
logs[self.name] = f1
if f1 > self.best_f1:
if self.save_best_model_weights_file is not None:
print(f"Saving model because on {self.name} new f1 {f1} is better than old f1 {self.best_f1}.")
self.model.save_weights(self.save_best_model_weights_file)
self.best_f1 = f1
else:
print(f"No improvement over f1 on {self.name} (best: {self.best_f1}, current: {f1}).")
return self.best_f1, evaluation_output
class Logger(Callback):
def __init__(self, filename):
super(Logger, self).__init__()
self.filename = filename
self.epoch = 1
self.need_init = True
def on_epoch_end(self, batch, logs={}):
if self.need_init:
with open(self.filename, 'w') as fout:
fout.write('epoch,')
fout.write(','.join(sorted(logs)))
fout.write('\n')
self.need_init = False
s = f"{self.epoch},"
for key in sorted(logs):
s += f'{logs[key]},'
s = s[0:len(s) - 1] + '\n'
with open(self.filename, 'a') as fout:
fout.write(s)
self.epoch += 1
def train_and_evaluate():
model = create_model()
train_data = train
if SETTINGS["USE_NOISY"]:
train_data = list(train_distant)
train_data.extend(train)
train_x, train_y = create_data_matrix(train_data)
storage_directory = "../logs/"
best_model_path = storage_directory + SETTINGS["NAME"] + "_best_model_on_dev_weights.h5"
valid_f1 = F1scores(dev, model, 'dev_f1', save_best_model_weights_file=best_model_path)
log = Logger(storage_directory + SETTINGS["NAME"] + '_training.log')
model.fit(x=train_x,
y=train_y,
batch_size=SETTINGS["BATCH_SIZE"],
epochs=SETTINGS["EPOCHS"],
verbose=0,
shuffle=not SETTINGS["USE_NOISY"],
callbacks=[valid_f1, log])
model.load_weights(best_model_path)
_, test_eval_output = F1scores(test, model,
'test_f1').on_epoch_end(None)
with open(storage_directory + SETTINGS["NAME"] + "_test_f1_on_best_dev_model.txt", "w") as out_file:
out_file.write(f"{test_eval_output}\n")
train_and_evaluate() | en | 0.684518 | # Copyright 2020 Saarland University, Spoken Language Systems LSV # Author: <NAME>, <NAME>, <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # THIS CODE IS PROVIDED *AS IS*, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED # WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, # MERCHANTABLITY OR NON-INFRINGEMENT. # # See the Apache 2 License for the specific language governing permissions and # limitations under the License. # CHANGE CONFIG FILE HERE FOR DIFFERENT SETUPS # CONFIG FILES ARE STORED IN THE ../config DIRECTORY | 1.995642 | 2 |
tools/bin/export-import-projects.py | socal-ucr/DAGEE | 20 | 6615379 | <filename>tools/bin/export-import-projects.py<gh_stars>10-100
# Copyright (c) 2018-Present Advanced Micro Devices, Inc. See LICENSE.TXT for terms.
import gitlab
import time
# private token or personal token authentication
# Do the following steps if you don't have/don't know your personal access token
# Get private token by going to your user settings -> access tokens -> Add a personal access token
# Check api/read_user/read_repository/write_repository and any other check boxes that may be there.
# Click the 'Create personal access token' button
# NOTE: the access token disappears if you go back or exit the page. So, save it to clipboard IMMEDIATELY!
gl = gitlab.Gitlab('gitlab_url', private_token='XXX')
# Some Python commands to get a list of projects
# depending on different search criteria.
# projects = gl.projects.list(owned=True)
# projects = gl.projects.list(search='PIM')
# projects = gl.projects.list(all=True)
# Get PIM Emulator project by ID
# Project Settings -> General -> General project settings/Name,topics,avatar
# Copy the Project ID in one of the top right boxes
pim_project_id = PROJECT_ID
pim_project = gl.projects.get(pim_project_id)
# Create the export
export = pim_project.exports.create({})
# Wait for the 'finished' status
export.refresh()
while export.export_status != 'finished':
time.sleep(1)
export.refresh()
# Download the result
with open('/path/to/exported-project.tgz', 'wb') as f:
export.download(streamed=True, action=f.write)
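# Added hardening sketch (not in the original script): the status poll above can
# spin forever if the export never completes; a bounded wait could look like this.
# deadline = time.time() + 600  # illustrative 10-minute cap
# while export.export_status != 'finished' and time.time() < deadline:
#     time.sleep(1)
#     export.refresh()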
# Copy the below to another python script if we want to import from the saved/exported file to the gitlab server
# output = gl.projects.import_project(open('/path/to/exported-project.tgz', 'rb'), 'Project Name', 'Project Namespace')
## if you want to overwrite existing project:
## output = gl.projects.import_project(open('/path/to/exported-project.tgz', 'rb'), 'Project Name', 'Project Namespace', True)
## Get a ProjectImport object to track the import status
# project_import = gl.projects.get(output['id'], lazy=True).imports.get()
# while project_import.import_status != 'finished':
# time.sleep(1)
# project_import.refresh()
| <filename>tools/bin/export-import-projects.py<gh_stars>10-100
# Copyright (c) 2018-Present Advanced Micro Devices, Inc. See LICENSE.TXT for terms.
import gitlab
import time
# private token or personal token authentication
# Do the following steps if you don't have/don't know your personal access token
# Get private token by going to your user settings -> access tokens -> Add a personal access token
# Check api/read_user/read_repository/write_repository and any other check boxes that may be there.
# Click the 'Create personal access token' button
# NOTE: the access token disappears if you go back or exit the page. So, save it to clipboard IMMEDIATELY!
gl = gitlab.Gitlab('gitlab_url', private_token='XXX')
# Some Python commands to get a list of projects
# depending on different search criteria.
# projects = gl.projects.list(owned=True)
# projects = gl.projects.list(search='PIM')
# projects = gl.projects.list(all=True)
# Get PIM Emulator project by ID
# Project Settings -> General -> General project settings/Name,topics,avatar
# Copy the Project ID in one of the top right boxes
pim_project_id = PROJECT_ID
pim_project = gl.projects.get(pim_project_id)
# Create the export
export = pim_project.exports.create({})
# Wait for the 'finished' status
export.refresh()
while export.export_status != 'finished':
time.sleep(1)
export.refresh()
# Download the result
with open('/path/to/exported-project.tgz', 'wb') as f:
export.download(streamed=True, action=f.write)
# Copy the below to another python script if we want to import from the saved/exported file to the gitlab server
# output = gl.projects.import_project(open('/path/to/exported-project.tgz', 'rb'), 'Project Name', 'Project Namespace')
## if you want to overwrite existing project:
## output = gl.projects.import_project(open('/path/to/exported-project.tgz', 'rb'), 'Project Name', 'Project Namespace', True)
## Get a ProjectImport object to track the import status
# project_import = gl.projects.get(output['id'], lazy=True).imports.get()
# while project_import.import_status != 'finished':
# time.sleep(1)
# project_import.refresh()
| en | 0.623928 | # Copyright (c) 2018-Present Advanced Micro Devices, Inc. See LICENSE.TXT for terms. # private token or personal token authentication # Do the following steps if you don't have/don't know your personal access token # Get private token by going to your user settings -> access tokens -> Add a personal access token # Check api/read_user/read_repository/write_repository and any other check boxes that may be there. # Click the 'Create personal access token' button # NOTE: the access token disappears if you go back or exit the page. So, save it to clipboard IMMEDIATELY! # Some Python commands to get a list of projects # depending on different search criteria. # projects = gl.projects.list(owned=True) # projects = gl.projects.list(search='PIM') # projects = gl.projects.list(all=True) # Get PIM Emulator project by ID # Project Settings -> General -> General project settings/Name,topics,avatar # Copy the Project ID in one of the top right boxes # Create the export # Wait for the 'finished' status # Download the result # Copy the below to another python script if we want to import from the saved/exported file to the gitlab server # output = gl.projects.import_project(open('/path/to/exported-project.tgz', 'rb'), 'Project Name', 'Project Namespace') ## if you want to overwrite existing project: ## output = gl.projects.import_project(open('/path/to/exported-project.tgz', 'rb'), 'Project Name', 'Project Namespace', True) ## Get a ProjectImport object to track the import status # project_import = gl.projects.get(output['id'], lazy=True).imports.get() # while project_import.import_status != 'finished': # time.sleep(1) # project_import.refresh() | 2.401051 | 2 |
custom-interfaces/video-segmentation-beaverdam/annotator/services.py | stungkit/labelbox | 1,345 | 6615380 | import mturk.queries
import logging
from django.http import HttpResponse, Http404
from django.contrib.admin.views.decorators import staff_member_required
from mturk.models import Task, FullVideoTask
from .models import *
from mturk.queries import *
from decimal import Decimal
logger = logging.getLogger()
@staff_member_required
def publish_videos_to_turk(videos):
for video in videos:
video_task = get_active_video_turk_task(video.id)
if video_task != None:
raise Exception('video {} already has an active FullVideoTask'.format(video.id))
video_task = FullVideoTask(video = video)
video_task.publish()
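# Added usage sketch (illustrative, assumes the Video model fields used below):
# publish every unverified video in one batch.
# publish_videos_to_turk(Video.objects.filter(verified=False))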
@staff_member_required
def verify(request, video_id):
body = request.body.decode('utf-8')
video = Video.objects.get(id=video_id)
if body == 'true':
video.verified = True
elif body == 'false':
video.verified = False
else:
print(body)
return HttpResponseBadRequest()
video.save()
return HttpResponse('video verification state saved')
@staff_member_required
def accept_video(request, video_id, bonus, message, reopen, clear_boxes, blockWorker, updatedAnnotation):
video = Video.objects.get(pk=video_id)
video.verified = True
video_task = get_active_video_turk_task(video.id)
if video_task != None:
# accept on turk
video_task.approve_assignment(bonus, message)
if blockWorker:
video_task.blockWorker()
# delete from Turk
video_task.archive_turk_hit()
video_task.bonus = Decimal(bonus)
video_task.message = message
video_task.paid = True
video_task.closed = True
video_task.save()
# create a new HIT for this instead
if reopen:
new_task = FullVideoTask(video = video)
new_task.publish()
# now mark the video as unverified as we're asking somebody else to fill this in
# why would we do this? Sometimes it's a better strategy to accept somebody's work,
# and block the worker but then get somebody else to do the work
video.verified = False
# clear the boxes as specified
if clear_boxes:
video.annotation = ''
else:
video.annotation = updatedAnnotation
video.rejected = False
video.save()
@staff_member_required
def reject_video(request, video_id, message, reopen, clear_boxes, blockWorker, updatedAnnotation):
video = Video.objects.get(pk=video_id)
video_task = get_active_video_turk_task(video.id)
if video_task != None:
# reject on turk
video_task.reject_assignment(message)
if blockWorker:
video_task.blockWorker()
# update the task
video_task.message = message
video_task.rejected = True
video_task.bonus = 0
video_task.closed = True
video_task.save()
# delete from Turk
video_task.archive_turk_hit()
# create a new HIT for this instead
if reopen:
new_task = FullVideoTask(video = video)
new_task.publish()
# clear the boxes as specified
if clear_boxes:
video.annotation = ''
else:
video.annotation = updatedAnnotation
video.verified = False
video.rejected = True
video.save()
@staff_member_required
def email_worker(request, video_id, subject, message):
video = Video.objects.get(pk=video_id)
video_task = get_active_video_turk_task(video.id)
if video_task == None:
raise Exception("No video task to send email for {}".format(video_id))
video_task.send_email(subject, message) | import mturk.queries
import logging
from django.http import HttpResponse, Http404
from django.contrib.admin.views.decorators import staff_member_required
from mturk.models import Task, FullVideoTask
from .models import *
from mturk.queries import *
from decimal import Decimal
logger = logging.getLogger()
@staff_member_required
def publish_videos_to_turk(videos):
for video in videos:
video_task = get_active_video_turk_task(video.id)
if video_task != None:
raise Exception('video {} already has an active FullVideoTask'.format(video.id))
video_task = FullVideoTask(video = video)
video_task.publish()
@staff_member_required
def verify(request, video_id):
body = request.body.decode('utf-8')
video = Video.objects.get(id=video_id)
if body == 'true':
video.verified = True
elif body == 'false':
video.verified = False
else:
print(body)
return HttpResponseBadRequest()
video.save()
return HttpResponse('video verification state saved')
@staff_member_required
def accept_video(request, video_id, bonus, message, reopen, clear_boxes, blockWorker, updatedAnnotation):
video = Video.objects.get(pk=video_id)
video.verified = True
video_task = get_active_video_turk_task(video.id)
if video_task != None:
# accept on turk
video_task.approve_assignment(bonus, message)
if blockWorker:
video_task.blockWorker()
# delete from Turk
video_task.archive_turk_hit()
video_task.bonus = Decimal(bonus)
video_task.message = message
video_task.paid = True
video_task.closed = True
video_task.save()
# create a new HIT for this instead
if reopen:
new_task = FullVideoTask(video = video)
new_task.publish()
# now mark the video as unverified as we're asking somebody else to fill this in
# why would we do this? Sometimes it's a better strategy to accept somebody's work,
# and block the worker but then get somebody else to do the work
video.verified = False
# clear the boxes as specified
if clear_boxes:
video.annotation = ''
else:
video.annotation = updatedAnnotation
video.rejected = False
video.save()
@staff_member_required
def reject_video(request, video_id, message, reopen, clear_boxes, blockWorker, updatedAnnotation):
video = Video.objects.get(pk=video_id)
video_task = get_active_video_turk_task(video.id)
if video_task != None:
# reject on turk
video_task.reject_assignment(message)
if blockWorker:
video_task.blockWorker()
# update the task
video_task.message = message
video_task.rejected = True
video_task.bonus = 0
video_task.closed = True
video_task.save()
# delete from Turk
video_task.archive_turk_hit()
# create a new HIT for this instead
if reopen:
new_task = FullVideoTask(video = video)
new_task.publish()
# clear the boxes as specified
if clear_boxes:
video.annotation = ''
else:
video.annotation = updatedAnnotation
video.verified = False
video.rejected = True
video.save()
@staff_member_required
def email_worker(request, video_id, subject, message):
video = Video.objects.get(pk=video_id)
video_task = get_active_video_turk_task(video.id)
if video_task == None:
raise Exception("No video task to send email for {}".format(video_id))
video_task.send_email(subject, message) | en | 0.935824 | # accept on turk # delete from Turk # create a new HIT for this instead # now mark the video as unverified as we're asking somebody else to fill this in # why would we do this? Sometimes it's a better strategy to accept somebody's work, # and block the worker but then get somebody else to do the work # clear the boxes as specified # reject on turk # update the task # delete from Turk # create a new HIT for this instead # clear the boxes as specified | 2.111566 | 2 |
tests/test_copy.py | nmaxwell/OpenMesh-Python | 9 | 6615381 | import unittest
import openmesh
import copy
class Python(unittest.TestCase):
def setUp(self):
self.mesh = openmesh.TriMesh()
self.vh1 = self.mesh.add_vertex([0, 0, 0])
self.vh2 = self.mesh.add_vertex([1, 0, 0])
self.vh3 = self.mesh.add_vertex([1, 1, 0])
self.fh = self.mesh.add_face(self.vh1, self.vh2, self.vh3)
self.mesh.set_vertex_property('test', self.vh1, self.mesh)
self.mesh.set_vertex_property('test', self.vh2, self.mesh)
self.one_two_three = [1, 2, 3]
self.mesh.set_vertex_property('test', self.vh3, self.one_two_three)
self.mesh.set_face_property('test', self.fh, self.one_two_three)
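# Added commentary: the 'test' property stores the mesh itself on vh1/vh2 and a
# single shared list on vh3/fh, so the two tests below can distinguish aliasing
# (shallow copy) from recursive cloning (deep copy), including the self-reference.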
def test_shallowcopy(self):
mesh_copy = copy.copy(self.mesh)
mesh_copy.vertex_property('test', self.vh3)[:] = [4, 5, 6]
self.assertIsNot(self.mesh, mesh_copy)
self.assertIs(mesh_copy.vertex_property('test', self.vh1), self.mesh)
self.assertIs(mesh_copy.vertex_property('test', self.vh2), self.mesh)
self.assertIs(mesh_copy.vertex_property('test', self.vh3), self.one_two_three)
self.assertIs(mesh_copy.face_property('test', self.fh), self.one_two_three)
self.assertIs(mesh_copy.vertex_property('test', self.vh3), mesh_copy.face_property('test', self.fh))
self.assertEqual(self.mesh.vertex_property('test', self.vh3), [4, 5, 6])
self.assertEqual(mesh_copy.vertex_property('test', self.vh3), [4, 5, 6])
def test_deepcopy(self):
mesh_copy = copy.deepcopy(self.mesh)
mesh_copy.vertex_property('test', self.vh3)[:] = [4, 5, 6]
self.assertIsNot(self.mesh, mesh_copy)
self.assertIs(mesh_copy.vertex_property('test', self.vh1), mesh_copy)
self.assertIs(mesh_copy.vertex_property('test', self.vh2), mesh_copy)
self.assertIsNot(mesh_copy.vertex_property('test', self.vh3), self.one_two_three)
self.assertIsNot(mesh_copy.face_property('test', self.fh), self.one_two_three)
self.assertIs(mesh_copy.vertex_property('test', self.vh3), mesh_copy.face_property('test', self.fh))
self.assertListEqual(self.mesh.vertex_property('test', self.vh3), [1, 2, 3])
self.assertListEqual(mesh_copy.vertex_property('test', self.vh3), [4, 5, 6])
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(Python)
unittest.TextTestRunner(verbosity=2).run(suite)
| import unittest
import openmesh
import copy
class Python(unittest.TestCase):
def setUp(self):
self.mesh = openmesh.TriMesh()
self.vh1 = self.mesh.add_vertex([0, 0, 0])
self.vh2 = self.mesh.add_vertex([1, 0, 0])
self.vh3 = self.mesh.add_vertex([1, 1, 0])
self.fh = self.mesh.add_face(self.vh1, self.vh2, self.vh3)
self.mesh.set_vertex_property('test', self.vh1, self.mesh)
self.mesh.set_vertex_property('test', self.vh2, self.mesh)
self.one_two_three = [1, 2, 3]
self.mesh.set_vertex_property('test', self.vh3, self.one_two_three)
self.mesh.set_face_property('test', self.fh, self.one_two_three)
def test_shallowcopy(self):
mesh_copy = copy.copy(self.mesh)
mesh_copy.vertex_property('test', self.vh3)[:] = [4, 5, 6]
self.assertIsNot(self.mesh, mesh_copy)
self.assertIs(mesh_copy.vertex_property('test', self.vh1), self.mesh)
self.assertIs(mesh_copy.vertex_property('test', self.vh2), self.mesh)
self.assertIs(mesh_copy.vertex_property('test', self.vh3), self.one_two_three)
self.assertIs(mesh_copy.face_property('test', self.fh), self.one_two_three)
self.assertIs(mesh_copy.vertex_property('test', self.vh3), mesh_copy.face_property('test', self.fh))
self.assertEqual(self.mesh.vertex_property('test', self.vh3), [4, 5, 6])
self.assertEqual(mesh_copy.vertex_property('test', self.vh3), [4, 5, 6])
def test_deepcopy(self):
mesh_copy = copy.deepcopy(self.mesh)
mesh_copy.vertex_property('test', self.vh3)[:] = [4, 5, 6]
self.assertIsNot(self.mesh, mesh_copy)
self.assertIs(mesh_copy.vertex_property('test', self.vh1), mesh_copy)
self.assertIs(mesh_copy.vertex_property('test', self.vh2), mesh_copy)
self.assertIsNot(mesh_copy.vertex_property('test', self.vh3), self.one_two_three)
self.assertIsNot(mesh_copy.face_property('test', self.fh), self.one_two_three)
self.assertIs(mesh_copy.vertex_property('test', self.vh3), mesh_copy.face_property('test', self.fh))
self.assertListEqual(self.mesh.vertex_property('test', self.vh3), [1, 2, 3])
self.assertListEqual(mesh_copy.vertex_property('test', self.vh3), [4, 5, 6])
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(Python)
unittest.TextTestRunner(verbosity=2).run(suite)
| none | 1 | 2.736029 | 3 | |
util/poller_test_generator.py | esnet/esmond-test | 0 | 6615382 | <filename>util/poller_test_generator.py
#!/usr/bin/env python
"""
Generate bogus data to put into a memcached persist queue for testing.
The approximate math on how many data points will be generated:
options.loop * options.routers * options.interfaces * (options.oidsets * 2)
as most of the oidsets being pulled have 2 oids.
"""
import json
import os
import pprint
import string
import sys
import time
from optparse import OptionParser
from esmond.api.models import OIDSet
from esmond.config import get_config, get_config_path
from esmond.persist import PollResult, MemcachedPersistQueue
pp = pprint.PrettyPrinter(indent=2)
class TestQueuesException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TestQueuesWarning(Warning): pass
class TestQueues(object):
"""Manage the test queues"""
def __init__(self, config, write, verbose):
super(TestQueues, self).__init__()
self.config = config
self.write = write
self.verbose = verbose
self._queues = {}
self._device_map = {}
self._next_q = 1
if not config.persist_queues.has_key('cassandra'):
raise TestQueuesException('Config does not have cassandra persist_queues defined.')
self._cassandra_queues = config.persist_queues['cassandra'][1]
for i in xrange(1,self._cassandra_queues+1):
qname = 'cassandra_{0}'.format(i)
self._queues[qname] = MemcachedPersistQueue(qname, config.espersistd_uri)
print self._queues[qname]
def _get_device_queue(self, pr):
if not self._device_map.has_key(pr.device_name):
self._device_map[pr.device_name] = self._next_q
if self._next_q < self._cassandra_queues:
self._next_q += 1
else:
self._next_q = 1
return 'cassandra_{0}'.format(self._device_map[pr.device_name])
def put(self, pr):
q = self._get_device_queue(pr)
if not self.write:
print 'Noop to: {0}'.format(q)
if self.verbose > 1: print pr.json()
else:
self._queues[q].put(pr)
def main():
usage = '%prog [ -r NUM | -i NUM | -o NUM | -l NUM | -v ]'
usage += '\n\tAmount of data generated ~= r * i * (o * 2) * l'
parser = OptionParser(usage=usage)
parser.add_option('-r', '--routers', metavar='NUM_ROUTERS',
type='int', dest='routers', default=1,
help='Number of test "routers" to generate (default=%default).')
parser.add_option('-i', '--interfaces', metavar='NUM_INTERFACES',
type='int', dest='interfaces', default=2,
help='Number of test interfaces to generate on each test router (default=%default).')
parser.add_option('-o', '--oidsets', metavar='NUM_OIDSETS',
type='int', dest='oidsets', default=2,
help='Number of oidsets to assign to each fake device/router (default=%default).')
parser.add_option('-l', '--loop', metavar='NUM_LOOPS',
type='int', dest='loop', default=1,
help='Number of times to send data for each "device (default=%default)."')
parser.add_option('-p', '--prefix', metavar='PREFIX',
type='string', dest='prefix', default='fake',
help='Device name prefix - make new names (default=%default).')
parser.add_option('-W', '--write',
dest='write', action='store_true', default=False,
help='Actually write the data to the memcache queue.')
parser.add_option('-v', '--verbose',
dest='verbose', action='count', default=False,
help='Verbose output - -v, -vv, etc.')
options, args = parser.parse_args()
router_names = []
for i in range(1,5):
for c in string.lowercase:
router_names.append(c*i)
if options.routers > 26*4:
print 'There is an upper bound of {0} fake routers.'.format(26*4)
return -1
config = get_config(get_config_path())
qs = TestQueues(config, options.write, options.verbose)
oidset_oid = {}
oid_count = 0
for oidset in OIDSet.objects.filter(frequency=30)[0:options.oidsets]:
if not oidset_oid.has_key(oidset.name): oidset_oid[oidset.name] = []
for oid in oidset.oids.exclude(name='sysUpTime'):
oidset_oid[oidset.name].append(oid.name)
oid_count += 1
if options.verbose:
print 'Using following oidsets/oids for fake devices:'
pp.pprint(oidset_oid)
loopcount = 0
ts = int(time.time())
val = 100
# 43200 - 12 hrs. 1440 loops - 1/2 day of data
print 'Generating {0} data points.'.format(
options.loop*options.routers*options.interfaces*oid_count)
for iteration in xrange(options.loop):
if options.verbose: print 'Loop {0}/{1}'.format(iteration, options.loop)
for dn in router_names[0:options.routers]:
device_name = '{0}_rtr_{1}'.format(options.prefix, dn)
for oidset in oidset_oid.keys():
data = []
for oid in oidset_oid[oidset]:
for i in xrange(options.interfaces):
interface_name = 'fake_iface_{0}'.format(i)
datum = [[oid, interface_name], val]
data.append(datum)
pr = PollResult(
oidset_name=oidset,
device_name=device_name,
oid_name=oid,
timestamp=ts,
data=data,
metadata={'tsdb_flags': 1}
)
if options.verbose > 1: print pr.json()
qs.put(pr)
ts += 30
val += 50
loopcount += 1
if __name__ == '__main__':
main() | <filename>util/poller_test_generator.py
#!/usr/bin/env python
"""
Generate bogus data to put into a memcached persist queue for testing.
The approximate math on how many data points will be generated:
options.loop * options.routers * options.interfaces * (options.oidsets * 2)
as most of the oidsets being pulled have 2 oids.
"""
import json
import os
import pprint
import string
import sys
import time
from optparse import OptionParser
from esmond.api.models import OIDSet
from esmond.config import get_config, get_config_path
from esmond.persist import PollResult, MemcachedPersistQueue
pp = pprint.PrettyPrinter(indent=2)
class TestQueuesException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class TestQueuesWarning(Warning): pass
class TestQueues(object):
"""Manage the test queues"""
def __init__(self, config, write, verbose):
super(TestQueues, self).__init__()
self.config = config
self.write = write
self.verbose = verbose
self._queues = {}
self._device_map = {}
self._next_q = 1
if not config.persist_queues.has_key('cassandra'):
raise TestQueuesException('Config does not have cassandra persist_queues defined.')
self._cassandra_queues = config.persist_queues['cassandra'][1]
for i in xrange(1,self._cassandra_queues+1):
qname = 'cassandra_{0}'.format(i)
self._queues[qname] = MemcachedPersistQueue(qname, config.espersistd_uri)
print self._queues[qname]
def _get_device_queue(self, pr):
if not self._device_map.has_key(pr.device_name):
self._device_map[pr.device_name] = self._next_q
if self._next_q < self._cassandra_queues:
self._next_q += 1
else:
self._next_q = 1
return 'cassandra_{0}'.format(self._device_map[pr.device_name])
def put(self, pr):
q = self._get_device_queue(pr)
if not self.write:
print 'Noop to: {0}'.format(q)
if self.verbose > 1: print pr.json()
else:
self._queues[q].put(pr)
def main():
usage = '%prog [ -r NUM | -i NUM | -o NUM | -l NUM | -v ]'
usage += '\n\tAmount of data generated ~= r * i * (o * 2) * l'
parser = OptionParser(usage=usage)
parser.add_option('-r', '--routers', metavar='NUM_ROUTERS',
type='int', dest='routers', default=1,
help='Number of test "routers" to generate (default=%default).')
parser.add_option('-i', '--interfaces', metavar='NUM_INTERFACES',
type='int', dest='interfaces', default=2,
help='Number of test interfaces to generate on each test router (default=%default).')
parser.add_option('-o', '--oidsets', metavar='NUM_OIDSETS',
type='int', dest='oidsets', default=2,
help='Number of oidsets to assign to each fake device/router (default=%default).')
parser.add_option('-l', '--loop', metavar='NUM_LOOPS',
type='int', dest='loop', default=1,
help='Number of times to send data for each "device (default=%default)."')
parser.add_option('-p', '--prefix', metavar='PREFIX',
type='string', dest='prefix', default='fake',
help='Device name prefix - make new names (default=%default).')
parser.add_option('-W', '--write',
dest='write', action='store_true', default=False,
help='Actually write the data to the memcache queue.')
parser.add_option('-v', '--verbose',
dest='verbose', action='count', default=False,
help='Verbose output - -v, -vv, etc.')
options, args = parser.parse_args()
router_names = []
for i in range(1,5):
for c in string.lowercase:
router_names.append(c*i)
if options.routers > 26*4:
print 'There is an upper bound of {0} fake routers.'.format(26*4)
return -1
config = get_config(get_config_path())
qs = TestQueues(config, options.write, options.verbose)
oidset_oid = {}
oid_count = 0
for oidset in OIDSet.objects.filter(frequency=30)[0:options.oidsets]:
if not oidset_oid.has_key(oidset.name): oidset_oid[oidset.name] = []
for oid in oidset.oids.exclude(name='sysUpTime'):
oidset_oid[oidset.name].append(oid.name)
oid_count += 1
if options.verbose:
print 'Using following oidsets/oids for fake devices:'
pp.pprint(oidset_oid)
loopcount = 0
ts = int(time.time())
val = 100
# 43200 - 12 hrs. 1440 loops - 1/2 day of data
print 'Generating {0} data points.'.format(
options.loop*options.routers*options.interfaces*oid_count)
for iteration in xrange(options.loop):
if options.verbose: print 'Loop {0}/{1}'.format(iteration, options.loop)
for dn in router_names[0:options.routers]:
device_name = '{0}_rtr_{1}'.format(options.prefix, dn)
for oidset in oidset_oid.keys():
data = []
for oid in oidset_oid[oidset]:
for i in xrange(options.interfaces):
interface_name = 'fake_iface_{0}'.format(i)
datum = [[oid, interface_name], val]
data.append(datum)
pr = PollResult(
oidset_name=oidset,
device_name=device_name,
oid_name=oid,
timestamp=ts,
data=data,
metadata={'tsdb_flags': 1}
)
if options.verbose > 1: print pr.json()
qs.put(pr)
ts += 30
val += 50
loopcount += 1
if __name__ == '__main__':
main() | en | 0.763185 | #!/usr/bin/env python Generate bogus data to put into a memcached persist queue for testing. The approximate math on how many data points will be generated: options.loop * options.routers * options.interfaces * (options.oidsets * 2) as most of the oidsets being pulled have 2 oids. Manage the test queues # 43200 - 12 hrs. 1440 loops - 1/2 day of data | 2.426005 | 2 |
get_user_stats.py | gchandru1/twitterprofile | 0 | 6615383 | from nltk import *
from pandas import *
from nltk.tokenize import RegexpTokenizer
import json
from pprint import pprint
def pretty_print(d):
print json.dumps(d, indent = 4)
def lexical_diversity(text):
return float(len(set(text)))/len(text)
def analyse_text(tweet):
tweet_token = word_tokenize(tweet["text"])
tweet_text = Text(tweet_token)
fdist = FreqDist(tweet_text)
counthashtag = fdist['#'] #1
countmentions = fdist['@'] #2
noofwords = len(tweet_text) #3
noofchars = len(tweet["text"]) #4
is_link = (tweet["entities"]["urls"] != []) * 1 #5
favcount = tweet["favorite_count"] if tweet["text"][:2] != 'RT' else 0 #6
rtcount = tweet["retweet_count"] if tweet["text"][:2] != 'RT' else 0 #7
lex_diversity = lexical_diversity(tweet_text) #8
return [tweet["user"].get("screen_name"),
counthashtag, countmentions, noofwords, noofchars,
is_link, favcount, rtcount, lex_diversity]
def get_user_stats(tweets):
column_names = ['user', 'counthastag', 'countmentions',
'noofwords', 'noofchars', 'is_link', 'favcount',
'rtcount', 'lex_diversity']
user_collated = DataFrame(columns=column_names)
for i, tweet in enumerate(tweets):
count_values = analyse_text(tweet)
user_collated.loc[i] = count_values
user_grouped = user_collated.groupby(['user'])
user_grouped_values = user_grouped.mean().iloc[0].values.tolist()
user_lvl_list = [tweets[0]["user"].get("screen_name")]
user_lvl_list.extend(user_grouped_values)
user_lvl_list.extend([len(user_collated)])
return user_lvl_list
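# Added output sketch (illustrative values): get_user_stats returns one row per
# user: the screen name, the eight per-tweet averages in column order, then the
# tweet count, e.g.
# ['alice', 0.4, 1.2, 14.0, 96.5, 0.3, 2.1, 0.8, 0.91, 200]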
def collate_user_stats():
column_names = ['user', 'avghastagscount', 'avgmentioncount',
'avgwordcount', 'avgcharcount', 'avglinkcount',
'avgfavcount', 'avgrtcount', 'lex_diversity',
'tweetcount']
all_circle_stats = DataFrame(columns=column_names)
with open('/Users/Chandru/Documents/projects/twitterprofile/all_circle_tweets.json') as tfile:
data = json.load(tfile)
for user, tweets in data.iteritems():
try:
stats = dict(zip(column_names, get_user_stats(tweets)))
all_circle_stats = all_circle_stats.append(stats, ignore_index=True)
except Exception as e:
print "No tweets for " + user + " in file"
print all_circle_stats
collate_user_stats()
| from nltk import *
from pandas import *
from nltk.tokenize import RegexpTokenizer
import json
from pprint import pprint
def pretty_print(d):
print json.dumps(d, indent = 4)
def lexical_diversity(text):
return float(len(set(text)))/len(text)
def analyse_text(tweet):
tweet_token = word_tokenize(tweet["text"])
tweet_text = Text(tweet_token)
fdist = FreqDist(tweet_text)
counthashtag = fdist['#'] #1
countmentions = fdist['@'] #2
noofwords = len(tweet_text) #3
noofchars = len(tweet["text"]) #4
is_link = (tweet["entities"]["urls"] != []) * 1 #5
favcount = tweet["favorite_count"] if tweet["text"][:2] != 'RT' else 0 #6
rtcount = tweet["retweet_count"] if tweet["text"][:2] != 'RT' else 0 #7
lex_diversity = lexical_diversity(tweet_text) #8
return [tweet["user"].get("screen_name"),
counthashtag, countmentions, noofwords, noofchars,
is_link, favcount, rtcount, lex_diversity]
def get_user_stats(tweets):
column_names = ['user', 'counthastag', 'countmentions',
'noofwords', 'noofchars', 'is_link', 'favcount',
'rtcount', 'lex_diversity']
user_collated = DataFrame(columns=column_names)
for i, tweet in enumerate(tweets):
count_values = analyse_text(tweet)
user_collated.loc[i] = count_values
user_grouped = user_collated.groupby(['user'])
user_grouped_values = user_grouped.mean().iloc[0].values.tolist()
user_lvl_list = [tweets[0]["user"].get("screen_name")]
user_lvl_list.extend(user_grouped_values)
user_lvl_list.extend([len(user_collated)])
return user_lvl_list
def collate_user_stats():
column_names = ['user', 'avghastagscount', 'avgmentioncount',
'avgwordcount', 'avgcharcount', 'avglinkcount',
'avgfavcount', 'avgrtcount', 'lex_diversity',
'tweetcount']
all_circle_stats = DataFrame(columns=column_names)
with open('/Users/Chandru/Documents/projects/twitterprofile/all_circle_tweets.json') as tfile:
data = json.load(tfile)
for user, tweets in data.iteritems():
try:
stats = dict(zip(column_names, get_user_stats(tweets)))
all_circle_stats = all_circle_stats.append(stats, ignore_index=True)
except Exception as e:
print "No tweets for " + user + " in file"
print all_circle_stats
collate_user_stats()
| en | 0.865895 | #1 #2 #3 #4 #5 #6 #7 #8 | 3.012368 | 3 |
tests/test.py | madron/mqttassistant | 0 | 6615384 | import asyncio
class Callback:
def __init__(self, delay=0):
self.delay = delay
self.called = []
async def __call__(self, **kwargs):
await asyncio.sleep(self.delay)
self.called.append(kwargs)
return kwargs
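# Added usage sketch (illustrative, inside a test coroutine / running event loop):
# cb = Callback(delay=0.1)
# await cb(topic='home/light', payload='on')
# assert cb.called == [{'topic': 'home/light', 'payload': 'on'}]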
| import asyncio
class Callback:
def __init__(self, delay=0):
self.delay = delay
self.called = []
async def __call__(self, **kwargs):
await asyncio.sleep(self.delay)
self.called.append(kwargs)
return kwargs
| none | 1 | 2.891272 | 3 | |
bin/run_extractor.py | ktrianta/rustql | 12 | 6615385 | <reponame>ktrianta/rustql
#!/usr/bin/python3
import datetime
import json
import os
import subprocess
ROOT = os.path.abspath('.')
DATA_PATH = os.path.join(ROOT, 'data')
CRATES_INFO_PATH = os.path.join(DATA_PATH, 'crate-information.json')
LOG_PATH = os.path.join(DATA_PATH, 'compile.log')
COMPILATION_PATH = os.path.join(DATA_PATH, 'compilation')
MANIFEST_PATH = os.path.join(COMPILATION_PATH, 'Cargo.toml')
SCCACHE_DIR = os.path.join(DATA_PATH, 'cache')
EXTRACTOR_PATH = os.path.join(ROOT, 'rustql-extractor/target/release/rustc')
EXTRACTOR_TARGET_DIR = os.path.join(DATA_PATH, 'crates')
def collect_crates():
with open(CRATES_INFO_PATH) as fp:
return json.load(fp)
def create_manifest(crate):
with open(MANIFEST_PATH, 'w') as fp:
fp.write('''
[package]
name = "rustql-dummy"
version = "0.1.0"
edition = "2018"
[dependencies]
{} = "{}"
'''.format(crate['name'], crate['version']))
def compile_crate(crate):
result = subprocess.run(
args=['/home/vagrant/.cargo/bin/cargo', 'build', '--verbose'],
# Give 20 minutes for each crate.
timeout=20*60,
cwd=COMPILATION_PATH,
env={
# "RUST_BACKTRACE": "1",
"PATH": "/usr/local/sbin:/usr/local/bin:"
"/usr/sbin:/usr/bin:/sbin:/bin",
"SCCACHE_DIR": SCCACHE_DIR,
"EXTRACTOR_TARGET_DIR": EXTRACTOR_TARGET_DIR,
# "LD_LIBRARY_PATH": "/data/toolchain/lib/",
"RUSTC_WRAPPER": "sccache",
"RUSTC": EXTRACTOR_PATH,
},
)
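# Added commentary: `result` is never inspected and subprocess.run is called
# without check=True, so a failed cargo build passes silently; only a timeout
# (TimeoutExpired) propagates to the except block in main(). Passing check=True
# would make non-zero exits raise CalledProcessError and be logged the same way.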
def main():
crates = collect_crates()
with open(LOG_PATH, 'a') as fp:
def log(*args):
timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
fp.write(timestamp)
fp.write(',')
fp.write(','.join(args))
fp.write('\n')
fp.flush()
for crate in crates:
log('START_COMPILE', crate['id'])
try:
create_manifest(crate)
compile_crate(crate)
except Exception as e:
log('ERROR', str(e))
raise
log('END_COMPILE', crate['id'])
if __name__ == '__main__':
main()
| #!/usr/bin/python3
import datetime
import json
import os
import subprocess
ROOT = os.path.abspath('.')
DATA_PATH = os.path.join(ROOT, 'data')
CRATES_INFO_PATH = os.path.join(DATA_PATH, 'crate-information.json')
LOG_PATH = os.path.join(DATA_PATH, 'compile.log')
COMPILATION_PATH = os.path.join(DATA_PATH, 'compilation')
MANIFEST_PATH = os.path.join(COMPILATION_PATH, 'Cargo.toml')
SCCACHE_DIR = os.path.join(DATA_PATH, 'cache')
EXTRACTOR_PATH = os.path.join(ROOT, 'rustql-extractor/target/release/rustc')
EXTRACTOR_TARGET_DIR = os.path.join(DATA_PATH, 'crates')
def collect_crates():
with open(CRATES_INFO_PATH) as fp:
return json.load(fp)
def create_manifest(crate):
with open(MANIFEST_PATH, 'w') as fp:
fp.write('''
[package]
name = "rustql-dummy"
version = "0.1.0"
edition = "2018"
[dependencies]
{} = "{}"
'''.format(crate['name'], crate['version']))
def compile_crate(crate):
result = subprocess.run(
args=['/home/vagrant/.cargo/bin/cargo', 'build', '--verbose'],
# Give 20 minutes for each crate.
timeout=20*60,
cwd=COMPILATION_PATH,
env={
# "RUST_BACKTRACE": "1",
"PATH": "/usr/local/sbin:/usr/local/bin:"
"/usr/sbin:/usr/bin:/sbin:/bin",
"SCCACHE_DIR": SCCACHE_DIR,
"EXTRACTOR_TARGET_DIR": EXTRACTOR_TARGET_DIR,
# "LD_LIBRARY_PATH": "/data/toolchain/lib/",
"RUSTC_WRAPPER": "sccache",
"RUSTC": EXTRACTOR_PATH,
},
)
def main():
crates = collect_crates()
with open(LOG_PATH, 'a') as fp:
def log(*args):
timestamp = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
fp.write(timestamp)
fp.write(',')
fp.write(','.join(args))
fp.write('\n')
fp.flush()
for crate in crates:
log('START_COMPILE', crate['id'])
try:
create_manifest(crate)
compile_crate(crate)
except Exception as e:
log('ERROR', str(e))
raise
log('END_COMPILE', crate['id'])
if __name__ == '__main__':
main() | en | 0.552656 | #!/usr/bin/python3 [package] name = "rustql-dummy" version = "0.1.0" edition = "2018" [dependencies] {} = "{}" # Give 20 minutes for each crate. # "RUST_BACKTRACE": "1", # "LD_LIBRARY_PATH": "/data/toolchain/lib/", | 2.0865 | 2 |
api/app/tests/redapp/test_fwi.py | bcgov/wps | 19 | 6615386 | <reponame>bcgov/wps
""" Very basic (temporary?) unit test for FWI """
import unittest
from app.utils.redapp import FWICalculateDailyStatisticsCOM
from app.utils.time import get_utc_now
class BasicFWITestCase(unittest.TestCase):
""" Very dumb, very basic unit test that should really get replaced. """
def test_fwi(self):
""" veyr basic test """
result = FWICalculateDailyStatisticsCOM(
latitude=50.6733333,
longitude=-120.4816667,
yesterday_ffmc=50,
yesterday_dmc=10,
yesterday_dc=10,
noon_temp=20,
noon_rh=30,
noon_precip=1,
noon_wind_speed=20,
calc_hourly=True,
hourly_temp=20,
hourly_rh=20,
hourly_precip=1,
hourly_wind_speed=10,
previous_hourly_ffmc=50,
use_van_wagner=False,
use_lawson_previous_hour=True,
time_of_interest=get_utc_now())
self.assertIsNotNone(result)
| """ Very basic (temporary?) unit test for FWI """
import unittest
from app.utils.redapp import FWICalculateDailyStatisticsCOM
from app.utils.time import get_utc_now
class BasicFWITestCase(unittest.TestCase):
""" Very dumb, very basic unit test that should really get replaced. """
def test_fwi(self):
""" veyr basic test """
result = FWICalculateDailyStatisticsCOM(
latitude=50.6733333,
longitude=-120.4816667,
yesterday_ffmc=50,
yesterday_dmc=10,
yesterday_dc=10,
noon_temp=20,
noon_rh=30,
noon_precip=1,
noon_wind_speed=20,
calc_hourly=True,
hourly_temp=20,
hourly_rh=20,
hourly_precip=1,
hourly_wind_speed=10,
previous_hourly_ffmc=50,
use_van_wagner=False,
use_lawson_previous_hour=True,
time_of_interest=get_utc_now())
self.assertIsNotNone(result) | en | 0.917626 | Very basic (temporary?) unit test for FWI Very dumb, very basic unit test that should really get replaced. very basic test | 2.832663 | 3
flow_models/plot.py | piotrjurkiewicz/flow_stats | 9 | 6615387 | #!/usr/bin/python3
"""
Generates plots from flow records and fitted models (requires `pandas` and `scipy`).
"""
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from .lib.data import UNITS, LINE_NBINS, load_data
from .lib.plot import plot_pdf, plot_cdf, plot_avg, save_figure, matplotlib_config, MODES_PDF, MODES_CDF
from .lib.util import logmsg
X_VALUES = ['length', 'size', 'duration', 'rate']
SIZE = 0.6
FIGSIZE = [SIZE * 11.2, SIZE * 6.8]
def plot(objects, x_val='length', ext='png', single=False, normalize=True, fft=False, cdf_modes=(), pdf_modes=(), avg_modes=()):
data = load_data(objects)
idx = None
if single:
fig = plt.figure(figsize=[FIGSIZE[0] * 2.132, FIGSIZE[1] * 2])
ax = plt.subplot(2, 2, 1)
else:
fig = plt.figure(figsize=FIGSIZE)
ax = plt.subplot(1, 1, 1)
plt.subplots_adjust(0, 0, 1, 1)
for obj, df in data.items():
if idx is None:
idx = np.unique(np.rint(np.geomspace(df.index.min(), df.index.max(), LINE_NBINS)).astype(np.int64))
for what in ['flows', 'packets', 'octets']:
logmsg('Drawing CDF', obj, what)
plot_cdf(df, idx, x_val, what, mode={'line', 'mixture', *cdf_modes})
ax.set_xlabel(f'Flow {x_val} [{UNITS[x_val]}]')
ax.set_ylabel('CDF (Fraction of)')
if not single:
out = 'cdf'
logmsg('Saving', out)
save_figure(fig, out, ext=ext)
plt.close(fig)
logmsg('Done', out)
for n, what in enumerate(['flows', 'packets', 'octets']):
if single:
ax = plt.subplot(2, 2, n + 2, sharex=ax)
else:
fig, ax = plt.subplots(figsize=FIGSIZE)
plt.subplots_adjust(0, 0, 1, 1)
for obj, df in data.items():
logmsg('Drawing PDF', obj, what)
plot_pdf(df, idx, x_val, what, mode={'line', 'mixture', *pdf_modes}, normalize=normalize, fft=fft)
ax.set_xlabel(f'Flow {x_val} [{UNITS[x_val]}]')
ax.set_ylabel(f'PDF of {what}')
if not single:
out = f'pdf-{what}'
logmsg('Saving', out)
save_figure(fig, out, ext=ext)
plt.close(fig)
logmsg('Done', out)
if single:
out = 'single'
logmsg('Saving', out)
save_figure(fig, out, ext=ext)
plt.close(fig)
logmsg('Done', out)
for what in ['packets', 'octets', 'packet_size']:
fig, ax = plt.subplots(figsize=FIGSIZE)
for obj, df in data.items():
logmsg('Drawing AVG', obj, what)
plot_avg(df, idx, x_val, what, mode={'line', 'mixture', *avg_modes})
ax.set_xlabel(f'Flow {x_val} [{UNITS[x_val]}]')
ax.set_ylabel(f"Average {what.replace('_', ' ')} [bytes]")
out = f'avg-{what}'
logmsg('Saving', out)
save_figure(fig, out, ext=ext)
plt.close(fig)
logmsg('Done', out)
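# Added usage sketch (illustrative paths; run as a module because of the relative
# imports above):
#   python3 -m flow_models.plot -x length --format pdf histogram.csv mixture_dir/
# plots the CDF, per-quantity PDFs and average-value panels for the histogram
# and, when given, the fitted mixture.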
def parser():
p = argparse.ArgumentParser(description=__doc__)
p.add_argument('--format', default='png', choices=['png', 'pdf'], help='plot file format')
p.add_argument('--single', action='store_true', help='plot PDF and CDF in single file')
p.add_argument('--no-normalize', action='store_false', help='do not normalize PDF datapoints')
p.add_argument('--fft', action='store_true', help='use FFT for calculating KDE')
p.add_argument('-P', action='append', default=[], choices=MODES_PDF, help='additional PDF plot modes (can be specified multiple times)')
p.add_argument('-C', action='append', default=[], choices=MODES_CDF, help='additional CDF plot modes (can be specified multiple times)')
p.add_argument('-x', default='length', choices=X_VALUES, help='x axis value')
p.add_argument('histogram', help='csv_hist file to plot')
p.add_argument('mixture', nargs='?', help='mixture directory to plot')
return p
def main():
app_args = parser().parse_args()
files = [app_args.histogram]
if app_args.mixture:
files.append(app_args.mixture)
with matplotlib_config(latex=False):
plot(files, app_args.x, app_args.format, app_args.single, app_args.no_normalize, app_args.fft,
app_args.C, app_args.P)
if __name__ == '__main__':
main()
| #!/usr/bin/python3
"""
Generates plots from flow records and fitted models (requires `pandas` and `scipy`).
"""
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from .lib.data import UNITS, LINE_NBINS, load_data
from .lib.plot import plot_pdf, plot_cdf, plot_avg, save_figure, matplotlib_config, MODES_PDF, MODES_CDF
from .lib.util import logmsg
X_VALUES = ['length', 'size', 'duration', 'rate']
SIZE = 0.6
FIGSIZE = [SIZE * 11.2, SIZE * 6.8]
def plot(objects, x_val='length', ext='png', single=False, normalize=True, fft=False, cdf_modes=(), pdf_modes=(), avg_modes=()):
data = load_data(objects)
idx = None
if single:
fig = plt.figure(figsize=[FIGSIZE[0] * 2.132, FIGSIZE[1] * 2])
ax = plt.subplot(2, 2, 1)
else:
fig = plt.figure(figsize=FIGSIZE)
ax = plt.subplot(1, 1, 1)
plt.subplots_adjust(0, 0, 1, 1)
for obj, df in data.items():
if idx is None:
idx = np.unique(np.rint(np.geomspace(df.index.min(), df.index.max(), LINE_NBINS)).astype(np.int64))
for what in ['flows', 'packets', 'octets']:
logmsg('Drawing CDF', obj, what)
plot_cdf(df, idx, x_val, what, mode={'line', 'mixture', *cdf_modes})
ax.set_xlabel(f'Flow {x_val} [{UNITS[x_val]}]')
ax.set_ylabel('CDF (Fraction of)')
if not single:
out = 'cdf'
logmsg('Saving', out)
save_figure(fig, out, ext=ext)
plt.close(fig)
logmsg('Done', out)
for n, what in enumerate(['flows', 'packets', 'octets']):
if single:
ax = plt.subplot(2, 2, n + 2, sharex=ax)
else:
fig, ax = plt.subplots(figsize=FIGSIZE)
plt.subplots_adjust(0, 0, 1, 1)
for obj, df in data.items():
logmsg('Drawing PDF', obj, what)
plot_pdf(df, idx, x_val, what, mode={'line', 'mixture', *pdf_modes}, normalize=normalize, fft=fft)
ax.set_xlabel(f'Flow {x_val} [{UNITS[x_val]}]')
ax.set_ylabel(f'PDF of {what}')
if not single:
out = f'pdf-{what}'
logmsg('Saving', out)
save_figure(fig, out, ext=ext)
plt.close(fig)
logmsg('Done', out)
if single:
out = 'single'
logmsg('Saving', out)
save_figure(fig, out, ext=ext)
plt.close(fig)
logmsg('Done', out)
for what in ['packets', 'octets', 'packet_size']:
fig, ax = plt.subplots(figsize=FIGSIZE)
for obj, df in data.items():
logmsg('Drawing AVG', obj, what)
plot_avg(df, idx, x_val, what, mode={'line', 'mixture', *avg_modes})
ax.set_xlabel(f'Flow {x_val} [{UNITS[x_val]}]')
ax.set_ylabel(f"Average {what.replace('_', ' ')} [bytes]")
out = f'avg-{what}'
logmsg('Saving', out)
save_figure(fig, out, ext=ext)
plt.close(fig)
logmsg('Done', out)
def parser():
p = argparse.ArgumentParser(description=__doc__)
p.add_argument('--format', default='png', choices=['png', 'pdf'], help='plot file format')
p.add_argument('--single', action='store_true', help='plot PDF and CDF in single file')
p.add_argument('--no-normalize', action='store_false', help='do not normalize PDF datapoints')
p.add_argument('--fft', action='store_true', help='use FFT for calculating KDE')
p.add_argument('-P', action='append', default=[], choices=MODES_PDF, help='additional PDF plot modes (can be specified multiple times)')
p.add_argument('-C', action='append', default=[], choices=MODES_CDF, help='additional CDF plot modes (can be specified multiple times)')
p.add_argument('-x', default='length', choices=X_VALUES, help='x axis value')
p.add_argument('histogram', help='csv_hist file to plot')
p.add_argument('mixture', nargs='?', help='mixture directory to plot')
return p
def main():
app_args = parser().parse_args()
files = [app_args.histogram]
if app_args.mixture:
files.append(app_args.mixture)
with matplotlib_config(latex=False):
plot(files, app_args.x, app_args.format, app_args.single, app_args.no_normalize, app_args.fft,
app_args.C, app_args.P)
if __name__ == '__main__':
main()
| en | 0.692441 | #!/usr/bin/python3 Generates plots from flow records and fitted models (requires `pandas` and `scipy`). | 2.231792 | 2 |
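A standalone sketch (not part of the repository) of the log-spaced index grid that plot() above builds with np.geomspace; the endpoints 1 and 10000 are illustrative:

import numpy as np

# ~20 points spaced evenly in log space, rounded and deduplicated exactly as in plot()
idx = np.unique(np.rint(np.geomspace(1, 10000, 20)).astype(np.int64))
print(idx)  # [1 2 3 4 7 11 18 30 48 78 127 207 336 546 886 1438 2336 3793 6158 10000]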
prysit/normalize.py | kevinkovalchik/my_prosit | 70 | 6615388 | import numpy
def base_peak(spectral):
max_int = spectral.max(1)
spectral = spectral / max_int[:, numpy.newaxis]
spectral = numpy.nan_to_num(spectral)
return spectral
| import numpy
def base_peak(spectral):
max_int = spectral.max(1)
spectral = spectral / max_int[:, numpy.newaxis]
spectral = numpy.nan_to_num(spectral)
return spectral
| none | 1 | 2.731359 | 3 | |
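A minimal usage sketch for base_peak above (the input values are invented): each row is divided by its own maximum so every spectrum's base peak becomes 1.0, and nan_to_num guards all-zero rows:

import numpy

spectral = numpy.array([[1.0, 5.0, 2.0],
                        [0.0, 0.0, 0.0]])              # all-zero row exercises the 0/0 guard
normed = spectral / spectral.max(1)[:, numpy.newaxis]  # same arithmetic as base_peak
normed = numpy.nan_to_num(normed)                      # 0/0 -> nan -> 0
print(normed)  # [[0.2 1.  0.4] [0.  0.  0. ]]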
research/cv/AlphaPose/infer/sdk/main.py | mindspore-ai/models | 77 | 6615389 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''sdk main'''
import argparse
import os
import sys
import time
import cv2
import numpy as np
from api.infer import SdkApi
from config import config as cfg
def parser_args():
'''set parameter'''
parser = argparse.ArgumentParser(description="maskrcnn inference")
parser.add_argument("--img_path",
type=str,
required=True,
help="image directory.")
parser.add_argument(
"--pipeline_path",
type=str,
required=False,
default="config/maskrcnn_ms.pipeline",
help="image file path. The default is 'config/maskrcnn_ms.pipeline'. ")
parser.add_argument(
"--model_type",
type=str,
required=False,
default="dvpp",
help="rgb: high-precision, dvpp: high performance. The default is 'dvpp'.")
parser.add_argument(
"--infer_mode",
type=str,
required=False,
default="infer",
help="infer:only infer, eval: accuracy evaluation. The default is 'infer'.")
parser.add_argument(
"--infer_result_dir",
type=str,
required=False,
default="../data/infer_result",
help="cache dir of inference result. The default is '../data/infer_result'."
)
parser.add_argument(
"--dataset_name",
type=str,
required=True,
default="TUM",
help="dataset name."
)
parser.add_argument("--ann_file",
type=str,
required=False,
help="eval ann_file.")
return parser.parse_args()
def write_depth(path, depth, bits=1):
"""Write depth map to pfm and png file.
Args:
path (str): filepath without extension
depth (array): depth
:param path:
:param depth:
:param bits:
"""
write_pfm(path + ".pfm", depth.astype(np.float32))
depth_min = depth.min()
depth_max = depth.max()
max_val = (2 ** (8 * bits)) - 1
if depth_max - depth_min > np.finfo("float").eps:
out = max_val * (depth - depth_min) / (depth_max - depth_min)
else:
        out = np.zeros(depth.shape, dtype=depth.dtype)
if bits == 1:
cv2.imwrite(path + ".png", out.astype("uint8"))
elif bits == 2:
cv2.imwrite(path + ".png", out.astype("uint16"))
return out
def write_pfm(path, image, scale=1):
"""Write pfm file.
Args:
path (str): path file
image (array): data
scale (int, optional): Scale. Defaults to 1.
"""
with open(path, "wb") as file:
color = None
if image.dtype.name != "float32":
raise Exception("Image dtype must be float32.")
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif (
len(image.shape) == 2 or len(
image.shape) == 3 and image.shape[2] == 1
): # greyscale
color = False
else:
raise Exception(
"Image must have H x W x 3, H x W x 1 or H x W dimensions.")
file.write("PF\n" if color else "Pf\n".encode())
file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == "<" or endian == "=" and sys.byteorder == "little":
scale = -scale
file.write("%f\n".encode() % scale)
image.tofile(file)
def process_img(img_file):
'''bin preprocess'''
f = open(img_file, mode='rb')
img = np.fromfile(f, dtype=np.float32).reshape((256, 192, 3))
return img
def image_inference(pipeline_path, stream_name, img_dir, result_dir,
replace_last, dataset_name, model_type):
'''sdk process'''
sdk_api = SdkApi(pipeline_path)
if not sdk_api.init():
exit(-1)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
img_data_plugin_id = 0
dirs = []
if dataset_name == 'images':
data_set = img_dir + "images"
dirs.append("images")
elif dataset_name == "Kitti":
data_set = img_dir + "/Kitti_raw_data"
dirs = os.listdir(data_set)
elif dataset_name == "Sintel":
data_set = img_dir + "/Sintel/final_left"
dirs = os.listdir(data_set)
else:
data_set = img_dir
dirs = os.listdir(data_set)
for d in dirs:
if dataset_name == "images":
images = os.listdir(os.path.join(data_set))
elif dataset_name == "Kitti":
images = os.listdir(os.path.join(data_set, d, "images"))
elif dataset_name == "Sintel":
images = os.listdir(os.path.join(data_set, d))
else:
images = os.listdir(os.path.join(data_set, d))
total_len = len(images)
for ind, file_name in enumerate(images):
if dataset_name == "images":
file_path = os.path.join(data_set, file_name)
elif dataset_name == "Kitti":
file_path = os.path.join(data_set, d, "images", file_name)
elif dataset_name == "Sintel":
file_path = os.path.join(data_set, d, file_name)
print(img_dir, " ", d, " ", file_name)
print("file_path is ", file_path)
img_np = process_img(file_path)
img_shape = img_np.shape
print("111", img_shape)
sdk_api.send_img_input(stream_name,
img_data_plugin_id, "appsrc0",
img_np.tobytes(), img_shape)
start_time = time.time()
result = sdk_api.get_result(stream_name)
end_time = time.time() - start_time
save_path = os.path.join(result_dir, dataset_name, d)
if not os.path.exists(save_path):
os.makedirs(save_path)
save_path = os.path.join(save_path, file_name)
print('.' + save_path.split('.')[-1])
save_path = save_path.replace(
'.' + save_path.split('.')[-1], '_0.bin')
print('save_path is ', save_path)
with open(save_path, "wb") as fp:
fp.write(result)
print(
f"End-2end inference, file_name: {file_path}, {ind + 1}/{total_len}, elapsed_time: {end_time}.\n"
)
if __name__ == "__main__":
args = parser_args()
stream_name1 = cfg.STREAM_NAME.encode("utf-8")
image_inference(args.pipeline_path, stream_name1, args.img_path,
args.infer_result_dir, True, args.dataset_name, args.model_type)
| # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''sdk main'''
import argparse
import os
import sys
import time
import cv2
import numpy as np
from api.infer import SdkApi
from config import config as cfg
def parser_args():
'''set parameter'''
parser = argparse.ArgumentParser(description="maskrcnn inference")
parser.add_argument("--img_path",
type=str,
required=True,
help="image directory.")
parser.add_argument(
"--pipeline_path",
type=str,
required=False,
default="config/maskrcnn_ms.pipeline",
help="image file path. The default is 'config/maskrcnn_ms.pipeline'. ")
parser.add_argument(
"--model_type",
type=str,
required=False,
default="dvpp",
help="rgb: high-precision, dvpp: high performance. The default is 'dvpp'.")
parser.add_argument(
"--infer_mode",
type=str,
required=False,
default="infer",
help="infer:only infer, eval: accuracy evaluation. The default is 'infer'.")
parser.add_argument(
"--infer_result_dir",
type=str,
required=False,
default="../data/infer_result",
help="cache dir of inference result. The default is '../data/infer_result'."
)
parser.add_argument(
"--dataset_name",
type=str,
required=True,
default="TUM",
help="dataset name."
)
parser.add_argument("--ann_file",
type=str,
required=False,
help="eval ann_file.")
return parser.parse_args()
def write_depth(path, depth, bits=1):
"""Write depth map to pfm and png file.
Args:
path (str): filepath without extension
depth (array): depth
:param path:
:param depth:
:param bits:
"""
write_pfm(path + ".pfm", depth.astype(np.float32))
depth_min = depth.min()
depth_max = depth.max()
max_val = (2 ** (8 * bits)) - 1
if depth_max - depth_min > np.finfo("float").eps:
out = max_val * (depth - depth_min) / (depth_max - depth_min)
else:
        out = np.zeros(depth.shape, dtype=depth.dtype)
if bits == 1:
cv2.imwrite(path + ".png", out.astype("uint8"))
elif bits == 2:
cv2.imwrite(path + ".png", out.astype("uint16"))
return out
def write_pfm(path, image, scale=1):
"""Write pfm file.
Args:
path (str): path file
image (array): data
scale (int, optional): Scale. Defaults to 1.
"""
with open(path, "wb") as file:
color = None
if image.dtype.name != "float32":
raise Exception("Image dtype must be float32.")
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif (
len(image.shape) == 2 or len(
image.shape) == 3 and image.shape[2] == 1
): # greyscale
color = False
else:
raise Exception(
"Image must have H x W x 3, H x W x 1 or H x W dimensions.")
file.write("PF\n" if color else "Pf\n".encode())
file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == "<" or endian == "=" and sys.byteorder == "little":
scale = -scale
file.write("%f\n".encode() % scale)
image.tofile(file)
def process_img(img_file):
'''bin preprocess'''
f = open(img_file, mode='rb')
img = np.fromfile(f, dtype=np.float32).reshape((256, 192, 3))
return img
def image_inference(pipeline_path, stream_name, img_dir, result_dir,
replace_last, dataset_name, model_type):
'''sdk process'''
sdk_api = SdkApi(pipeline_path)
if not sdk_api.init():
exit(-1)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
img_data_plugin_id = 0
dirs = []
if dataset_name == 'images':
data_set = img_dir + "images"
dirs.append("images")
elif dataset_name == "Kitti":
data_set = img_dir + "/Kitti_raw_data"
dirs = os.listdir(data_set)
elif dataset_name == "Sintel":
data_set = img_dir + "/Sintel/final_left"
dirs = os.listdir(data_set)
else:
data_set = img_dir
dirs = os.listdir(data_set)
for d in dirs:
if dataset_name == "images":
images = os.listdir(os.path.join(data_set))
elif dataset_name == "Kitti":
images = os.listdir(os.path.join(data_set, d, "images"))
elif dataset_name == "Sintel":
images = os.listdir(os.path.join(data_set, d))
else:
images = os.listdir(os.path.join(data_set, d))
total_len = len(images)
for ind, file_name in enumerate(images):
if dataset_name == "images":
file_path = os.path.join(data_set, file_name)
elif dataset_name == "Kitti":
file_path = os.path.join(data_set, d, "images", file_name)
elif dataset_name == "Sintel":
file_path = os.path.join(data_set, d, file_name)
print(img_dir, " ", d, " ", file_name)
print("file_path is ", file_path)
img_np = process_img(file_path)
img_shape = img_np.shape
print("111", img_shape)
sdk_api.send_img_input(stream_name,
img_data_plugin_id, "appsrc0",
img_np.tobytes(), img_shape)
start_time = time.time()
result = sdk_api.get_result(stream_name)
end_time = time.time() - start_time
save_path = os.path.join(result_dir, dataset_name, d)
if not os.path.exists(save_path):
os.makedirs(save_path)
save_path = os.path.join(save_path, file_name)
print('.' + save_path.split('.')[-1])
save_path = save_path.replace(
'.' + save_path.split('.')[-1], '_0.bin')
print('save_path is ', save_path)
with open(save_path, "wb") as fp:
fp.write(result)
print(
f"End-2end inference, file_name: {file_path}, {ind + 1}/{total_len}, elapsed_time: {end_time}.\n"
)
if __name__ == "__main__":
args = parser_args()
stream_name1 = cfg.STREAM_NAME.encode("utf-8")
image_inference(args.pipeline_path, stream_name1, args.img_path,
args.infer_result_dir, True, args.dataset_name, args.model_type)
| en | 0.725926 | # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ sdk main set parameter Write depth map to pfm and png file. Args: path (str): filepath without extension depth (array): depth :param path: :param depth: :param bits: Write pfm file. Args: path (str): path file image (array): data scale (int, optional): Scale. Defaults to 1. # color image # greyscale bin preprocess sdk process | 2.221315 | 2 |
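A self-contained sketch of the PFM layout that write_pfm above produces (the file name and array values are invented); the sign of the scale line encodes byte order and rows are stored bottom-up:

import sys
import numpy as np

depth = np.random.rand(4, 6).astype(np.float32)        # toy H x W depth map
with open("toy.pfm", "wb") as f:
    f.write(b"Pf\n")                                   # "Pf" = greyscale, "PF" = colour
    f.write(b"%d %d\n" % (depth.shape[1], depth.shape[0]))
    scale = -1.0 if sys.byteorder == "little" else 1.0
    f.write(b"%f\n" % scale)                           # negative scale marks little-endian data
    np.flipud(depth).tofile(f)                         # PFM expects the bottom row first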
unsupported/office365mgmt/1.0.0/src/app.py | sais7/python-apps | 41 | 6615390 |
import socket
import asyncio
import time
import random
import json
import office365poller
from walkoff_app_sdk.app_base import AppBase
class Office365Mgmt(AppBase):
__version__ = "1.0.0"
app_name = "Office365_Mgt_API" # this needs to match "name" in api.yaml
def __init__(self, redis, logger, console_logger=None):
"""
Each app should have this __init__ to set up Redis and logging.
:param redis:
:param logger:
:param console_logger:
"""
super().__init__(redis, logger, console_logger)
def run_me_1(self, planType,tenantID,clientID,clientSecret):
#Poll last 10 min Office365
#Parse json_data with key value data
#planType = json_data['planType']
#tenantID = json_data['tenantID']
#clientID = json_data['clientID']
#clientSecret = json_data['clientSecret']
pollInterval = 10 #Assume minutes
return office365poller.pollOffice(planType,tenantID,clientID,clientSecret,pollInterval)
def run_me_2(self,planType,tenantID,clientID,clientSecret):
        #Poll last 23 hours or 1380 min Office365
#Parse json_data with key value data
#planType = json_data['planType']
#tenantID = json_data['tenantID']
#clientID = json_data['clientID']
#clientSecret = json_data['clientSecret']
pollInterval = 1380 #Assume minutes
return office365poller.pollOffice(planType,tenantID,clientID,clientSecret,pollInterval)
def run_me_3(self, json_data):
return "Ran function 3"
# Write your data inside this function
async def run_o365poller(self, planType,tenantID,clientID,clientSecret, PollInterval,json_data):
# It comes in as a string, so needs to be set to JSON
try:
#json_data = json.loads(json_data)
#We are not using json_data structure at this time, getting creds directly
pass
except json.decoder.JSONDecodeError as e:
return "Couldn't decode json: %s" % e
# These are functions
switcher = {
"poll_10min" : self.run_me_1,
"poll_23hours" : self.run_me_2,
}
        func = switcher.get(PollInterval, lambda *args: "Invalid function")
return func(planType,tenantID,clientID,clientSecret)
if __name__ == "__main__":
asyncio.run(Office365Mgmt.run(), debug=True)
| import socket
import asyncio
import time
import random
import json
import office365poller
from walkoff_app_sdk.app_base import AppBase
class Office365Mgmt(AppBase):
__version__ = "1.0.0"
app_name = "Office365_Mgt_API" # this needs to match "name" in api.yaml
def __init__(self, redis, logger, console_logger=None):
"""
Each app should have this __init__ to set up Redis and logging.
:param redis:
:param logger:
:param console_logger:
"""
super().__init__(redis, logger, console_logger)
def run_me_1(self, planType,tenantID,clientID,clientSecret):
#Poll last 10 min Office365
#Parse json_data with key value data
#planType = json_data['planType']
#tenantID = json_data['tenantID']
#clientID = json_data['clientID']
#clientSecret = json_data['clientSecret']
pollInterval = 10 #Assume minutes
return office365poller.pollOffice(planType,tenantID,clientID,clientSecret,pollInterval)
def run_me_2(self,planType,tenantID,clientID,clientSecret):
        #Poll last 23 hours or 1380 min Office365
#Parse json_data with key value data
#planType = json_data['planType']
#tenantID = json_data['tenantID']
#clientID = json_data['clientID']
#clientSecret = json_data['clientSecret']
pollInterval = 1380 #Assume minutes
return office365poller.pollOffice(planType,tenantID,clientID,clientSecret,pollInterval)
def run_me_3(self, json_data):
return "Ran function 3"
# Write your data inside this function
async def run_o365poller(self, planType,tenantID,clientID,clientSecret, PollInterval,json_data):
# It comes in as a string, so needs to be set to JSON
try:
#json_data = json.loads(json_data)
#We are not using json_data structure at this time, getting creds directly
pass
except json.decoder.JSONDecodeError as e:
return "Couldn't decode json: %s" % e
# These are functions
switcher = {
"poll_10min" : self.run_me_1,
"poll_23hours" : self.run_me_2,
}
        func = switcher.get(PollInterval, lambda *args: "Invalid function")
return func(planType,tenantID,clientID,clientSecret)
if __name__ == "__main__":
asyncio.run(Office365Mgmt.run(), debug=True) | en | 0.479558 | # this needs to match "name" in api.yaml Each app should have this __init__ to set up Redis and logging.
:param redis:
:param logger:
:param console_logger: #Poll last 10 min Office365 #Parse json_data with key value data #planType = json_data['planType'] #tenantID = json_data['tenantID'] #clientID = json_data['clientID'] #clientSecret = json_data['clientSecret'] #Assume minutes #Poll last 23 horus or 1380 min Office365 #Parse json_data with key value data #planType = json_data['planType'] #tenantID = json_data['tenantID'] #clientID = json_data['clientID'] #clientSecret = json_data['clientSecret'] #Assume minutes # Write your data inside this function # It comes in as a string, so needs to be set to JSON #json_data = json.loads(json_data) #We are not using json_data structure at this time, getting creds directly # These are functions | 2.373449 | 2 |
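The run_o365poller method above selects a poll window from a dict of bound methods; a standalone sketch of that dispatch-table pattern (the function and key names are invented):

def poll_10min(creds):
    return "polled the last 10 minutes as %s" % creds

def poll_23hours(creds):
    return "polled the last 23 hours as %s" % creds

switcher = {"poll_10min": poll_10min, "poll_23hours": poll_23hours}
# the fallback must accept the same arguments the real handlers receive
func = switcher.get("poll_1week", lambda *args: "Invalid function")
print(func("client-credentials"))  # Invalid function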
UltimateTicTacToe/generate_boards.py | lsangers/codingame | 0 | 6615391 |
from itertools import product
from collections import namedtuple
Action = namedtuple('Action', ['x_coord', 'y_coord'])
Board = namedtuple('Board', ['winner', 'actions'])
def get_winner(board):
for index in range(3):
#rows
if board[index][0] > 0 \
and board[index][0] == board[index][1] \
and board[index][0] == board[index][2]:
return board[index][0]
#cols
if board[0][index] > 0 \
and board[0][index] == board[1][index] \
and board[0][index] == board[2][index]:
return board[0][index]
#diagonals
if board[0][0] > 0 \
and board[0][0] == board[1][1] \
and board[0][0] == board[2][2]:
return board[0][0]
if board[2][0] > 0 \
and board[2][0] == board[1][1] \
and board[2][0] == board[0][2]:
return board[2][0]
return 0
def get_actions(board):
valid_actions = []
for y_coord in range(3):
for x_coord in range(3):
if board[y_coord][x_coord] == 0:
valid_actions.append(Action(x_coord, y_coord))
return valid_actions
NORMAL_BOARDS = {
grid: Board(winner := get_winner(grid), get_actions(grid) if winner == 0 else []) for grid in product(product([0, 1, 2], repeat = 3), repeat = 3)
}
print(NORMAL_BOARDS)
 |
from itertools import product
from collections import namedtuple
Action = namedtuple('Action', ['x_coord', 'y_coord'])
Board = namedtuple('Board', ['winner', 'actions'])
def get_winner(board):
for index in range(3):
#rows
if board[index][0] > 0 \
and board[index][0] == board[index][1] \
and board[index][0] == board[index][2]:
return board[index][0]
#cols
if board[0][index] > 0 \
and board[0][index] == board[1][index] \
and board[0][index] == board[2][index]:
return board[0][index]
#diagonals
if board[0][0] > 0 \
and board[0][0] == board[1][1] \
and board[0][0] == board[2][2]:
return board[0][0]
if board[2][0] > 0 \
and board[2][0] == board[1][1] \
and board[2][0] == board[0][2]:
return board[2][0]
return 0
def get_actions(board):
valid_actions = []
for y_coord in range(3):
for x_coord in range(3):
if board[y_coord][x_coord] == 0:
valid_actions.append(Action(x_coord, y_coord))
return valid_actions
NORMAL_BOARDS = {
grid: Board(winner := get_winner(grid), get_actions(grid) if winner == 0 else []) for grid in product(product([0, 1, 2], repeat = 3), repeat = 3)
}
print(NORMAL_BOARDS)
| en | 0.187176 | #rows #cols #diagonals | 3.142705 | 3 |
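A self-contained sketch of why the full enumeration above is cheap: 3 symbols per cell over 9 cells is only 3**9 = 19683 boards, so results can be precomputed once and looked up (only rows are checked here, for brevity):

from itertools import product

def row_winner(board):
    for row in board:                       # same row test as get_winner above
        if row[0] > 0 and row[0] == row[1] == row[2]:
            return row[0]
    return 0

table = {b: row_winner(b) for b in product(product((0, 1, 2), repeat=3), repeat=3)}
print(len(table))                                # 19683
print(table[((1, 1, 1), (2, 2, 0), (0, 0, 0))])  # 1 -- player 1 owns the top row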
drive.py | deadrobots/bots18-lexie | 0 | 6615392 |
import os, sys
from wallaby import *
import constants as c
# Be consistent with your naming conventions. This name should be in camelCase -LMB
def drivetimed(lspeed, rspeed, time):
motor(c.leftmotor, lspeed)
motor(c.rightmotor, rspeed)
msleep(time)
ao()
def lineFollowUntilCan():
print ("line following")
while analog(c.ET) < 2000:
if analog(c.tophat) < 1500: # on white #1200
motor(c.leftmotor, 10)
motor(c.rightmotor, 100)
else: #on black
motor(c.leftmotor, 100)
motor(c.rightmotor, 10) | import os, sys
from wallaby import *
import constants as c
# Be consistent with your naming conventions. This name should be in camelCase -LMB
def drivetimed(lspeed, rspeed, time):
motor(c.leftmotor, lspeed)
motor(c.rightmotor, rspeed)
msleep(time)
ao()
def lineFollowUntilCan():
print ("line following")
while analog(c.ET) < 2000:
if analog(c.tophat) < 1500: # on white #1200
motor(c.leftmotor, 10)
motor(c.rightmotor, 100)
else: #on black
motor(c.leftmotor, 100)
motor(c.rightmotor, 10) | en | 0.785477 | # Be consistent with your naming conventions. This name should be in camelCase -LMB # on white #1200 #on black | 2.294363 | 2 |
src/pyasl/asl/planck.py | mirofedurco/PyAstronomy | 98 | 6615393 | # -*- coding: utf-8 -*-
from PyAstronomy.pyaC import pyaErrors as PE
import numpy as np
from PyAstronomy.pyasl import _ic
def planck(T, lam=None, nu=None):
"""
Evaluate Planck's radiation law.
Depending on whether wavelength or frequency is specified as input, the
function evaluates:
.. math::
B_{\\nu} = \\frac{2\\pi h \\nu^3}{c^2} \\frac{1}{e^{\\frac{h\\nu}{kT}} - 1}
or
.. math::
B_{\\lambda} = \\frac{2\\pi h c^2}{\\lambda^5} \\frac{1}{e^{\\frac{h c}{\\lambda kT}} - 1} \\; .
If lambda is given (in meters), the output units are W/(m^2 m). To convert into erg/(cm^2 A s),
the output has to be multiplied by a factor of 1e-7.
Parameters
----------
T : float
Temperature in Kelvin.
lam : float or array, optional
Wavelength in meters.
nu : float or array, optional
Frequency in Hz.
Returns
-------
Spectral radiance : float or array
Depending on whether `lam` or `nu` were specified, returns the
spectral radiance per area and wavelength or frequency. The unit (SI)
will be W/(m^2 m) if `lam` was given and W/(m^2 Hz) if `nu` was
specified.
"""
if _ic.check["quantities"]:
from PyAstronomy import constants
c = constants.PyAConstants(unitSystem="SI")
else:
raise(PE.PyARequiredImport(
"You need to install 'quantities' to use this function.\n 'quantities' can be obtained from 'http://pypi.python.org/pypi/quantities'."))
return None
if (lam is not None) and (nu is not None):
raise(PE.PyAParameterConflict(
"Specify either 'lam' OR 'nu', but not both."))
if (lam is None) and (nu is None):
raise(PE.PyAParameterConflict("Specify either 'lam' OR 'nu'."))
# Parameters have been specified properly
if lam is not None:
result = 2.*np.pi*c.h*(c.c**2)/(lam**5)
result /= (np.exp(c.h*c.c/(lam*c.k*T)) - 1.0)
return result
elif nu is not None:
result = 2.*np.pi*c.h*(nu**3)/(c.c**2)
result /= (np.exp(c.h*nu/(c.k*T)) - 1.0)
return result
| # -*- coding: utf-8 -*-
from PyAstronomy.pyaC import pyaErrors as PE
import numpy as np
from PyAstronomy.pyasl import _ic
def planck(T, lam=None, nu=None):
"""
Evaluate Planck's radiation law.
Depending on whether wavelength or frequency is specified as input, the
function evaluates:
.. math::
B_{\\nu} = \\frac{2\\pi h \\nu^3}{c^2} \\frac{1}{e^{\\frac{h\\nu}{kT}} - 1}
or
.. math::
B_{\\lambda} = \\frac{2\\pi h c^2}{\\lambda^5} \\frac{1}{e^{\\frac{h c}{\\lambda kT}} - 1} \\; .
If lambda is given (in meters), the output units are W/(m^2 m). To convert into erg/(cm^2 A s),
the output has to be multiplied by a factor of 1e-7.
Parameters
----------
T : float
Temperature in Kelvin.
lam : float or array, optional
Wavelength in meters.
nu : float or array, optional
Frequency in Hz.
Returns
-------
Spectral radiance : float or array
Depending on whether `lam` or `nu` were specified, returns the
spectral radiance per area and wavelength or frequency. The unit (SI)
will be W/(m^2 m) if `lam` was given and W/(m^2 Hz) if `nu` was
specified.
"""
if _ic.check["quantities"]:
from PyAstronomy import constants
c = constants.PyAConstants(unitSystem="SI")
else:
raise(PE.PyARequiredImport(
"You need to install 'quantities' to use this function.\n 'quantities' can be obtained from 'http://pypi.python.org/pypi/quantities'."))
return None
if (lam is not None) and (nu is not None):
raise(PE.PyAParameterConflict(
"Specify either 'lam' OR 'nu', but not both."))
if (lam is None) and (nu is None):
raise(PE.PyAParameterConflict("Specify either 'lam' OR 'nu'."))
# Parameters have been specified properly
if lam is not None:
result = 2.*np.pi*c.h*(c.c**2)/(lam**5)
result /= (np.exp(c.h*c.c/(lam*c.k*T)) - 1.0)
return result
elif nu is not None:
result = 2.*np.pi*c.h*(nu**3)/(c.c**2)
result /= (np.exp(c.h*nu/(c.k*T)) - 1.0)
return result
| en | 0.589919 | # -*- coding: utf-8 -*- Evaluate Planck's radiation law. Depending on whether wavelength or frequency is specified as input, the function evaluates: .. math:: B_{\\nu} = \\frac{2\\pi h \\nu^3}{c^2} \\frac{1}{e^{\\frac{h\\nu}{kT}} - 1} or .. math:: B_{\\lambda} = \\frac{2\\pi h c^2}{\\lambda^5} \\frac{1}{e^{\\frac{h c}{\\lambda kT}} - 1} \\; . If lambda is given (in meters), the output units are W/(m^2 m). To convert into erg/(cm^2 A s), the output has to be multiplied by a factor of 1e-7. Parameters ---------- T : float Temperature in Kelvin. lam : float or array, optional Wavelength in meters. nu : float or array, optional Frequency in Hz. Returns ------- Spectral radiance : float or array Depending on whether `lam` or `nu` were specified, returns the spectral radiance per area and wavelength or frequency. The unit (SI) will be W/(m^2 m) if `lam` was given and W/(m^2 Hz) if `nu` was specified. # Parameters have been specified properly | 3.19894 | 3 |
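A worked example of the wavelength branch of planck() above, with the CODATA SI constants hard-coded so the sketch does not need the 'quantities' package:

import numpy as np

h = 6.62607015e-34   # Planck constant [J s]
c = 2.99792458e8     # speed of light [m/s]
k = 1.380649e-23     # Boltzmann constant [J/K]

def b_lam(lam, T):
    # identical expression to the lam branch of planck()
    return 2.0 * np.pi * h * c**2 / lam**5 / (np.exp(h * c / (lam * k * T)) - 1.0)

# solar-like temperature at 500 nm; multiply by 1e-7 to convert to erg/(cm^2 A s)
print(b_lam(500e-9, 5778.0))  # ~8.3e13 W/(m^2 m)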
dawp2020/hy-data-analysis-with-python-2020/part01-e15_two_dice_comprehension/src/two_dice_comprehension.py | ored95/data-analysis-course | 0 | 6615394 |
#!/usr/bin/env python3
def main():
print('\n'.join(['({0}, {1})'.format(i, j) for j in range(1, 5) for i in range(1, 5) if i + j == 5]))
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
def main():
print('\n'.join(['({0}, {1})'.format(i, j) for j in range(1, 5) for i in range(1, 5) if i + j == 5]))
if __name__ == "__main__":
main() | fr | 0.221828 | #!/usr/bin/env python3 | 3.796533 | 4 |
src/bar.py | jlcrodrigues/Hangman | 4 | 6615395 | import pygame
from config import *
class Bar():
def __init__(self, coords, length, pos):
'''
@text - What's displayed by the button.
@coords - Button's position. [x,y]
@length - Bar's length.
@pos - The relative position of the button to the bar.
'''
self.length = length
self.pos = pos
self.coords = coords
self.pointing = False
self.held = False
self.hitbox = [self.coords[0], self.coords[0] + length, self.coords[1], self.coords[1] + 40]
self.volume = pos
def render(self, win, dark_theme):
'''Renders the button on the screen.
@win - The game window.
        @dark_theme - True if dark theme is on.'''
if dark_theme:
bar = pygame.image.load("../assets/images/bar.png")
if not self.pointing: bar_button = pygame.image.load("../assets/images/bar_button.png")
else: bar_button = pygame.image.load("../assets/images/bar_button_point.png")
else:
bar = pygame.image.load("../assets/images/bar_light.png")
if not self.pointing: bar_button = pygame.image.load("../assets/images/bar_button_light.png")
else: bar_button = pygame.image.load("../assets/images/bar_button_point.png")
win.blit(bar, self.coords)
win.blit(bar_button, (self.coords[0] + int(self.pos * self.length), self.coords[1]))
def allign_right(self, distance, width):
        '''Aligns the button to the right.
@distance - Distance to the right border.
@width - The window's width.'''
self.coords[0] = width - distance - self.length
self.hitbox[0] = self.coords[0]
self.hitbox[1] = self.hitbox[0] + self.length
def set_volume(self, volume):
'''Sets the button's volume.
@volume - The new volume.
'''
self.volume = volume
def drag(self, mouse_pos, mouse_down):
'''Holds the logic for when the button is dragged.
@pos - Mouse's Coordinates.
        @mouse_down - True if the mouse is being pressed.
'''
pygame.mixer.init()
button_point = pygame.mixer.Sound("../assets/sounds/button_point.mp3")
button_point.set_volume(self.volume)
button_click = pygame.mixer.Sound("../assets/sounds/button_click.mp3")
button_click.set_volume(self.volume)
if mouse_pos[0] > self.hitbox[0] and mouse_pos[0] < self.hitbox[1]: #clicked in the button
if mouse_pos[1] > self.hitbox[2] and mouse_pos[1] < self.hitbox[3]:
if not self.pointing and not self.held: pygame.mixer.Sound.play(button_point)
self.pointing = True
else: self.pointing = False
else: self.pointing = False
if self.pointing and mouse_down:
if not self.held: pygame.mixer.Sound.play(button_click)
self.held = True
self.held = mouse_down
if self.held and self.pointing:
if mouse_pos[0] <= self.coords[0]: self.pos = 0.0
elif mouse_pos[0] >= self.coords[0] + self.length: self.pos = 1.0
else: self.pos = (mouse_pos[0] - self.coords[0]) / self.length
| import pygame
from config import *
class Bar():
def __init__(self, coords, length, pos):
'''
@text - What's displayed by the button.
@coords - Button's position. [x,y]
@length - Bar's length.
@pos - The relative position of the button to the bar.
'''
self.length = length
self.pos = pos
self.coords = coords
self.pointing = False
self.held = False
self.hitbox = [self.coords[0], self.coords[0] + length, self.coords[1], self.coords[1] + 40]
self.volume = pos
def render(self, win, dark_theme):
'''Renders the button on the screen.
@win - The game window.
        @dark_theme - True if dark theme is on.'''
if dark_theme:
bar = pygame.image.load("../assets/images/bar.png")
if not self.pointing: bar_button = pygame.image.load("../assets/images/bar_button.png")
else: bar_button = pygame.image.load("../assets/images/bar_button_point.png")
else:
bar = pygame.image.load("../assets/images/bar_light.png")
if not self.pointing: bar_button = pygame.image.load("../assets/images/bar_button_light.png")
else: bar_button = pygame.image.load("../assets/images/bar_button_point.png")
win.blit(bar, self.coords)
win.blit(bar_button, (self.coords[0] + int(self.pos * self.length), self.coords[1]))
def allign_right(self, distance, width):
        '''Aligns the button to the right.
@distance - Distance to the right border.
@width - The window's width.'''
self.coords[0] = width - distance - self.length
self.hitbox[0] = self.coords[0]
self.hitbox[1] = self.hitbox[0] + self.length
def set_volume(self, volume):
'''Sets the button's volume.
@volume - The new volume.
'''
self.volume = volume
def drag(self, mouse_pos, mouse_down):
'''Holds the logic for when the button is dragged.
@pos - Mouse's Coordinates.
        @mouse_down - True if the mouse is being pressed.
'''
pygame.mixer.init()
button_point = pygame.mixer.Sound("../assets/sounds/button_point.mp3")
button_point.set_volume(self.volume)
button_click = pygame.mixer.Sound("../assets/sounds/button_click.mp3")
button_click.set_volume(self.volume)
if mouse_pos[0] > self.hitbox[0] and mouse_pos[0] < self.hitbox[1]: #clicked in the button
if mouse_pos[1] > self.hitbox[2] and mouse_pos[1] < self.hitbox[3]:
if not self.pointing and not self.held: pygame.mixer.Sound.play(button_point)
self.pointing = True
else: self.pointing = False
else: self.pointing = False
if self.pointing and mouse_down:
if not self.held: pygame.mixer.Sound.play(button_click)
self.held = True
self.held = mouse_down
if self.held and self.pointing:
if mouse_pos[0] <= self.coords[0]: self.pos = 0.0
elif mouse_pos[0] >= self.coords[0] + self.length: self.pos = 1.0
else: self.pos = (mouse_pos[0] - self.coords[0]) / self.length
 | en | 0.702795 | @text - What's displayed by the button. @coords - Button's position. [x,y] @length - Bar's length. @pos - The relative position of the button to the bar. Renders the button on the screen. @win - The game window. @dark_theme - True if dark theme is on. Aligns the button to the right. @distance - Distance to the right border. @width - The window's width. Sets the button's volume. @volume - The new volume. Holds the logic for when the button is dragged. @pos - Mouse's Coordinates. @mouse_down - True if the mouse is being pressed. #clicked in the button | 3.129196 | 3 |
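A self-contained sketch of the drag arithmetic in Bar.drag above: the slider value is the mouse's x offset along the bar, clamped to [0, 1]:

def bar_fraction(mouse_x, bar_x, length):
    if mouse_x <= bar_x:
        return 0.0
    if mouse_x >= bar_x + length:
        return 1.0
    return (mouse_x - bar_x) / length

print(bar_fraction(150, 100, 200))  # 0.25
print(bar_fraction(999, 100, 200))  # 1.0 (clamped at the right edge)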
isbi_model_predict.py | TiagoFilipeSousaGoncalves/Deep-Image-Segmentation-for-Breast-Contour-Detection | 1 | 6615396 |
# Imports
import os
import matplotlib.pyplot as plt
import _pickle as cPickle
import numpy as np
import cv2
import time
# Keras Imports
from keras.applications import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.models import Model, load_model
from keras.layers import Input,Conv2D,MaxPooling2D,Conv2DTranspose, multiply, concatenate, Dense, Flatten, Dropout, Lambda
from keras.callbacks import ModelCheckpoint
from keras import losses
from keras import backend as K
# ISBI Model Imports
from code.isbi_model.isbi_model_utilities import create_isbi_model, generate_isbi_predictions
# GLOBAL VARIABLES
# FOLDS
FOLDS = [i for i in range(5)]
# ISBI Model Results Directory
isbi_model_results_dir = 'results/isbi-model'
# ISBI Model Weights Directory
isbi_model_weights_dir = os.path.join(isbi_model_results_dir, 'weights')
# ISBI Model Predictions Directory
isbi_model_predictions_dir = os.path.join(isbi_model_results_dir, 'predictions')
if os.path.isdir(isbi_model_predictions_dir) == False:
os.mkdir(isbi_model_predictions_dir)
# Iterate through folds
for fold in FOLDS:
print('Current fold: {}'.format(fold))
# ISBI Model Weights Path
weights_path = os.path.join(isbi_model_weights_dir, 'isbi_model_trained_Fold_{}.hdf5'.format(fold))
# Data Paths
data_path = 'data/resized'
# X_train path
X_train_path = os.path.join(data_path, 'X_train_221.pickle')
# X_test path
X_test_path = os.path.join(data_path, 'X_test_221.pickle')
# Test indices path
test_indices_path = 'data/train-test-indices/test_indices_list.pickle'
# Start time measurement for the algorithm performance check
startTime = time.time()
# Generate predictions
isbi_preds = generate_isbi_predictions(
X_train_path=X_train_path,
X_test_path=X_test_path,
test_indices_path=test_indices_path,
fold=fold,
isbi_model_weights_path=weights_path
)
# Time measurement
print ('The script took {} seconds for fold {}!'.format(time.time() - startTime, fold))
with open(os.path.join(isbi_model_predictions_dir, 'isbi_preds_w_only_CV_Fold_{}.pickle'.format(fold)), 'wb') as f:
cPickle.dump(isbi_preds, f, -1)
print('ISBI Model Predictions Finished.') |
# Imports
import os
import matplotlib.pyplot as plt
import _pickle as cPickle
import numpy as np
import cv2
import time
# Keras Imports
from keras.applications import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.models import Model, load_model
from keras.layers import Input,Conv2D,MaxPooling2D,Conv2DTranspose, multiply, concatenate, Dense, Flatten, Dropout, Lambda
from keras.callbacks import ModelCheckpoint
from keras import losses
from keras import backend as K
# ISBI Model Imports
from code.isbi_model.isbi_model_utilities import create_isbi_model, generate_isbi_predictions
# GLOBAL VARIABLES
# FOLDS
FOLDS = [i for i in range(5)]
# ISBI Model Results Directory
isbi_model_results_dir = 'results/isbi-model'
# ISBI Model Weights Directory
isbi_model_weights_dir = os.path.join(isbi_model_results_dir, 'weights')
# ISBI Model Predictions Directory
isbi_model_predictions_dir = os.path.join(isbi_model_results_dir, 'predictions')
if os.path.isdir(isbi_model_predictions_dir) == False:
os.mkdir(isbi_model_predictions_dir)
# Iterate through folds
for fold in FOLDS:
print('Current fold: {}'.format(fold))
# ISBI Model Weights Path
weights_path = os.path.join(isbi_model_weights_dir, 'isbi_model_trained_Fold_{}.hdf5'.format(fold))
# Data Paths
data_path = 'data/resized'
# X_train path
X_train_path = os.path.join(data_path, 'X_train_221.pickle')
# X_test path
X_test_path = os.path.join(data_path, 'X_test_221.pickle')
# Test indices path
test_indices_path = 'data/train-test-indices/test_indices_list.pickle'
# Start time measurement for the algorithm performance check
startTime = time.time()
# Generate predictions
isbi_preds = generate_isbi_predictions(
X_train_path=X_train_path,
X_test_path=X_test_path,
test_indices_path=test_indices_path,
fold=fold,
isbi_model_weights_path=weights_path
)
# Time measurement
print ('The script took {} seconds for fold {}!'.format(time.time() - startTime, fold))
with open(os.path.join(isbi_model_predictions_dir, 'isbi_preds_w_only_CV_Fold_{}.pickle'.format(fold)), 'wb') as f:
cPickle.dump(isbi_preds, f, -1)
print('ISBI Model Predictions Finished.') | en | 0.61427 | # Imports # Keras Imports # ISBI Model Imports # GLOBAL VARIABLES # FOLDS # ISBI Model Results Directory # ISBI Model Weights Directory # ISBI Model Predictions Directory # Iterate through folds # ISBI Model Weights Path # Data Paths # X_train path # X_test path # Test indices path # Start time measurement for the algorithm performance check # Generate predictions # Time measurement | 2.248137 | 2 |
CodingInterview2/43_NumberOf1/number_of1.py | hscspring/TheAlgorithms-Python | 10 | 6615397 |
"""
Interview Question 43: the number of 1s appearing from 1 to n
Problem: given an integer n, count how many times the digit 1 appears in the decimal representations of the n integers from 1 to n.
For example, for n = 12, the numbers from 1 to 12 that contain a 1 are 1, 10, 11 and 12, so 1 appears 5 times in total.
"""
def number_of_one(n: int) -> int:
"""
    how many ones appear between 1 and n.
Parameters
-----------
Returns
---------
Notes
------
given number n, we have log(n) positions
"""
res = 0
for i in range(1, n+1):
res += number_one(i)
return res
def number_one(n: int):
res = 0
while n:
if n%10 == 1:
res += 1
n = n//10
return res
def len_num(n: int) -> int:
res = 0
while n:
res += 1
n //= 10
return res
def first_num(n: int) -> int:
while n >= 10:
n //= 10
return n
def number_of_one_recursion(n: int) -> int:
lenth = len_num(n)
first = first_num(n)
nums_first = 0
if lenth == 0 or first == 0:
return 0
# 21345
if lenth == 1 and first > 0:
return 1
    # number of 1s contributed by the leading digit (e.g. 10000-19999)
if lenth > 1 and first > 1:
nums_first = 10 ** (lenth - 1)
elif lenth > 1 and first == 1:
nums_first = n - 10 ** (lenth - 1) + 1
    # number of 1s in the non-leading digit positions of 01346-21345
    # counted via permutations and combinations
nums_other = first * (lenth - 1) * 10 ** (lenth - 2)
    # number of 1s in 1-1345
nums_recur = number_of_one_recursion(n % 10**(lenth-1))
return nums_first + nums_other + nums_recur
def count_one(num):
str_lst = map(str, range(1, num+1))
return "".join(str_lst).count("1")
if __name__ == '__main__':
num = 100
num = 2134522
import time
t0 = time.time()
res = number_of_one_recursion(num)
print(res)
print(time.time()-t0)
t0 = time.time()
res = count_one(num)
print(res)
print(time.time()-t0)
t0 = time.time()
res = number_of_one(num)
print(res)
print(time.time()-t0)
| """
Interview Question 43: the number of 1s appearing from 1 to n
Problem: given an integer n, count how many times the digit 1 appears in the decimal representations of the n integers from 1 to n.
For example, for n = 12, the numbers from 1 to 12 that contain a 1 are 1, 10, 11 and 12, so 1 appears 5 times in total.
"""
def number_of_one(n: int) -> int:
"""
    how many ones appear between 1 and n.
Parameters
-----------
Returns
---------
Notes
------
given number n, we have log(n) positions
"""
res = 0
for i in range(1, n+1):
res += number_one(i)
return res
def number_one(n: int):
res = 0
while n:
if n%10 == 1:
res += 1
n = n//10
return res
def len_num(n: int) -> int:
res = 0
while n:
res += 1
n //= 10
return res
def first_num(n: int) -> int:
while n >= 10:
n //= 10
return n
def number_of_one_recursion(n: int) -> int:
lenth = len_num(n)
first = first_num(n)
nums_first = 0
if lenth == 0 or first == 0:
return 0
# 21345
if lenth == 1 and first > 0:
return 1
    # number of 1s contributed by the leading digit (e.g. 10000-19999)
if lenth > 1 and first > 1:
nums_first = 10 ** (lenth - 1)
elif lenth > 1 and first == 1:
nums_first = n - 10 ** (lenth - 1) + 1
    # number of 1s in the non-leading digit positions of 01346-21345
    # counted via permutations and combinations
nums_other = first * (lenth - 1) * 10 ** (lenth - 2)
    # number of 1s in 1-1345
nums_recur = number_of_one_recursion(n % 10**(lenth-1))
return nums_first + nums_other + nums_recur
def count_one(num):
str_lst = map(str, range(1, num+1))
return "".join(str_lst).count("1")
if __name__ == '__main__':
num = 100
num = 2134522
import time
t0 = time.time()
res = number_of_one_recursion(num)
print(res)
print(time.time()-t0)
t0 = time.time()
res = count_one(num)
print(res)
print(time.time()-t0)
t0 = time.time()
res = number_of_one(num)
print(res)
    print(time.time()-t0) | zh | 0.98169 | Interview Question 43: the number of 1s appearing from 1 to n Problem: given an integer n, count how many times the digit 1 appears in the decimal representations of the n integers from 1 to n. For example, for n = 12, the numbers from 1 to 12 that contain a 1 are 1, 10, 11 and 12, so 1 appears 5 times in total. how many ones appear between 1 and n. Parameters ----------- Returns --------- Notes ------ given number n, we have log(n) positions # 21345 # number of 1s contributed by the leading digit (e.g. 10000-19999) # number of 1s in the non-leading digit positions of 01346-21345 # counted via permutations and combinations # number of 1s in 1-1345 | 3.590511 | 4 |
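A quick brute-force cross-check of the counters above (illustrative): counting '1' characters over the range agrees with the digit recursion, e.g. number_of_one_recursion(21345) == 18821:

def count_ones(n):
    return sum(str(i).count("1") for i in range(1, n + 1))

for n in (12, 100, 21345):
    print(n, count_ones(n))
# 12 5        -- matches the example in the problem statement
# 100 21
# 21345 18821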
ptth/tests/test_ptth.py | wagamama/python-ptth | 0 | 6615398 |
# -*- coding: utf-8 -*-
import threading
import socket
from select import select
import unittest
import ptth
class TestServer(threading.Thread):
def __init__(self, ready_event):
super(TestServer, self).__init__()
self._ready_event = ready_event
self._stop_event = threading.Event()
def run(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 7000))
sock.listen(5)
conn, _ = sock.accept()
while True:
r_list, _, _ = select([conn], [], [])
if self._stop_event.is_set():
conn.close()
break
if len(r_list) > 0:
data = conn.recv(4096)
if not data:
break
else:
req = ptth.Request.load(data)
if (req.method == 'POST' and
req.headers['Connection'] == 'Upgrade' and
req.headers['Upgrade'] == 'PTTH/1.0'):
if self.error:
conn.send(ptth.Response(200).dump())
else:
conn.send(ptth.Response(101).dump())
self._ready_event.wait()
conn.send(ptth.Request('POST', '/').dump())
sock.close()
def serve(self, error=False):
self.error = error
self.start()
def close(self):
self._stop_event.set()
class TestHandler(ptth.Handler):
def __init__(self, events):
self.events = events
self.ready = False
self.request = False
self.error = False
self.close = False
def ready_to_handle(self):
self.ready = True
if self.events['ready'] is not None:
self.events['ready'].set()
def handle_request(self, request):
self.request = True
if self.events['request'] is not None:
self.events['request'].set()
def handle_error(self, error):
self.error = True
if self.events['error'] is not None:
self.events['error'].set()
def handle_close(self):
self.close = True
if self.events['close'] is not None:
self.events['close'].set()
class TestPTTH(unittest.TestCase):
def setUp(self):
self.events = {}
self.events['ready'] = threading.Event()
self.events['request'] = threading.Event()
self.events['error'] = threading.Event()
self.events['close'] = threading.Event()
self.ready_event = threading.Event()
self.handler = TestHandler(self.events)
self.server = TestServer(self.ready_event)
def tearDown(self):
self.server.close()
def test_ptth(self):
self.server.serve()
s = ptth.Session(self.handler)
s.serve('http://localhost:7000/')
self.events['ready'].wait()
self.ready_event.set()
self.events['request'].wait()
s.close()
self.events['close'].wait()
self.assertTrue(self.handler.ready)
self.assertTrue(self.handler.request)
self.assertFalse(self.handler.error)
self.assertTrue(self.handler.close)
def test_error(self):
self.server.serve(error=True)
s = ptth.Session(self.handler)
s.serve('http://localhost:7000/')
self.events['error'].wait()
s.close()
self.events['close'].wait()
self.assertFalse(self.handler.ready)
self.assertFalse(self.handler.request)
self.assertTrue(self.handler.error)
self.assertTrue(self.handler.close)
if __name__ == '__main__':
unittest.main()
| # -*- coding: utf-8 -*-
import threading
import socket
from select import select
import unittest
import ptth
class TestServer(threading.Thread):
def __init__(self, ready_event):
super(TestServer, self).__init__()
self._ready_event = ready_event
self._stop_event = threading.Event()
def run(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 7000))
sock.listen(5)
conn, _ = sock.accept()
while True:
r_list, _, _ = select([conn], [], [])
if self._stop_event.is_set():
conn.close()
break
if len(r_list) > 0:
data = conn.recv(4096)
if not data:
break
else:
req = ptth.Request.load(data)
if (req.method == 'POST' and
req.headers['Connection'] == 'Upgrade' and
req.headers['Upgrade'] == 'PTTH/1.0'):
if self.error:
conn.send(ptth.Response(200).dump())
else:
conn.send(ptth.Response(101).dump())
self._ready_event.wait()
conn.send(ptth.Request('POST', '/').dump())
sock.close()
def serve(self, error=False):
self.error = error
self.start()
def close(self):
self._stop_event.set()
class TestHandler(ptth.Handler):
def __init__(self, events):
self.events = events
self.ready = False
self.request = False
self.error = False
self.close = False
def ready_to_handle(self):
self.ready = True
if self.events['ready'] is not None:
self.events['ready'].set()
def handle_request(self, request):
self.request = True
if self.events['request'] is not None:
self.events['request'].set()
def handle_error(self, error):
self.error = True
if self.events['error'] is not None:
self.events['error'].set()
def handle_close(self):
self.close = True
if self.events['close'] is not None:
self.events['close'].set()
class TestPTTH(unittest.TestCase):
def setUp(self):
self.events = {}
self.events['ready'] = threading.Event()
self.events['request'] = threading.Event()
self.events['error'] = threading.Event()
self.events['close'] = threading.Event()
self.ready_event = threading.Event()
self.handler = TestHandler(self.events)
self.server = TestServer(self.ready_event)
def tearDown(self):
self.server.close()
def test_ptth(self):
self.server.serve()
s = ptth.Session(self.handler)
s.serve('http://localhost:7000/')
self.events['ready'].wait()
self.ready_event.set()
self.events['request'].wait()
s.close()
self.events['close'].wait()
self.assertTrue(self.handler.ready)
self.assertTrue(self.handler.request)
self.assertFalse(self.handler.error)
self.assertTrue(self.handler.close)
def test_error(self):
self.server.serve(error=True)
s = ptth.Session(self.handler)
s.serve('http://localhost:7000/')
self.events['error'].wait()
s.close()
self.events['close'].wait()
self.assertFalse(self.handler.ready)
self.assertFalse(self.handler.request)
self.assertTrue(self.handler.error)
self.assertTrue(self.handler.close)
if __name__ == '__main__':
unittest.main() | en | 0.769321 | # -*- coding: utf-8 -*- | 2.548089 | 3 |
dependencies.py | esitarski/CrossMgr | 25 | 6615399 |
#!/usr/bin/env python
import os
import argparse
import subprocess
import compileall
import platform
is_windows = (platform.system() == 'Windows')
dependencies = [
'requests',
'waitress',
'xlsxwriter',
'pygments',
'xlrd',
'pytz',
'fpdf',
'natural-keys',
'xlwt',
'netifaces',
'whoosh',
'qrcode',
'tornado',
'ftputil'
]
uninstall_dependencies = [
'south',
]
def update_dependencies( upgrade ):
print( 'Updating Dependencies...' )
pip = 'C:/Python27/Scripts/pip.exe'
if os.path.isfile(pip):
print( 'Found "pip" at "{}".'.format(pip) )
else:
pip = 'pip'
for d in dependencies:
args = [pip, 'install', d]
if upgrade:
args.append('--upgrade')
print( ' '.join(args) )
subprocess.call( args )
for d in uninstall_dependencies:
args = [pip, 'uninstall', d]
print( ' '.join(args) )
subprocess.call( args )
print( 'Removing old compiled files...' )
for root, dirs, files in os.walk( '.' ):
for f in files:
fname = os.path.join( root, f )
if os.path.splitext(fname)[1] == '.pyc':
os.remove( fname )
print( 'Pre-compiling source code...' )
compileall.compile_dir( '.', quiet=True )
if __name__ == '__main__':
parser = argparse.ArgumentParser( description='Update CrossMgr Dependencies' )
parser.add_argument(
'--upgrade',
action='store_true',
default=False,
)
args = parser.parse_args()
update_dependencies( args.upgrade )
| #!/usr/bin/env python
import os
import argparse
import subprocess
import compileall
import platform
is_windows = (platform.system() == 'Windows')
dependencies = [
'requests',
'waitress',
'xlsxwriter',
'pygments',
'xlrd',
'pytz',
'fpdf',
'natural-keys',
'xlwt',
'netifaces',
'whoosh',
'qrcode',
'tornado',
'ftputil'
]
uninstall_dependencies = [
'south',
]
def update_dependencies( upgrade ):
print( 'Updating Dependencies...' )
pip = 'C:/Python27/Scripts/pip.exe'
if os.path.isfile(pip):
print( 'Found "pip" at "{}".'.format(pip) )
else:
pip = 'pip'
for d in dependencies:
args = [pip, 'install', d]
if upgrade:
args.append('--upgrade')
print( ' '.join(args) )
subprocess.call( args )
for d in uninstall_dependencies:
args = [pip, 'uninstall', d]
print( ' '.join(args) )
subprocess.call( args )
print( 'Removing old compiled files...' )
for root, dirs, files in os.walk( '.' ):
for f in files:
fname = os.path.join( root, f )
if os.path.splitext(fname)[1] == '.pyc':
os.remove( fname )
print( 'Pre-compiling source code...' )
compileall.compile_dir( '.', quiet=True )
if __name__ == '__main__':
parser = argparse.ArgumentParser( description='Update CrossMgr Dependencies' )
parser.add_argument(
'--upgrade',
action='store_true',
default=False,
)
args = parser.parse_args()
update_dependencies( args.upgrade ) | ru | 0.26433 | #!/usr/bin/env python | 2.732097 | 3 |
xCopy.py | ca7dEm0n/xCopy | 0 | 6615400 |
# coding: utf-8
'''
@Author: cA7dEm0n
@Blog: http://www.a-cat.cn
@Since: 2020-05-22 23:18:10
@Motto: To see a thousand miles farther, climb one more storey
@message: X clipboard
'''
import os
import sys
from queue import Queue
from threading import Timer
import plugins
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
from lib.utils import logging
XCOPY_MAX_JOB = os.environ.get("XCOPY_MAX_JOB", 1)
PLUGINS_LIST = {
_: getattr(plugins, _) for _ in dir(plugins) if "Plugins" in _
}
def runPluginsJob(q):
'''
    description: run the queued plugin jobs
'''
while not q.empty():
_run_job_func = q.get()
try:
_run_job_func.main()
except Exception as error:
logging.info(error)
q.task_done()
def putPluginsJob(job, **kwargs):
'''
    description: enqueue plugin jobs
'''
_log = kwargs.get('log', False)
for i,k in PLUGINS_LIST.items():
if _log:
logging.info("[.] Push [%s] job"%(i))
job.put(k)
if __name__ == "__main__":
from random import randint
for _ in PLUGINS_LIST:
logging.info("[.] Load [%s] Plugins." % _ )
    # job queue
jobs = Queue(maxsize=XCOPY_MAX_JOB)
while True:
        # enqueue the plugin jobs
_log = randint(0, 100) == 100
putPluginsJob(jobs, log=_log)
        # run the jobs
worker = Timer(1, runPluginsJob, args=(jobs, ))
worker.start()
worker.join()
 |
# coding: utf-8
'''
@Author: cA7dEm0n
@Blog: http://www.a-cat.cn
@Since: 2020-05-22 23:18:10
@Motto: To see a thousand miles farther, climb one more storey
@message: X clipboard
'''
import os
import sys
from queue import Queue
from threading import Timer
import plugins
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, BASE_DIR)
from lib.utils import logging
XCOPY_MAX_JOB = os.environ.get("XCOPY_MAX_JOB", 1)
PLUGINS_LIST = {
_: getattr(plugins, _) for _ in dir(plugins) if "Plugins" in _
}
def runPluginsJob(q):
'''
    description: run the queued plugin jobs
'''
while not q.empty():
_run_job_func = q.get()
try:
_run_job_func.main()
except Exception as error:
logging.info(error)
q.task_done()
def putPluginsJob(job, **kwargs):
'''
    description: enqueue plugin jobs
'''
_log = kwargs.get('log', False)
for i,k in PLUGINS_LIST.items():
if _log:
logging.info("[.] Push [%s] job"%(i))
job.put(k)
if __name__ == "__main__":
from random import randint
for _ in PLUGINS_LIST:
logging.info("[.] Load [%s] Plugins." % _ )
    # job queue
jobs = Queue(maxsize=XCOPY_MAX_JOB)
while True:
        # enqueue the plugin jobs
_log = randint(0, 100) == 100
putPluginsJob(jobs, log=_log)
        # run the jobs
worker = Timer(1, runPluginsJob, args=(jobs, ))
worker.start()
worker.join()
 | zh | 0.586816 | # coding: utf-8 @Author: cA7dEm0n @Blog: http://www.a-cat.cn @Since: 2020-05-22 23:18:10 @Motto: To see a thousand miles farther, climb one more storey @message: X clipboard description: run the queued plugin jobs description: enqueue plugin jobs # job queue # enqueue the plugin jobs # run the jobs | 2.25252 | 2 |
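A minimal sketch of the Queue-plus-Timer worker pattern used above (the plugin callable is invented): jobs are queued, then a one-shot Timer drains the queue after a delay, mirroring the main loop:

from queue import Queue
from threading import Timer

def drain(q):
    while not q.empty():
        job = q.get()
        job()               # stands in for the plugin's main()
        q.task_done()

jobs = Queue(maxsize=4)
jobs.put(lambda: print("clipboard plugin ran"))
worker = Timer(1, drain, args=(jobs,))  # fires once, one second later
worker.start()
worker.join()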
craamvert/__init__.py | craam/craamvert | 0 | 6615401 | from instruments.sst.sst import SST
from instruments.poemas.poemas import POEMAS
| from instruments.sst.sst import SST
from instruments.poemas.poemas import POEMAS
| none | 1 | 1.048687 | 1 | |
SnowieCaster/backends/__init__.py | AndreiErshov/SnowieCaster | 1 | 6615402 | #pylint: disable=C0114
from . import memory
__all__ = ["memory"]
| #pylint: disable=C0114
from . import memory
__all__ = ["memory"]
| en | 0.440183 | #pylint: disable=C0114 | 1.032325 | 1 |
data_preparation/missing_value.py | kmsk99/data_science_toolbar | 0 | 6615403 | for col in df_train.columns:
msperc = 'column: {:>10}\t Percent of NaN value: {:.2f}%'.format(
col, 100 * (df_train[col].isnull().sum() / df_train[col].shape[0]))
print(msperc)
# Check what percentage of each column of the train data is missing.
# df_train[col].isnull().sum() : counts how many values in that column are missing (computed with True=1 for missing, False=0).
# df_train[col].shape[0] : the column's dimension (the column is fixed, so this is the number of rows).
# 100 * (df_train[col].isnull().sum() / df_train[col].shape[0]) : by the above, this expression yields the missing percentage.
for col in df_test.columns:
msperc = 'column: {:>10}\t Percent of NaN value: {:.2f}%'.format(
col, 100 * (df_test[col].isnull().sum() / df_test[col].shape[0]))
print(msperc)
# Check the test data in the same way.
# In both train and test, PoolQc has the most missing values.
missing = df_train.isnull().sum()
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing.plot.bar(figsize=(12, 6))
# Draw a bar plot to inspect the missing counts at a glance.
| for col in df_train.columns:
msperc = 'column: {:>10}\t Percent of NaN value: {:.2f}%'.format(
col, 100 * (df_train[col].isnull().sum() / df_train[col].shape[0]))
print(msperc)
# Check what percentage of each column of the train data is missing.
# df_train[col].isnull().sum() : counts how many values in that column are missing (computed with True=1 for missing, False=0).
# df_train[col].shape[0] : the column's dimension (the column is fixed, so this is the number of rows).
# 100 * (df_train[col].isnull().sum() / df_train[col].shape[0]) : by the above, this expression yields the missing percentage.
for col in df_test.columns:
msperc = 'column: {:>10}\t Percent of NaN value: {:.2f}%'.format(
col, 100 * (df_test[col].isnull().sum() / df_test[col].shape[0]))
print(msperc)
# Check the test data in the same way.
# In both train and test, PoolQc has the most missing values.
missing = df_train.isnull().sum()
missing = missing[missing > 0]
missing.sort_values(inplace=True)
missing.plot.bar(figsize=(12, 6))
# Draw a bar plot to inspect the missing counts at a glance.
 | ko | 0.998681 | # Check what percentage of each column of the train data is missing. # df_train[col].isnull().sum() : counts how many values in that column are missing (computed with True=1 for missing, False=0). # df_train[col].shape[0] : the column's dimension (the column is fixed, so this is the number of rows). # 100 * (df_train[col].isnull().sum() / df_train[col].shape[0]) : by the above, this expression yields the missing percentage. # Check the test data in the same way. # In both train and test, PoolQc has the most missing values. # Draw a bar plot to inspect the missing counts at a glance. | 2.959034 | 3 |
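The per-column loops above can be collapsed into one vectorized pandas expression; a sketch with an invented two-column frame (isnull().mean() equals isnull().sum() / len(df)):

import numpy as np
import pandas as pd

df_train = pd.DataFrame({"PoolQc": [np.nan, np.nan, "Gd"],
                         "LotArea": [8450, 9600, 11250]})
missing_pct = df_train.isnull().mean() * 100
print(missing_pct.sort_values(ascending=False))
# PoolQc     66.666667
# LotArea     0.000000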
pipelines/make_masks.py | planck-npipe/toast-npipe | 1 | 6615404 |
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
#from spice import ispice
import healpy as hp
fwhm_map_deg = 10.
fwhm_mask_deg = 5.
fwhm_map = np.radians(fwhm_map_deg)
fwhm_mask = np.radians(fwhm_mask_deg)
lmax_mask = 128
lmax = 512
nside = 512
npix = 12 * nside ** 2
fname_dipo = '/global/cscratch1/sd/keskital/hfi_pipe/dipole_nside{:04}.fits' \
''.format(nside)
print('Loading', fname_dipo)
dipo = hp.read_map(fname_dipo, verbose=False)
freqs = [30, 353]
imaps = []
pmaps = []
for freq in freqs:
fname = '/global/cscratch1/sd/keskital/npipe_maps/npipe6v20/' \
'npipe6v20_{:03}_map.fits'.format(freq)
print('Loading ', fname)
fgmap = hp.ud_grade(
hp.read_map(fname, range(3), verbose=False, nest=True), nside,
order_in='NEST', order_out='RING')
print('Smoothing')
fgmap = hp.smoothing(fgmap, fwhm=fwhm_map, lmax=lmax, iter=0, verbose=False)
fgi = fgmap[0] - dipo
fgp = np.sqrt(fgmap[1]**2 + fgmap[2]**2)
print('Sorting')
fgi_sorted = np.sort(fgi)
fgp_sorted = np.sort(fgp)
imaps.append((fgi, fgi_sorted))
pmaps.append((fgp, fgp_sorted))
# Tabulate the sky fraction associated with each pixel limit
pixlims = np.arange(0, npix, npix//100)
fskies = np.zeros(pixlims.size)
mask = np.zeros(npix)
for ilim, pixlim in enumerate(pixlims):
mask[:] = 1
for imap, imap_sorted in imaps:
mask[imap > imap_sorted[pixlim]] = False
for pmap, pmap_sorted in pmaps:
mask[pmap > pmap_sorted[pixlim]] = False
mask = hp.smoothing(mask, fwhm=fwhm_mask, lmax=lmax_mask, verbose=False)
fskies[ilim] = np.sum(mask) / npix
pixlims[0] = 0
fskies[0] = 0
pixlims[-1] = npix - 1
fskies[-1] = 1
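# For each target sky fraction, mask the brightest pixels in every I and P template, then apodize with a Gaussian of width fwhm_mask.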
#for cut in [10, 20, 30, 40, 50, 60, 70, 80, 90, 95]:
for cut in [55, 65]:
pixlim = int(np.interp(cut/100, fskies, pixlims))
mask[:] = 1
for imap, imap_sorted in imaps:
mask[imap > imap_sorted[pixlim]] = False
for pmap, pmap_sorted in pmaps:
mask[pmap > pmap_sorted[pixlim]] = False
smask = hp.smoothing(mask, fwhm=fwhm_mask, lmax=lmax_mask, verbose=False)
smask[mask == 0] = 0
smask[smask < 0] = 0
fsky = np.sum(mask) / npix
print('cut = {}, fsky = {}'.format(cut, fsky))
header = [
('fsky', fsky, 'Effective sky area'),
('cut', cut, 'Fractional cut in temperature and polarization'),
('fwhmmap', fwhm_map_deg, 'Map smoothing [degrees]'),
('fwhmmask', fwhm_mask_deg, 'Mask apodization [degrees]'),
]
for nsideout in [256, 512, 1024, 2048]:
maskout = hp.ud_grade(smask, nsideout)
fname = 'clmask_{:02}fsky_nside{:04}.fits'.format(
int(cut), nsideout)
hp.write_map(fname, maskout, dtype=np.float32, extra_header=header, overwrite=True)
print('Mask saved in {}'.format(fname))
| <filename>pipelines/make_masks.py<gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
#from spice import ispice
import healpy as hp
fwhm_map_deg = 10.
fwhm_mask_deg = 5.
fwhm_map = np.radians(fwhm_map_deg)
fwhm_mask = np.radians(fwhm_mask_deg)
lmax_mask = 128
lmax = 512
nside = 512
npix = 12 * nside ** 2
fname_dipo = '/global/cscratch1/sd/keskital/hfi_pipe/dipole_nside{:04}.fits' \
''.format(nside)
print('Loading', fname_dipo)
dipo = hp.read_map(fname_dipo, verbose=False)
freqs = [30, 353]
imaps = []
pmaps = []
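# Build smoothed foreground templates per frequency: dipole-subtracted intensity I and polarization amplitude P = sqrt(Q^2 + U^2).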
for freq in freqs:
fname = '/global/cscratch1/sd/keskital/npipe_maps/npipe6v20/' \
'npipe6v20_{:03}_map.fits'.format(freq)
print('Loading ', fname)
fgmap = hp.ud_grade(
hp.read_map(fname, range(3), verbose=False, nest=True), nside,
order_in='NEST', order_out='RING')
print('Smoothing')
fgmap = hp.smoothing(fgmap, fwhm=fwhm_map, lmax=lmax, iter=0, verbose=False)
fgi = fgmap[0] - dipo
fgp = np.sqrt(fgmap[1]**2 + fgmap[2]**2)
print('Sorting')
fgi_sorted = np.sort(fgi)
fgp_sorted = np.sort(fgp)
imaps.append((fgi, fgi_sorted))
pmaps.append((fgp, fgp_sorted))
# Tabulate the sky fraction associated with each pixel limit
pixlims = np.arange(0, npix, npix//100)
fskies = np.zeros(pixlims.size)
mask = np.zeros(npix)
for ilim, pixlim in enumerate(pixlims):
mask[:] = 1
for imap, imap_sorted in imaps:
mask[imap > imap_sorted[pixlim]] = False
for pmap, pmap_sorted in pmaps:
mask[pmap > pmap_sorted[pixlim]] = False
mask = hp.smoothing(mask, fwhm=fwhm_mask, lmax=lmax_mask, verbose=False)
fskies[ilim] = np.sum(mask) / npix
pixlims[0] = 0
fskies[0] = 0
pixlims[-1] = npix - 1
fskies[-1] = 1
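# For each target sky fraction, mask the brightest pixels in every I and P template, then apodize with a Gaussian of width fwhm_mask.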
#for cut in [10, 20, 30, 40, 50, 60, 70, 80, 90, 95]:
for cut in [55, 65]:
pixlim = int(np.interp(cut/100, fskies, pixlims))
mask[:] = 1
for imap, imap_sorted in imaps:
mask[imap > imap_sorted[pixlim]] = False
for pmap, pmap_sorted in pmaps:
mask[pmap > pmap_sorted[pixlim]] = False
smask = hp.smoothing(mask, fwhm=fwhm_mask, lmax=lmax_mask, verbose=False)
smask[mask == 0] = 0
smask[smask < 0] = 0
fsky = np.sum(mask) / npix
print('cut = {}, fsky = {}'.format(cut, fsky))
header = [
('fsky', fsky, 'Effective sky area'),
('cut', cut, 'Fractional cut in temperature and polarization'),
('fwhmmap', fwhm_map_deg, 'Map smoothing [degrees]'),
('fwhmmask', fwhm_mask_deg, 'Mask apodization [degrees]'),
]
for nsideout in [256, 512, 1024, 2048]:
maskout = hp.ud_grade(smask, nsideout)
fname = 'clmask_{:02}fsky_nside{:04}.fits'.format(
int(cut), nsideout)
hp.write_map(fname, maskout, dtype=np.float32, extra_header=header, overwrite=True)
print('Mask saved in {}'.format(fname))
| en | 0.657883 | #from spice import ispice # Tabulate the sky fraction associated with each pixel limit #for cut in [10, 20, 30, 40, 50, 60, 70, 80, 90, 95]: | 1.804093 | 2 |
software_Non_AI/google_calendar.py | KKshitiz/J.A.R.V.I.S | 5 | 6615405 | <reponame>KKshitiz/J.A.R.V.I.S
from __future__ import print_function
import datetime
import pickle
import os.path
import re
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
def authenticate_google():
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
root_dir + '\\credentials.json', SCOPES)  # root_dir is assumed to be defined by the surrounding project
creds = flow.run_local_server(port=0)
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
return service
def get_events(n, service):
# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
print(f'Getting the upcoming {n} events')
events_result = service.events().list(calendarId='primary', timeMin=now,
maxResults=n, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
e='You have no events scheduled today.'
print(e)
return e
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
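# Extract the HH:MM portion from the ISO 8601 dateTime string (e.g. ...T09:30:00+05:30 -> 09:30).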
e="You have an event titled "+ event['summary']+" scheduled at "+re.findall('T([0-9][0-9]:[0-9][0-9]):[0-9][0-9]',start)[0]
print(e)
return e
def setupCalendar():
try:
global service
service = authenticate_google()
except:
global s
s="Authentication failed from google servers."
def startCalendar(n):
try:
s=get_events(n,service)
except:
s= "Failed to get calendar data."
return s
if __name__ == "__main__":
setupCalendar()
startCalendar(2) | from __future__ import print_function
import datetime
import pickle
import os.path
import re
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
def authenticate_google():
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
creds = None
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
root_dir + '\\credentials.json', SCOPES)  # root_dir is assumed to be defined by the surrounding project
creds = flow.run_local_server(port=0)
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
return service
def get_events(n, service):
# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
print(f'Getting the upcoming {n} events')
events_result = service.events().list(calendarId='primary', timeMin=now,
maxResults=n, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
if not events:
e='You have no events scheduled today.'
print(e)
return e
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
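# Extract the HH:MM portion from the ISO 8601 dateTime string (e.g. ...T09:30:00+05:30 -> 09:30).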
e="You have an event titled "+ event['summary']+" scheduled at "+re.findall('T([0-9][0-9]:[0-9][0-9]):[0-9][0-9]',start)[0]
print(e)
return e
def setupCalendar():
try:
global service
service = authenticate_google()
except:
global s
s="Authentication failed from google servers."
def startCalendar(n):
try:
s=get_events(n,service)
except:
s= "Failed to get calendar data."
return s
if __name__ == "__main__":
setupCalendar()
startCalendar(2) | en | 0.726921 | Shows basic usage of the Google Calendar API. Prints the start and name of the next 10 events on the user's calendar. # Call the Calendar API # 'Z' indicates UTC time | 2.720418 | 3 |
reactivex/operators/_pairwise.py | christiansandberg/RxPY | 0 | 6615406 | from typing import Callable, Optional, Tuple, TypeVar, cast
from reactivex import Observable, abc
_T = TypeVar("_T")
def pairwise_() -> Callable[[Observable[_T]], Observable[Tuple[_T, _T]]]:
def pairwise(source: Observable[_T]) -> Observable[Tuple[_T, _T]]:
"""Partially applied pairwise operator.
Returns a new observable that triggers on the second and
subsequent triggerings of the input observable. The Nth
triggering of the input observable passes the arguments from
the N-1th and Nth triggering as a pair. The argument passed to
the N-1th triggering is held in hidden internal state until the
Nth triggering occurs.
Returns:
An observable that triggers on successive pairs of
observations from the input observable as an array.
"""
def subscribe(
observer: abc.ObserverBase[Tuple[_T, _T]],
scheduler: Optional[abc.SchedulerBase] = None,
) -> abc.DisposableBase:
has_previous = False
previous: _T = cast(_T, None)
def on_next(x: _T) -> None:
nonlocal has_previous, previous
pair = None
with source.lock:
if has_previous:
pair = (previous, x)
else:
has_previous = True
previous = x
if pair:
observer.on_next(pair)
return source.subscribe(on_next, observer.on_error, observer.on_completed)
return Observable(subscribe)
return pairwise
__all__ = ["pairwise_"]
| from typing import Callable, Optional, Tuple, TypeVar, cast
from reactivex import Observable, abc
_T = TypeVar("_T")
def pairwise_() -> Callable[[Observable[_T]], Observable[Tuple[_T, _T]]]:
def pairwise(source: Observable[_T]) -> Observable[Tuple[_T, _T]]:
"""Partially applied pairwise operator.
Returns a new observable that triggers on the second and
subsequent triggerings of the input observable. The Nth
triggering of the input observable passes the arguments from
the N-1th and Nth triggering as a pair. The argument passed to
the N-1th triggering is held in hidden internal state until the
Nth triggering occurs.
Returns:
An observable that triggers on successive pairs of
observations from the input observable as an array.
"""
def subscribe(
observer: abc.ObserverBase[Tuple[_T, _T]],
scheduler: Optional[abc.SchedulerBase] = None,
) -> abc.DisposableBase:
has_previous = False
previous: _T = cast(_T, None)
def on_next(x: _T) -> None:
nonlocal has_previous, previous
pair = None
with source.lock:
if has_previous:
pair = (previous, x)
else:
has_previous = True
previous = x
if pair:
observer.on_next(pair)
return source.subscribe(on_next, observer.on_error, observer.on_completed)
return Observable(subscribe)
return pairwise
__all__ = ["pairwise_"]
| en | 0.8479 | Partially applied pairwise operator. Returns a new observable that triggers on the second and subsequent triggerings of the input observable. The Nth triggering of the input observable passes the arguments from the N-1th and Nth triggering as a pair. The argument passed to the N-1th triggering is held in hidden internal state until the Nth triggering occurs. Returns: An observable that triggers on successive pairs of observations from the input observable as an array. | 2.815668 | 3 |
mysite/blog/views.py | nat-chan/mydjango | 0 | 6615407 | from django.shortcuts import get_object_or_404
from django.http import Http404
from django.template.response import TemplateResponse
from blog.models import Post
def post_list(request):
return TemplateResponse(
request,
"post_list.html",
{"posts": Post.objects.all()},
)
def post_detail(request, post_id):
post = get_object_or_404(Post, id=post_id)
return TemplateResponse(
request,
"post_detail.html",
{"post": post},
)
| from django.shortcuts import get_object_or_404
from django.http import Http404
from django.template.response import TemplateResponse
from blog.models import Post
def post_list(request):
return TemplateResponse(
request,
"post_list.html",
{"posts": Post.objects.all()},
)
def post_detail(request, post_id):
post = get_object_or_404(Post, id=post_id)
return TemplateResponse(
request,
"post_detail.html",
{"post": post},
)
| none | 1 | 1.989911 | 2 | |
single-shot-pose/demo.py | take-cheeze/models | 112 | 6615408 | <reponame>take-cheeze/models<filename>single-shot-pose/demo.py<gh_stars>100-1000
import argparse
import matplotlib.pyplot as plt
import numpy as np
import chainer
from chainercv.utils import read_image
from chainercv.visualizations import vis_image
from lib.ssp import SSPYOLOv2
from lib.vis_point import vis_point
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--pretrained-model')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('image')
args = parser.parse_args()
img = read_image(args.image)
model = SSPYOLOv2()
chainer.serializers.load_npz(args.pretrained_model, model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
points, labels, scores = model.predict([img])
point = points[0]
label = labels[0]
score = scores[0]
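# Draw only the first predicted keypoint set on the input image.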
vis_point(img, point[:1])
plt.show()
if __name__ == '__main__':
main()
| import argparse
import matplotlib.pyplot as plt
import numpy as np
import chainer
from chainercv.utils import read_image
from chainercv.visualizations import vis_image
from lib.ssp import SSPYOLOv2
from lib.vis_point import vis_point
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--pretrained-model')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('image')
args = parser.parse_args()
img = read_image(args.image)
model = SSPYOLOv2()
chainer.serializers.load_npz(args.pretrained_model, model)
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
points, labels, scores = model.predict([img])
point = points[0]
label = labels[0]
score = scores[0]
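# Draw only the first predicted keypoint set on the input image.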
vis_point(img, point[:1])
plt.show()
if __name__ == '__main__':
main() | none | 1 | 2.404876 | 2 | |
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_traffmon_netflow_cfg.py | tkamata-test/ydk-py | 0 | 6615409 | <gh_stars>0
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'NfSamplingModeEnum' : _MetaInfoEnum('NfSamplingModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg',
{
'random':'random',
}, 'Cisco-IOS-XR-traffmon-netflow-cfg', _yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg']),
'NfCacheAgingModeEnum' : _MetaInfoEnum('NfCacheAgingModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg',
{
'normal':'normal',
'permanent':'permanent',
}, 'Cisco-IOS-XR-traffmon-netflow-cfg', _yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg']),
'NetFlow.FlowExporterMaps.FlowExporterMap.Udp' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Udp',
False,
[
_MetaInfoClassMember('destination-port', ATTRIBUTE, 'int' , None, None,
[('1024', '65535')], [],
''' Configure Destination UDP port
''',
'destination_port',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'udp',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options',
False,
[
_MetaInfoClassMember('interface-table-export-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify timeout for exporting interface
table
''',
'interface_table_export_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('sampler-table-export-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify timeout for exporting sampler table
''',
'sampler_table_export_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('vrf-table-export-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify timeout for exporting vrf table
''',
'vrf_table_export_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'options',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version',
False,
[
_MetaInfoClassMember('version-number', ATTRIBUTE, 'int' , None, None,
[('9', '10')], [],
''' Export version number
''',
'version_number',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('common-template-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify custom timeout for the template
''',
'common_template_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('data-template-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Data template configuration options
''',
'data_template_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('options', REFERENCE_CLASS, 'Options' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options',
[], [],
''' Specify options for exporting templates
''',
'options',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('options-template-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Option template configuration options
''',
'options_template_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'version',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Versions' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Versions',
False,
[
_MetaInfoClassMember('version', REFERENCE_LIST, 'Version' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version',
[], [],
''' Configure export version options
''',
'version',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'versions',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Destination' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Destination',
False,
[
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Destination IPv4 address
''',
'ip_address',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IPV6 address of the tunnel destination
''',
'ipv6_address',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'destination',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap',
False,
[
_MetaInfoClassMember('exporter-map-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Exporter map name
''',
'exporter_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('destination', REFERENCE_CLASS, 'Destination' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Destination',
[], [],
''' Configure export destination (collector)
''',
'destination',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('dscp', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' Specify DSCP value for export packets
''',
'dscp',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('source-interface', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Configure source interface for collector
''',
'source_interface',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('udp', REFERENCE_CLASS, 'Udp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Udp',
[], [],
''' Use UDP as transport protocol
''',
'udp',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('versions', REFERENCE_CLASS, 'Versions' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Versions',
[], [],
''' Specify export version parameters
''',
'versions',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-exporter-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps',
False,
[
_MetaInfoClassMember('flow-exporter-map', REFERENCE_LIST, 'FlowExporterMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap',
[], [],
''' Exporter map name
''',
'flow_exporter_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-exporter-maps',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode',
False,
[
_MetaInfoClassMember('mode', REFERENCE_ENUM_CLASS, 'NfSamplingModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NfSamplingModeEnum',
[], [],
''' Sampling mode
''',
'mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Sampling interval in units of packets
''',
'interval',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('sample-number', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of packets to be sampled in the
sampling interval
''',
'sample_number',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'sampling-mode',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes',
False,
[
_MetaInfoClassMember('sampling-mode', REFERENCE_LIST, 'SamplingMode' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode',
[], [],
''' Configure sampling mode
''',
'sampling_mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'sampling-modes',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps.FlowSamplerMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps.FlowSamplerMap',
False,
[
_MetaInfoClassMember('sampler-map-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Sampler map name
''',
'sampler_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('sampling-modes', REFERENCE_CLASS, 'SamplingModes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes',
[], [],
''' Configure packet sampling mode
''',
'sampling_modes',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-sampler-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps',
False,
[
_MetaInfoClassMember('flow-sampler-map', REFERENCE_LIST, 'FlowSamplerMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps.FlowSamplerMap',
[], [],
''' Sampler map name
''',
'flow_sampler_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-sampler-maps',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option',
False,
[
_MetaInfoClassMember('filtered', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether data should be filtered
''',
'filtered',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('out-phys-int', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether it exports the physical output
interface
''',
'out_phys_int',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'option',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter',
False,
[
_MetaInfoClassMember('exporter-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Exporter name
''',
'exporter_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporter',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters',
False,
[
_MetaInfoClassMember('exporter', REFERENCE_LIST, 'Exporter' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter',
[], [],
''' Configure exporter to be used by the
monitor-map
''',
'exporter',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporters',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record',
False,
[
_MetaInfoClassMember('label', ATTRIBUTE, 'int' , None, None,
[('1', '6')], [],
''' Enter label value for MPLS record type
''',
'label',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Flow record format (Either 'ipv4-raw'
,'ipv4-peer-as', 'ipv6', 'mpls', 'mpls-ipv4',
'mpls-ipv6', 'mpls-ipv4-ipv6', 'ipv6-peer-as')
''',
'record_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'record',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap',
False,
[
_MetaInfoClassMember('monitor-map-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Monitor map name
''',
'monitor_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('cache-active-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the active flow cache aging timeout
''',
'cache_active_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-aging-mode', REFERENCE_ENUM_CLASS, 'NfCacheAgingModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NfCacheAgingModeEnum',
[], [],
''' Specify the flow cache aging mode
''',
'cache_aging_mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-entries', ATTRIBUTE, 'int' , None, None,
[('4096', '1000000')], [],
''' Specify the number of entries in the flow cache
''',
'cache_entries',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-inactive-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify the inactive flow cache aging timeout
''',
'cache_inactive_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-timeout-rate-limit', ATTRIBUTE, 'int' , None, None,
[('1', '1000000')], [],
''' Specify the maximum number of entries to age
each second
''',
'cache_timeout_rate_limit',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-update-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the update flow cache aging timeout
''',
'cache_update_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('exporters', REFERENCE_CLASS, 'Exporters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters',
[], [],
''' Configure exporters to be used by the
monitor-map
''',
'exporters',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('option', REFERENCE_CLASS, 'Option' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option',
[], [],
''' Specify an option for the flow cache
''',
'option',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record', REFERENCE_CLASS, 'Record' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record',
[], [],
''' Specify a flow record format
''',
'record',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable',
False,
[
_MetaInfoClassMember('flow-monitor-map', REFERENCE_LIST, 'FlowMonitorMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap',
[], [],
''' Monitor map name
''',
'flow_monitor_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map-table',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option',
False,
[
_MetaInfoClassMember('filtered', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether data should be filtered
''',
'filtered',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('out-phys-int', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether it exports the physical output
interface
''',
'out_phys_int',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'option',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter',
False,
[
_MetaInfoClassMember('exporter-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Exporter name
''',
'exporter_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporter',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters',
False,
[
_MetaInfoClassMember('exporter', REFERENCE_LIST, 'Exporter' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter',
[], [],
''' Configure exporter to be used by the
monitor-map
''',
'exporter',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporters',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record',
False,
[
_MetaInfoClassMember('label', ATTRIBUTE, 'int' , None, None,
[('1', '6')], [],
''' Enter label value for MPLS record type
''',
'label',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Flow record format (Either 'ipv4-raw'
,'ipv4-peer-as', 'ipv6', 'mpls', 'mpls-ipv4',
'mpls-ipv6', 'mpls-ipv4-ipv6', 'ipv6-peer-as')
''',
'record_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'record',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap',
False,
[
_MetaInfoClassMember('monitor-map-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Monitor map name
''',
'monitor_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('cache-active-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the active flow cache aging timeout
''',
'cache_active_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-aging-mode', REFERENCE_ENUM_CLASS, 'NfCacheAgingModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NfCacheAgingModeEnum',
[], [],
''' Specify the flow cache aging mode
''',
'cache_aging_mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-entries', ATTRIBUTE, 'int' , None, None,
[('4096', '1000000')], [],
''' Specify the number of entries in the flow cache
''',
'cache_entries',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-inactive-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify the inactive flow cache aging timeout
''',
'cache_inactive_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-timeout-rate-limit', ATTRIBUTE, 'int' , None, None,
[('1', '1000000')], [],
''' Specify the maximum number of entries to age
each second
''',
'cache_timeout_rate_limit',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-update-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the update flow cache aging timeout
''',
'cache_update_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('exporters', REFERENCE_CLASS, 'Exporters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters',
[], [],
''' Configure exporters to be used by the
monitor-map
''',
'exporters',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('option', REFERENCE_CLASS, 'Option' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option',
[], [],
''' Specify an option for the flow cache
''',
'option',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record', REFERENCE_CLASS, 'Record' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record',
[], [],
''' Specify a flow record format
''',
'record',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable',
False,
[
_MetaInfoClassMember('flow-monitor-map', REFERENCE_LIST, 'FlowMonitorMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap',
[], [],
''' Monitor map name
''',
'flow_monitor_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map-performance-table',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow' : {
'meta_info' : _MetaInfoClass('NetFlow',
False,
[
_MetaInfoClassMember('flow-exporter-maps', REFERENCE_CLASS, 'FlowExporterMaps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps',
[], [],
''' Configure a flow exporter map
''',
'flow_exporter_maps',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('flow-monitor-map-performance-table', REFERENCE_CLASS, 'FlowMonitorMapPerformanceTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable',
[], [],
''' Configure a performance traffic flow monitor map
''',
'flow_monitor_map_performance_table',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('flow-monitor-map-table', REFERENCE_CLASS, 'FlowMonitorMapTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable',
[], [],
''' Flow monitor map configuration
''',
'flow_monitor_map_table',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('flow-sampler-maps', REFERENCE_CLASS, 'FlowSamplerMaps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps',
[], [],
''' Flow sampler map configuration
''',
'flow_sampler_maps',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'net-flow',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
}
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Udp']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Destination']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info'].parent =_meta_table['NetFlow.FlowExporterMaps']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode']['meta_info'].parent =_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes']['meta_info'].parent =_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap']['meta_info'].parent =_meta_table['NetFlow.FlowSamplerMaps']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapTable']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info'].parent =_meta_table['NetFlow.FlowMonitorMapPerformanceTable']['meta_info']
_meta_table['NetFlow.FlowExporterMaps']['meta_info'].parent =_meta_table['NetFlow']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps']['meta_info'].parent =_meta_table['NetFlow']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable']['meta_info'].parent =_meta_table['NetFlow']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable']['meta_info'].parent =_meta_table['NetFlow']['meta_info']
| import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'NfSamplingModeEnum' : _MetaInfoEnum('NfSamplingModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg',
{
'random':'random',
}, 'Cisco-IOS-XR-traffmon-netflow-cfg', _yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg']),
'NfCacheAgingModeEnum' : _MetaInfoEnum('NfCacheAgingModeEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg',
{
'normal':'normal',
'permanent':'permanent',
}, 'Cisco-IOS-XR-traffmon-netflow-cfg', _yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg']),
'NetFlow.FlowExporterMaps.FlowExporterMap.Udp' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Udp',
False,
[
_MetaInfoClassMember('destination-port', ATTRIBUTE, 'int' , None, None,
[('1024', '65535')], [],
''' Configure Destination UDP port
''',
'destination_port',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'udp',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options',
False,
[
_MetaInfoClassMember('interface-table-export-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify timeout for exporting interface
table
''',
'interface_table_export_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('sampler-table-export-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify timeout for exporting sampler table
''',
'sampler_table_export_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('vrf-table-export-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify timeout for exporting vrf table
''',
'vrf_table_export_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'options',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version',
False,
[
_MetaInfoClassMember('version-number', ATTRIBUTE, 'int' , None, None,
[('9', '10')], [],
''' Export version number
''',
'version_number',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('common-template-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify custom timeout for the template
''',
'common_template_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('data-template-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Data template configuration options
''',
'data_template_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('options', REFERENCE_CLASS, 'Options' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options',
[], [],
''' Specify options for exporting templates
''',
'options',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('options-template-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Option template configuration options
''',
'options_template_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'version',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Versions' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Versions',
False,
[
_MetaInfoClassMember('version', REFERENCE_LIST, 'Version' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version',
[], [],
''' Configure export version options
''',
'version',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'versions',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap.Destination' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap.Destination',
False,
[
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], ['(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Destination IPv4 address
''',
'ip_address',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('ipv6-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IPV6 address of the tunnel destination
''',
'ipv6_address',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'destination',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps.FlowExporterMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps.FlowExporterMap',
False,
[
_MetaInfoClassMember('exporter-map-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Exporter map name
''',
'exporter_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('destination', REFERENCE_CLASS, 'Destination' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Destination',
[], [],
''' Configure export destination (collector)
''',
'destination',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('dscp', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' Specify DSCP value for export packets
''',
'dscp',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('source-interface', ATTRIBUTE, 'str' , None, None,
[], ['(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Configure source interface for collector
''',
'source_interface',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('udp', REFERENCE_CLASS, 'Udp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Udp',
[], [],
''' Use UDP as transport protocol
''',
'udp',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('versions', REFERENCE_CLASS, 'Versions' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap.Versions',
[], [],
''' Specify export version parameters
''',
'versions',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-exporter-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowExporterMaps' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowExporterMaps',
False,
[
_MetaInfoClassMember('flow-exporter-map', REFERENCE_LIST, 'FlowExporterMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps.FlowExporterMap',
[], [],
''' Exporter map name
''',
'flow_exporter_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-exporter-maps',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode',
False,
[
_MetaInfoClassMember('mode', REFERENCE_ENUM_CLASS, 'NfSamplingModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NfSamplingModeEnum',
[], [],
''' Sampling mode
''',
'mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('interval', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' Sampling interval in units of packets
''',
'interval',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('sample-number', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of packets to be sampled in the
sampling interval
''',
'sample_number',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'sampling-mode',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes',
False,
[
_MetaInfoClassMember('sampling-mode', REFERENCE_LIST, 'SamplingMode' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode',
[], [],
''' Configure sampling mode
''',
'sampling_mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'sampling-modes',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps.FlowSamplerMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps.FlowSamplerMap',
False,
[
_MetaInfoClassMember('sampler-map-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Sampler map name
''',
'sampler_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('sampling-modes', REFERENCE_CLASS, 'SamplingModes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes',
[], [],
''' Configure packet sampling mode
''',
'sampling_modes',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-sampler-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowSamplerMaps' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowSamplerMaps',
False,
[
_MetaInfoClassMember('flow-sampler-map', REFERENCE_LIST, 'FlowSamplerMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps.FlowSamplerMap',
[], [],
''' Sampler map name
''',
'flow_sampler_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-sampler-maps',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option',
False,
[
_MetaInfoClassMember('filtered', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether data should be filtered
''',
'filtered',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('out-phys-int', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether it exports the physical output
interface
''',
'out_phys_int',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'option',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter',
False,
[
_MetaInfoClassMember('exporter-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Exporter name
''',
'exporter_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporter',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters',
False,
[
_MetaInfoClassMember('exporter', REFERENCE_LIST, 'Exporter' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter',
[], [],
''' Configure exporter to be used by the
monitor-map
''',
'exporter',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporters',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record',
False,
[
_MetaInfoClassMember('label', ATTRIBUTE, 'int' , None, None,
[('1', '6')], [],
''' Enter label value for MPLS record type
''',
'label',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Flow record format (Either 'ipv4-raw'
,'ipv4-peer-as', 'ipv6', 'mpls', 'mpls-ipv4',
'mpls-ipv6', 'mpls-ipv4-ipv6', 'ipv6-peer-as')
''',
'record_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'record',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable.FlowMonitorMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable.FlowMonitorMap',
False,
[
_MetaInfoClassMember('monitor-map-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Monitor map name
''',
'monitor_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('cache-active-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the active flow cache aging timeout
''',
'cache_active_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-aging-mode', REFERENCE_ENUM_CLASS, 'NfCacheAgingModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NfCacheAgingModeEnum',
[], [],
''' Specify the flow cache aging mode
''',
'cache_aging_mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-entries', ATTRIBUTE, 'int' , None, None,
[('4096', '1000000')], [],
''' Specify the number of entries in the flow cache
''',
'cache_entries',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-inactive-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify the inactive flow cache aging timeout
''',
'cache_inactive_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-timeout-rate-limit', ATTRIBUTE, 'int' , None, None,
[('1', '1000000')], [],
''' Specify the maximum number of entries to age
each second
''',
'cache_timeout_rate_limit',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-update-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the update flow cache aging timeout
''',
'cache_update_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('exporters', REFERENCE_CLASS, 'Exporters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters',
[], [],
''' Configure exporters to be used by the
monitor-map
''',
'exporters',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('option', REFERENCE_CLASS, 'Option' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option',
[], [],
''' Specify an option for the flow cache
''',
'option',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record', REFERENCE_CLASS, 'Record' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record',
[], [],
''' Specify a flow record format
''',
'record',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapTable' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapTable',
False,
[
_MetaInfoClassMember('flow-monitor-map', REFERENCE_LIST, 'FlowMonitorMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable.FlowMonitorMap',
[], [],
''' Monitor map name
''',
'flow_monitor_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map-table',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
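    # The 'FlowMonitorMapPerformanceTable' entries below mirror the
    # 'FlowMonitorMapTable' entries above; only the owning container
    # ('flow-monitor-map-performance-table') differs.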
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option',
False,
[
_MetaInfoClassMember('filtered', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Specify whether data should be filtered
''',
'filtered',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('out-phys-int', ATTRIBUTE, 'Empty' , None, None,
[], [],
                ''' Specify whether to export the physical output
                interface
''',
'out_phys_int',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'option',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter',
False,
[
_MetaInfoClassMember('exporter-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Exporter name
''',
'exporter_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporter',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters',
False,
[
_MetaInfoClassMember('exporter', REFERENCE_LIST, 'Exporter' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter',
[], [],
''' Configure exporter to be used by the
monitor-map
''',
'exporter',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'exporters',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record',
False,
[
_MetaInfoClassMember('label', ATTRIBUTE, 'int' , None, None,
[('1', '6')], [],
''' Enter label value for MPLS record type
''',
'label',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
                ''' Flow record format (either 'ipv4-raw',
                'ipv4-peer-as', 'ipv6', 'mpls', 'mpls-ipv4',
                'mpls-ipv6', 'mpls-ipv4-ipv6', 'ipv6-peer-as')
''',
'record_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'record',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap',
False,
[
_MetaInfoClassMember('monitor-map-name', ATTRIBUTE, 'str' , None, None,
[(0, 32)], [],
''' Monitor map name
''',
'monitor_map_name',
'Cisco-IOS-XR-traffmon-netflow-cfg', True),
_MetaInfoClassMember('cache-active-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the active flow cache aging timeout
''',
'cache_active_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-aging-mode', REFERENCE_ENUM_CLASS, 'NfCacheAgingModeEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NfCacheAgingModeEnum',
[], [],
''' Specify the flow cache aging mode
''',
'cache_aging_mode',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-entries', ATTRIBUTE, 'int' , None, None,
[('4096', '1000000')], [],
''' Specify the number of entries in the flow cache
''',
'cache_entries',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-inactive-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('0', '604800')], [],
''' Specify the inactive flow cache aging timeout
''',
'cache_inactive_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-timeout-rate-limit', ATTRIBUTE, 'int' , None, None,
[('1', '1000000')], [],
''' Specify the maximum number of entries to age
each second
''',
'cache_timeout_rate_limit',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('cache-update-aging-timeout', ATTRIBUTE, 'int' , None, None,
[('1', '604800')], [],
''' Specify the update flow cache aging timeout
''',
'cache_update_aging_timeout',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('exporters', REFERENCE_CLASS, 'Exporters' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters',
[], [],
''' Configure exporters to be used by the
monitor-map
''',
'exporters',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('option', REFERENCE_CLASS, 'Option' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option',
[], [],
''' Specify an option for the flow cache
''',
'option',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('record', REFERENCE_CLASS, 'Record' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record',
[], [],
''' Specify a flow record format
''',
'record',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
'NetFlow.FlowMonitorMapPerformanceTable' : {
'meta_info' : _MetaInfoClass('NetFlow.FlowMonitorMapPerformanceTable',
False,
[
_MetaInfoClassMember('flow-monitor-map', REFERENCE_LIST, 'FlowMonitorMap' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap',
[], [],
''' Monitor map name
''',
'flow_monitor_map',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'flow-monitor-map-performance-table',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
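    # Top-level 'NetFlow' entry ('net-flow' container): aggregates the
    # flow-exporter-maps, flow-monitor-map-performance-table,
    # flow-monitor-map-table and flow-sampler-maps containers defined above.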
'NetFlow' : {
'meta_info' : _MetaInfoClass('NetFlow',
False,
[
_MetaInfoClassMember('flow-exporter-maps', REFERENCE_CLASS, 'FlowExporterMaps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowExporterMaps',
[], [],
''' Configure a flow exporter map
''',
'flow_exporter_maps',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('flow-monitor-map-performance-table', REFERENCE_CLASS, 'FlowMonitorMapPerformanceTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapPerformanceTable',
[], [],
''' Configure a performance traffic flow monitor map
''',
'flow_monitor_map_performance_table',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('flow-monitor-map-table', REFERENCE_CLASS, 'FlowMonitorMapTable' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowMonitorMapTable',
[], [],
''' Flow monitor map configuration
''',
'flow_monitor_map_table',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
_MetaInfoClassMember('flow-sampler-maps', REFERENCE_CLASS, 'FlowSamplerMaps' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg', 'NetFlow.FlowSamplerMaps',
[], [],
''' Flow sampler map configuration
''',
'flow_sampler_maps',
'Cisco-IOS-XR-traffmon-netflow-cfg', False),
],
'Cisco-IOS-XR-traffmon-netflow-cfg',
'net-flow',
_yang_ns._namespaces['Cisco-IOS-XR-traffmon-netflow-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg'
),
},
}
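# Wire up parent pointers so each meta_info node can be traversed back to its
# enclosing container, mirroring the YANG containment hierarchy of the module.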
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version.Options']['meta_info'].parent = _meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions.Version']['meta_info'].parent = _meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Udp']['meta_info'].parent = _meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Versions']['meta_info'].parent = _meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap.Destination']['meta_info'].parent = _meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info']
_meta_table['NetFlow.FlowExporterMaps.FlowExporterMap']['meta_info'].parent = _meta_table['NetFlow.FlowExporterMaps']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes.SamplingMode']['meta_info'].parent = _meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap.SamplingModes']['meta_info'].parent = _meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps.FlowSamplerMap']['meta_info'].parent = _meta_table['NetFlow.FlowSamplerMaps']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters.Exporter']['meta_info'].parent = _meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Option']['meta_info'].parent = _meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Exporters']['meta_info'].parent = _meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap.Record']['meta_info'].parent = _meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable.FlowMonitorMap']['meta_info'].parent = _meta_table['NetFlow.FlowMonitorMapTable']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters.Exporter']['meta_info'].parent = _meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Option']['meta_info'].parent = _meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Exporters']['meta_info'].parent = _meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap.Record']['meta_info'].parent = _meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable.FlowMonitorMap']['meta_info'].parent = _meta_table['NetFlow.FlowMonitorMapPerformanceTable']['meta_info']
_meta_table['NetFlow.FlowExporterMaps']['meta_info'].parent = _meta_table['NetFlow']['meta_info']
_meta_table['NetFlow.FlowSamplerMaps']['meta_info'].parent = _meta_table['NetFlow']['meta_info']
_meta_table['NetFlow.FlowMonitorMapTable']['meta_info'].parent = _meta_table['NetFlow']['meta_info']
_meta_table['NetFlow.FlowMonitorMapPerformanceTable']['meta_info'].parent = _meta_table['NetFlow']['meta_info']
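# Minimal usage sketch (an assumption, not part of the generated metadata --
# the consumer API may differ by YDK version): the companion model classes
# generated from the same YANG module are typically populated like this,
# with the table above supplying their meta_info behind the scenes:
#
#     from ydk.models.cisco_ios_xr.Cisco_IOS_XR_traffmon_netflow_cfg import NetFlow
#     netflow = NetFlow()
#     fmm = netflow.flow_monitor_map_table.FlowMonitorMap()
#     fmm.monitor_map_name = 'FMM-EXAMPLE'  # hypothetical key value
#     fmm.cache_entries = 65536             # must fall in [4096, 1000000]
#     netflow.flow_monitor_map_table.flow_monitor_map.append(fmm)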
src/test/signal_checkpoint.py | cgjones/rr | 3 | 6615410 | <filename>src/test/signal_checkpoint.py
from rrutil import *
send_gdb('b sighandler\n')
expect_gdb('Breakpoint 1')
send_gdb('c\n')
expect_gdb('Program received signal SIGILL')
expect_gdb('ud2')
send_gdb('checkpoint\n')
expect_gdb('= 1')
send_gdb('c\n')
expect_gdb('Breakpoint 1, sighandler')
send_gdb("restart 1\n");
expect_gdb('Breakpoint 1, sighandler')
ok()
| <filename>src/test/signal_checkpoint.py
from rrutil import *
send_gdb('b sighandler\n')
expect_gdb('Breakpoint 1')
send_gdb('c\n')
expect_gdb('Program received signal SIGILL')
expect_gdb('ud2')
send_gdb('checkpoint\n')
expect_gdb('= 1')
send_gdb('c\n')
expect_gdb('Breakpoint 1, sighandler')
send_gdb("restart 1\n");
expect_gdb('Breakpoint 1, sighandler')
ok()
| none | 1 | 1.639403 | 2 | |
py/cidoc_crm_types/properties/p59i_is_located_on_or_within.py | minorg/cidoc-crm-types | 0 | 6615411 | <gh_stars>0
from .p157_is_at_rest_relative_to import P157IsAtRestRelativeTo
from dataclasses import dataclass
@dataclass
class P59iIsLocatedOnOrWithin(P157IsAtRestRelativeTo):
URI = "http://erlangen-crm.org/current/P59i_is_located_on_or_within"
| from .p157_is_at_rest_relative_to import P157IsAtRestRelativeTo
from dataclasses import dataclass
@dataclass
class P59iIsLocatedOnOrWithin(P157IsAtRestRelativeTo):
URI = "http://erlangen-crm.org/current/P59i_is_located_on_or_within" | none | 1 | 1.8389 | 2 | |
third_party/xiuminglib/xiuminglib/vis/plot.py | leehsiu/nerfactor | 183 | 6615412 | # pylint: disable=blacklisted-name
from os.path import join, dirname
import numpy as np
from ..log import get_logger
logger = get_logger()
from .. import const
from ..os import makedirs, open_file
class Plot:
def __init__(
self,
legend_fontsize=20,
legend_loc=0,
figsize=(14, 14),
figtitle=None,
figtitle_fontsize=20,
xlabel=None,
xlabel_fontsize=20,
ylabel=None,
ylabel_fontsize=20,
zlabel=None,
zlabel_fontsize=20,
xlim=None,
ylim=None,
zlim=None,
xticks=None,
xticks_fontsize=10,
xticks_rotation=0,
yticks=None,
yticks_fontsize=10,
yticks_rotation=0,
zticks=None,
zticks_fontsize=10,
zticks_rotation=0,
grid=True,
labels=None,
outpath=None):
"""Plotter.
Args:
legend_fontsize (int, optional): Legend font size.
legend_loc (str, optional): Legend location: ``'best'``,
``'upper right'``, ``'lower left'``, ``'right'``,
``'center left'``, ``'lower center'``, ``'upper center'``,
``'center'``, etc. Effective only when ``labels`` is not
``None``.
figsize (tuple, optional): Width and height of the figure in inches.
figtitle (str, optional): Figure title.
*_fontsize (int, optional): Font size.
?label (str, optional): Axis labels.
?lim (array_like, optional): Axis min. and max. ``None`` means auto.
?ticks (array_like, optional): Axis tick values. ``None`` means
auto.
?ticks_rotation (float, optional): Tick rotation in degrees.
grid (bool, optional): Whether to draw grid.
labels (list, optional): Labels.
outpath (str, optional): Path to which the plot is saved to. Should
end with ``'.png'``, and ``None`` means to ``const.Dir.tmp``.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#
self.plt = plt
self.legend_fontsize = legend_fontsize
self.legend_loc = legend_loc
self.figsize = figsize
self.figtitle = figtitle
self.figtitle_fontsize = figtitle_fontsize
self.xlabel = xlabel
self.xlabel_fontsize = xlabel_fontsize
self.ylabel = ylabel
self.ylabel_fontsize = ylabel_fontsize
self.zlabel = zlabel
self.zlabel_fontsize = zlabel_fontsize
self.xlim = xlim
self.ylim = ylim
self.zlim = zlim
self.xticks = xticks
self.xticks_rotation = xticks_rotation
self.xticks_fontsize = xticks_fontsize
self.yticks = yticks
self.yticks_rotation = yticks_rotation
self.yticks_fontsize = yticks_fontsize
self.zticks = zticks
self.zticks_rotation = zticks_rotation
self.zticks_fontsize = zticks_fontsize
self.grid = grid
self.labels = labels
self.outpath = outpath
def _savefig(self, outpath, contents_only=False, dpi=None):
# Make directory, if necessary
outdir = dirname(outpath)
makedirs(outdir)
#
if contents_only:
ax = self.plt.gca()
ax.set_position([0, 0, 1, 1])
ax.set_axis_off()
with open_file(outpath, 'wb') as h:
self.plt.savefig(h, dpi=dpi)
else:
with open_file(outpath, 'wb') as h:
self.plt.savefig(h, bbox_inches='tight', dpi=dpi)
def _add_legend(self, plot_objs):
if self.labels is None:
return
n_plot_objs = len(plot_objs)
assert (len(self.labels) == n_plot_objs), (
"Number of labels must equal number of plot objects; "
"use None for object without a label")
for i in range(n_plot_objs):
plot_objs[i].set_label(self.labels[i])
self.plt.legend(fontsize=self.legend_fontsize, loc=self.legend_loc)
def _add_axis_labels(self, ax):
if self.xlabel is not None:
ax.set_xlabel(self.xlabel, fontsize=self.xlabel_fontsize)
if self.ylabel is not None:
ax.set_ylabel(self.ylabel, fontsize=self.ylabel_fontsize)
if self.zlabel is not None:
ax.set_zlabel(self.zlabel, fontsize=self.zlabel_fontsize)
def _set_axis_ticks(self, ax):
# FIXME: if xticks is not provided, xticks_fontsize and xticks_rotation have
# no effect, which shouldn't be the case
if self.xticks is not None:
ax.set_xticklabels(
self.xticks, fontsize=self.xticks_fontsize,
rotation=self.xticks_rotation)
if self.yticks is not None:
ax.set_yticklabels(
self.yticks, fontsize=self.yticks_fontsize,
rotation=self.yticks_rotation)
if self.zticks is not None:
ax.set_zticklabels(
self.zticks, fontsize=self.zticks_fontsize,
rotation=self.zticks_rotation)
def _set_axis_lim(self, ax):
if self.xlim is not None:
ax.set_xlim(*self.xlim)
if self.ylim is not None:
ax.set_ylim(*self.ylim)
if self.zlim is not None:
ax.set_zlim(*self.zlim)
@staticmethod
def _set_axes_equal(ax, xyz):
# plt.axis('equal') not working, hence the hack of creating a cubic
# bounding box
x_data, y_data, z_data = xyz[:, 0], xyz[:, 1], xyz[:, 2]
max_range = np.array([
x_data.max() - x_data.min(),
y_data.max() - y_data.min(),
z_data.max() - z_data.min()]).max()
xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() \
+ 0.5 * (x_data.max() + x_data.min())
yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() \
+ 0.5 * (y_data.max() + y_data.min())
zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() \
+ 0.5 * (z_data.max() + z_data.min())
for xb_, yb_, zb_ in zip(xb, yb, zb):
ax.plot([xb_], [yb_], [zb_], 'w')
def _set_title(self, ax):
if self.figtitle is not None:
ax.set_title(self.figtitle, fontsize=self.figtitle_fontsize)
def bar(self, y, group_width=0.8):
"""Bar plot.
Args:
y (array_like): N-by-M array of N groups, each with M bars,
or N-array of N groups, each with one bar.
group_width (float, optional): Width allocated to each group,
shared by all bars within the group.
Writes
- The bar plot.
"""
outpath = join(const.Dir.tmp, 'bar.png') if self.outpath is None \
else self.outpath
fig = self.plt.figure(figsize=self.figsize)
ax = fig.add_subplot(111)
self._set_title(ax)
# Ensure y is 2D, with columns representing values within groups
# and rows across groups
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n, n_grp = y.shape
# Group width is shared by all groups
bar_width = group_width / n_grp
# Assume x is evenly spaced
x = np.arange(n)
# Plot
plot_objs = []
for i in range(n_grp):
x_ = x - 0.5 * group_width + 0.5 * bar_width + i * bar_width
plot_obj = ax.bar(x_, y[:, i], bar_width)
plot_objs.append(plot_obj)
#
self._add_legend(plot_objs)
self.plt.grid(self.grid)
self._add_axis_labels(ax)
self._set_axis_ticks(ax)
self._set_axis_lim(ax)
self._savefig(outpath)
self.plt.close('all')
return outpath
def scatter3d(
self, xyz, colors=None, size=None, equal_axes=False, views=None):
"""3D scatter plot.
Args:
xyz (array_like): N-by-3 array of N points.
colors (array_like or list(str) or str, optional): If N-array, these
values are colormapped. If N-list, its elements should be color
strings. If a single color string, all points use that color.
size (int, optional): Scatter size.
equal_axes (bool, optional): Whether to have the same scale for all
axes.
views (list(tuple), optional): List of elevation-azimuth angle pairs
(in degrees). A good set of views is ``[(30, 0), (30, 45),
(30, 90), (30, 135)]``.
Writes
- One or multiple (if ``views`` is provided) views of the 3D plot.
"""
from mpl_toolkits.mplot3d import Axes3D # noqa; pylint: disable=unused-import
#
outpath = join(const.Dir.tmp, 'scatter3d.png') if self.outpath is None \
else self.outpath
fig = self.plt.figure(figsize=self.figsize)
ax = fig.add_subplot(111, projection='3d')
self._set_title(ax)
# Prepare kwargs to scatter()
kwargs = {}
need_colorbar = False
if isinstance(colors, np.ndarray):
kwargs['c'] = colors # will be colormapped with color map
kwargs['cmap'] = 'viridis'
need_colorbar = True
elif colors is not None:
kwargs['c'] = colors
if size is not None:
kwargs['s'] = size
# Plot
plot_objs = ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], **kwargs)
#
self._add_legend(plot_objs)
self.plt.grid(self.grid)
self._add_axis_labels(ax)
self._set_axis_ticks(ax)
self._set_axis_lim(ax)
if equal_axes:
self._set_axes_equal(ax, xyz)
if need_colorbar:
self.plt.colorbar(plot_objs)
# TODO: this seems to mess up equal axes
# Save plot
outpaths = []
if outpath.endswith('.png'):
if views is None:
self._savefig(outpath)
outpaths.append(outpath)
else:
for elev, azim in views:
ax.view_init(elev, azim)
self.plt.draw()
outpath_ = outpath[:-len('.png')] + \
'_elev%03d_azim%03d.png' % (elev, azim)
self._savefig(outpath_)
outpaths.append(outpath_)
else:
raise ValueError("`outpath` must end with '.png'")
self.plt.close('all')
return outpaths
def line(self, xy, width=None, marker=None, marker_size=None):
"""Line/curve plot.
Args:
xy (array_like): N-by-M array of N x-values (first column) and
their corresponding y-values (the remaining M-1 columns).
width (float, optional): Line width.
marker (str, optional): Marker.
marker_size (float, optional): Marker size.
Writes
- The line plot.
"""
outpath = join(const.Dir.tmp, 'line.png') if self.outpath is None \
else self.outpath
fig = self.plt.figure(figsize=self.figsize)
ax = fig.add_subplot(111)
self._set_title(ax)
# Prepare kwargs to scatter()
kwargs_list = []
n_lines = xy.shape[1] - 1
for i in range(n_lines):
kwargs = {}
if width is not None:
kwargs['linewidth'] = width
if marker is not None:
kwargs['marker'] = marker
if marker_size is not None:
kwargs['markersize'] = marker_size
kwargs_list.append(kwargs)
# Plot
plot_objs = []
for i in range(n_lines):
plot_obj = self.plt.plot(xy[:, 0], xy[:, 1 + i], **kwargs_list[i])
assert len(plot_obj) == 1
plot_obj = plot_obj[0]
plot_objs.append(plot_obj)
#
self._add_legend(plot_objs)
self.plt.grid(self.grid)
self._add_axis_labels(ax)
self._set_axis_ticks(ax)
self._set_axis_lim(ax)
self._savefig(outpath)
self.plt.close('all')
return outpath
| # pylint: disable=blacklisted-name
from os.path import join, dirname
import numpy as np
from ..log import get_logger
logger = get_logger()
from .. import const
from ..os import makedirs, open_file
class Plot:
def __init__(
self,
legend_fontsize=20,
legend_loc=0,
figsize=(14, 14),
figtitle=None,
figtitle_fontsize=20,
xlabel=None,
xlabel_fontsize=20,
ylabel=None,
ylabel_fontsize=20,
zlabel=None,
zlabel_fontsize=20,
xlim=None,
ylim=None,
zlim=None,
xticks=None,
xticks_fontsize=10,
xticks_rotation=0,
yticks=None,
yticks_fontsize=10,
yticks_rotation=0,
zticks=None,
zticks_fontsize=10,
zticks_rotation=0,
grid=True,
labels=None,
outpath=None):
"""Plotter.
Args:
legend_fontsize (int, optional): Legend font size.
legend_loc (str, optional): Legend location: ``'best'``,
``'upper right'``, ``'lower left'``, ``'right'``,
``'center left'``, ``'lower center'``, ``'upper center'``,
``'center'``, etc. Effective only when ``labels`` is not
``None``.
figsize (tuple, optional): Width and height of the figure in inches.
figtitle (str, optional): Figure title.
*_fontsize (int, optional): Font size.
?label (str, optional): Axis labels.
?lim (array_like, optional): Axis min. and max. ``None`` means auto.
?ticks (array_like, optional): Axis tick values. ``None`` means
auto.
?ticks_rotation (float, optional): Tick rotation in degrees.
grid (bool, optional): Whether to draw grid.
labels (list, optional): Labels.
outpath (str, optional): Path to which the plot is saved to. Should
end with ``'.png'``, and ``None`` means to ``const.Dir.tmp``.
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
#
self.plt = plt
self.legend_fontsize = legend_fontsize
self.legend_loc = legend_loc
self.figsize = figsize
self.figtitle = figtitle
self.figtitle_fontsize = figtitle_fontsize
self.xlabel = xlabel
self.xlabel_fontsize = xlabel_fontsize
self.ylabel = ylabel
self.ylabel_fontsize = ylabel_fontsize
self.zlabel = zlabel
self.zlabel_fontsize = zlabel_fontsize
self.xlim = xlim
self.ylim = ylim
self.zlim = zlim
self.xticks = xticks
self.xticks_rotation = xticks_rotation
self.xticks_fontsize = xticks_fontsize
self.yticks = yticks
self.yticks_rotation = yticks_rotation
self.yticks_fontsize = yticks_fontsize
self.zticks = zticks
self.zticks_rotation = zticks_rotation
self.zticks_fontsize = zticks_fontsize
self.grid = grid
self.labels = labels
self.outpath = outpath
def _savefig(self, outpath, contents_only=False, dpi=None):
# Make directory, if necessary
outdir = dirname(outpath)
makedirs(outdir)
#
if contents_only:
ax = self.plt.gca()
ax.set_position([0, 0, 1, 1])
ax.set_axis_off()
with open_file(outpath, 'wb') as h:
self.plt.savefig(h, dpi=dpi)
else:
with open_file(outpath, 'wb') as h:
self.plt.savefig(h, bbox_inches='tight', dpi=dpi)
def _add_legend(self, plot_objs):
if self.labels is None:
return
n_plot_objs = len(plot_objs)
assert (len(self.labels) == n_plot_objs), (
"Number of labels must equal number of plot objects; "
"use None for object without a label")
for i in range(n_plot_objs):
plot_objs[i].set_label(self.labels[i])
self.plt.legend(fontsize=self.legend_fontsize, loc=self.legend_loc)
def _add_axis_labels(self, ax):
if self.xlabel is not None:
ax.set_xlabel(self.xlabel, fontsize=self.xlabel_fontsize)
if self.ylabel is not None:
ax.set_ylabel(self.ylabel, fontsize=self.ylabel_fontsize)
if self.zlabel is not None:
ax.set_zlabel(self.zlabel, fontsize=self.zlabel_fontsize)
def _set_axis_ticks(self, ax):
# FIXME: if xticks is not provided, xticks_fontsize and xticks_rotation have
# no effect, which shouldn't be the case
if self.xticks is not None:
ax.set_xticklabels(
self.xticks, fontsize=self.xticks_fontsize,
rotation=self.xticks_rotation)
if self.yticks is not None:
ax.set_yticklabels(
self.yticks, fontsize=self.yticks_fontsize,
rotation=self.yticks_rotation)
if self.zticks is not None:
ax.set_zticklabels(
self.zticks, fontsize=self.zticks_fontsize,
rotation=self.zticks_rotation)
def _set_axis_lim(self, ax):
if self.xlim is not None:
ax.set_xlim(*self.xlim)
if self.ylim is not None:
ax.set_ylim(*self.ylim)
if self.zlim is not None:
ax.set_zlim(*self.zlim)
@staticmethod
def _set_axes_equal(ax, xyz):
# plt.axis('equal') not working, hence the hack of creating a cubic
# bounding box
x_data, y_data, z_data = xyz[:, 0], xyz[:, 1], xyz[:, 2]
max_range = np.array([
x_data.max() - x_data.min(),
y_data.max() - y_data.min(),
z_data.max() - z_data.min()]).max()
xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() \
+ 0.5 * (x_data.max() + x_data.min())
yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() \
+ 0.5 * (y_data.max() + y_data.min())
zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() \
+ 0.5 * (z_data.max() + z_data.min())
for xb_, yb_, zb_ in zip(xb, yb, zb):
ax.plot([xb_], [yb_], [zb_], 'w')
def _set_title(self, ax):
if self.figtitle is not None:
ax.set_title(self.figtitle, fontsize=self.figtitle_fontsize)
def bar(self, y, group_width=0.8):
"""Bar plot.
Args:
y (array_like): N-by-M array of N groups, each with M bars,
or N-array of N groups, each with one bar.
group_width (float, optional): Width allocated to each group,
shared by all bars within the group.
Writes
- The bar plot.
"""
outpath = join(const.Dir.tmp, 'bar.png') if self.outpath is None \
else self.outpath
fig = self.plt.figure(figsize=self.figsize)
ax = fig.add_subplot(111)
self._set_title(ax)
# Ensure y is 2D, with columns representing values within groups
# and rows across groups
if y.ndim == 1:
y = np.reshape(y, (-1, 1))
n, n_grp = y.shape
# Group width is shared by all groups
bar_width = group_width / n_grp
# Assume x is evenly spaced
x = np.arange(n)
# Plot
plot_objs = []
for i in range(n_grp):
x_ = x - 0.5 * group_width + 0.5 * bar_width + i * bar_width
plot_obj = ax.bar(x_, y[:, i], bar_width)
plot_objs.append(plot_obj)
#
self._add_legend(plot_objs)
self.plt.grid(self.grid)
self._add_axis_labels(ax)
self._set_axis_ticks(ax)
self._set_axis_lim(ax)
self._savefig(outpath)
self.plt.close('all')
return outpath
def scatter3d(
self, xyz, colors=None, size=None, equal_axes=False, views=None):
"""3D scatter plot.
Args:
xyz (array_like): N-by-3 array of N points.
colors (array_like or list(str) or str, optional): If N-array, these
values are colormapped. If N-list, its elements should be color
strings. If a single color string, all points use that color.
size (int, optional): Scatter size.
equal_axes (bool, optional): Whether to have the same scale for all
axes.
views (list(tuple), optional): List of elevation-azimuth angle pairs
(in degrees). A good set of views is ``[(30, 0), (30, 45),
(30, 90), (30, 135)]``.
Writes
- One or multiple (if ``views`` is provided) views of the 3D plot.
"""
from mpl_toolkits.mplot3d import Axes3D # noqa; pylint: disable=unused-import
#
outpath = join(const.Dir.tmp, 'scatter3d.png') if self.outpath is None \
else self.outpath
fig = self.plt.figure(figsize=self.figsize)
ax = fig.add_subplot(111, projection='3d')
self._set_title(ax)
# Prepare kwargs to scatter()
kwargs = {}
need_colorbar = False
if isinstance(colors, np.ndarray):
kwargs['c'] = colors # will be colormapped with color map
kwargs['cmap'] = 'viridis'
need_colorbar = True
elif colors is not None:
kwargs['c'] = colors
if size is not None:
kwargs['s'] = size
# Plot
plot_objs = ax.scatter(xyz[:, 0], xyz[:, 1], xyz[:, 2], **kwargs)
#
self._add_legend(plot_objs)
self.plt.grid(self.grid)
self._add_axis_labels(ax)
self._set_axis_ticks(ax)
self._set_axis_lim(ax)
if equal_axes:
self._set_axes_equal(ax, xyz)
if need_colorbar:
self.plt.colorbar(plot_objs)
# TODO: this seems to mess up equal axes
# Save plot
outpaths = []
if outpath.endswith('.png'):
if views is None:
self._savefig(outpath)
outpaths.append(outpath)
else:
for elev, azim in views:
ax.view_init(elev, azim)
self.plt.draw()
outpath_ = outpath[:-len('.png')] + \
'_elev%03d_azim%03d.png' % (elev, azim)
self._savefig(outpath_)
outpaths.append(outpath_)
else:
raise ValueError("`outpath` must end with '.png'")
self.plt.close('all')
return outpaths
def line(self, xy, width=None, marker=None, marker_size=None):
"""Line/curve plot.
Args:
xy (array_like): N-by-M array of N x-values (first column) and
their corresponding y-values (the remaining M-1 columns).
width (float, optional): Line width.
marker (str, optional): Marker.
marker_size (float, optional): Marker size.
Writes
- The line plot.
"""
outpath = join(const.Dir.tmp, 'line.png') if self.outpath is None \
else self.outpath
fig = self.plt.figure(figsize=self.figsize)
ax = fig.add_subplot(111)
self._set_title(ax)
# Prepare kwargs to scatter()
kwargs_list = []
n_lines = xy.shape[1] - 1
for i in range(n_lines):
kwargs = {}
if width is not None:
kwargs['linewidth'] = width
if marker is not None:
kwargs['marker'] = marker
if marker_size is not None:
kwargs['markersize'] = marker_size
kwargs_list.append(kwargs)
# Plot
plot_objs = []
for i in range(n_lines):
plot_obj = self.plt.plot(xy[:, 0], xy[:, 1 + i], **kwargs_list[i])
assert len(plot_obj) == 1
plot_obj = plot_obj[0]
plot_objs.append(plot_obj)
#
self._add_legend(plot_objs)
self.plt.grid(self.grid)
self._add_axis_labels(ax)
self._set_axis_ticks(ax)
self._set_axis_lim(ax)
self._savefig(outpath)
self.plt.close('all')
return outpath
| en | 0.687858 | # pylint: disable=blacklisted-name Plotter. Args: legend_fontsize (int, optional): Legend font size. legend_loc (str, optional): Legend location: ``'best'``, ``'upper right'``, ``'lower left'``, ``'right'``, ``'center left'``, ``'lower center'``, ``'upper center'``, ``'center'``, etc. Effective only when ``labels`` is not ``None``. figsize (tuple, optional): Width and height of the figure in inches. figtitle (str, optional): Figure title. *_fontsize (int, optional): Font size. ?label (str, optional): Axis labels. ?lim (array_like, optional): Axis min. and max. ``None`` means auto. ?ticks (array_like, optional): Axis tick values. ``None`` means auto. ?ticks_rotation (float, optional): Tick rotation in degrees. grid (bool, optional): Whether to draw grid. labels (list, optional): Labels. outpath (str, optional): Path to which the plot is saved to. Should end with ``'.png'``, and ``None`` means to ``const.Dir.tmp``. # # Make directory, if necessary # # FIXME: if xticks is not provided, xticks_fontsize and xticks_rotation have # no effect, which shouldn't be the case # plt.axis('equal') not working, hence the hack of creating a cubic # bounding box Bar plot. Args: y (array_like): N-by-M array of N groups, each with M bars, or N-array of N groups, each with one bar. group_width (float, optional): Width allocated to each group, shared by all bars within the group. Writes - The bar plot. # Ensure y is 2D, with columns representing values within groups # and rows across groups # Group width is shared by all groups # Assume x is evenly spaced # Plot # 3D scatter plot. Args: xyz (array_like): N-by-3 array of N points. colors (array_like or list(str) or str, optional): If N-array, these values are colormapped. If N-list, its elements should be color strings. If a single color string, all points use that color. size (int, optional): Scatter size. equal_axes (bool, optional): Whether to have the same scale for all axes. views (list(tuple), optional): List of elevation-azimuth angle pairs (in degrees). A good set of views is ``[(30, 0), (30, 45), (30, 90), (30, 135)]``. Writes - One or multiple (if ``views`` is provided) views of the 3D plot. # noqa; pylint: disable=unused-import # # Prepare kwargs to scatter() # will be colormapped with color map # Plot # # TODO: this seems to mess up equal axes # Save plot Line/curve plot. Args: xy (array_like): N-by-M array of N x-values (first column) and their corresponding y-values (the remaining M-1 columns). width (float, optional): Line width. marker (str, optional): Marker. marker_size (float, optional): Marker size. Writes - The line plot. # Prepare kwargs to scatter() # Plot # | 2.183945 | 2 |
stagedp/models/action.py | rknaebel/StageDP | 0 | 6615413 | <gh_stars>0
import gzip
import logging
import pickle
from collections import Counter
from operator import itemgetter
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
from stagedp.features.extraction import ActionFeatureGenerator
from stagedp.models.state import ParsingState
from stagedp.utils.other import reverse_dict
class ActionClassifier:
def __init__(self, actionxid_map):
self.actionxid_map = actionxid_map
self.idxaction_map = reverse_dict(actionxid_map)
self.model = Pipeline([
('vectorizer', DictVectorizer()),
# ('variance', VarianceThreshold(threshold=0.0001)),
('model', SGDClassifier(loss='log', penalty='l2', average=32, tol=1e-7, max_iter=1000, n_jobs=-1,
class_weight='balanced'))
# ('model', RandomForestClassifier(n_estimators=1000, max_depth=25, min_samples_split=5, min_samples_leaf=3,
# random_state=0, n_jobs=-1))
])
def train(self, rst_tree_instances, brown_clusters):
""" Perform batch-learning on parsing models action classifier
"""
logging.info('Training classifier for action...')
action_fvs, action_labels = list(zip(*self.generate_train_data(rst_tree_instances, brown_clusters)))
self.model.fit(action_fvs, action_labels)
print(self.model.score(action_fvs, action_labels))
action_preds = self.model.predict(action_fvs)
print(classification_report(action_labels, action_preds))
def predict_probs(self, features):
""" predict labels and rank the decision label with their confidence
value, output labels and probabilities
"""
vals = self.model.predict_proba([features])[0]
action_vals = {}
for idx in range(len(self.idxaction_map)):
action_vals[self.idxaction_map[idx]] = vals[idx]
sorted_actions = sorted(action_vals.items(), key=itemgetter(1), reverse=True)
return sorted_actions
def save(self, fname):
""" Save models
"""
if not fname.endswith('.gz'):
fname += '.gz'
data = {'action_clf': self.model,
'actionxid_map': self.actionxid_map}
with gzip.open(fname, 'wb') as fout:
pickle.dump(data, fout)
logging.info('Save action classifier into file: '
'{} with {} features and {} actions.'.format(fname, self.model['model'].n_features_in_,
len(self.actionxid_map)))
@staticmethod
def load(fname):
""" Load models
"""
data = pickle.load(gzip.open(fname, 'rb'))
actionxid_map = data['actionxid_map']
clf = ActionClassifier(actionxid_map)
clf.model = data['action_clf']
logging.info('Load action classifier from file: '
'{} with {} features and {} actions.'.format(fname, clf.model['model'].n_features_in_,
len(actionxid_map)))
return clf
@staticmethod
def from_data(rst_tree_instances, brown_clusters):
action_cnt = Counter(action for rst_tree in rst_tree_instances
for features, action in generate_action_samples(rst_tree, brown_clusters))
action_map = {a: i for i, a in enumerate(action_cnt)}
logging.info('{} types of actions: {}'.format(len(action_map), action_map.keys()))
for action, cnt in action_cnt.items():
logging.info('{}\t{}'.format(action, cnt))
return ActionClassifier(action_map)
def generate_train_data(self, rst_tree_instances, brown_clusters):
for rst_tree in rst_tree_instances:
for feats, action in generate_action_samples(rst_tree, brown_clusters):
yield feats, self.actionxid_map[action]
def generate_action_samples(rst_tree, bcvocab):
""" Generate action samples from an binary RST tree
:type bcvocab: dict
:param bcvocab: brown clusters of words
"""
# post_nodelist = RstTree.postorder_DFT(rst_tree.tree, [])
# action_list = []
# relation_list = []
action_hist = []
# Initialize queue and stack
queue = rst_tree.get_edu_node()
stack = []
# Start simulating the shift-reduce parsing
sr_parser = ParsingState(stack, queue)
for node in rst_tree.postorder():
if (node.lnode is None) and (node.rnode is None):
action = ('Shift', None)
elif (node.lnode is not None) and (node.rnode is not None):
form = node.form
action = ('Reduce', form)
else:
raise ValueError("Can not decode Shift-Reduce action")
stack, queue = sr_parser.get_status()
# Generate features
action_feats = ActionFeatureGenerator(stack, queue, action_hist, rst_tree.doc, bcvocab).gen_features()
yield action_feats, action
# Change status of stack/queue
# action and relation are necessary here to avoid change rst_trees
sr_parser.operate(action)
action_hist.append(action)
| import gzip
import logging
import pickle
from collections import Counter
from operator import itemgetter
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import classification_report
from sklearn.pipeline import Pipeline
from stagedp.features.extraction import ActionFeatureGenerator
from stagedp.models.state import ParsingState
from stagedp.utils.other import reverse_dict
class ActionClassifier:
def __init__(self, actionxid_map):
self.actionxid_map = actionxid_map
self.idxaction_map = reverse_dict(actionxid_map)
self.model = Pipeline([
('vectorizer', DictVectorizer()),
# ('variance', VarianceThreshold(threshold=0.0001)),
('model', SGDClassifier(loss='log', penalty='l2', average=32, tol=1e-7, max_iter=1000, n_jobs=-1,
class_weight='balanced'))
# ('model', RandomForestClassifier(n_estimators=1000, max_depth=25, min_samples_split=5, min_samples_leaf=3,
# random_state=0, n_jobs=-1))
])
def train(self, rst_tree_instances, brown_clusters):
""" Perform batch-learning on parsing models action classifier
"""
logging.info('Training classifier for action...')
action_fvs, action_labels = list(zip(*self.generate_train_data(rst_tree_instances, brown_clusters)))
self.model.fit(action_fvs, action_labels)
print(self.model.score(action_fvs, action_labels))
action_preds = self.model.predict(action_fvs)
print(classification_report(action_labels, action_preds))
def predict_probs(self, features):
""" predict labels and rank the decision label with their confidence
value, output labels and probabilities
"""
vals = self.model.predict_proba([features])[0]
action_vals = {}
for idx in range(len(self.idxaction_map)):
action_vals[self.idxaction_map[idx]] = vals[idx]
sorted_actions = sorted(action_vals.items(), key=itemgetter(1), reverse=True)
return sorted_actions
def save(self, fname):
""" Save models
"""
if not fname.endswith('.gz'):
fname += '.gz'
data = {'action_clf': self.model,
'actionxid_map': self.actionxid_map}
with gzip.open(fname, 'wb') as fout:
pickle.dump(data, fout)
logging.info('Save action classifier into file: '
'{} with {} features and {} actions.'.format(fname, self.model['model'].n_features_in_,
len(self.actionxid_map)))
@staticmethod
def load(fname):
""" Load models
"""
data = pickle.load(gzip.open(fname, 'rb'))
actionxid_map = data['actionxid_map']
clf = ActionClassifier(actionxid_map)
clf.model = data['action_clf']
logging.info('Load action classifier from file: '
'{} with {} features and {} actions.'.format(fname, clf.model['model'].n_features_in_,
len(actionxid_map)))
return clf
@staticmethod
def from_data(rst_tree_instances, brown_clusters):
action_cnt = Counter(action for rst_tree in rst_tree_instances
for features, action in generate_action_samples(rst_tree, brown_clusters))
action_map = {a: i for i, a in enumerate(action_cnt)}
logging.info('{} types of actions: {}'.format(len(action_map), action_map.keys()))
for action, cnt in action_cnt.items():
logging.info('{}\t{}'.format(action, cnt))
return ActionClassifier(action_map)
def generate_train_data(self, rst_tree_instances, brown_clusters):
for rst_tree in rst_tree_instances:
for feats, action in generate_action_samples(rst_tree, brown_clusters):
yield feats, self.actionxid_map[action]
def generate_action_samples(rst_tree, bcvocab):
""" Generate action samples from an binary RST tree
:type bcvocab: dict
:param bcvocab: brown clusters of words
"""
# post_nodelist = RstTree.postorder_DFT(rst_tree.tree, [])
# action_list = []
# relation_list = []
action_hist = []
# Initialize queue and stack
queue = rst_tree.get_edu_node()
stack = []
# Start simulating the shift-reduce parsing
sr_parser = ParsingState(stack, queue)
for node in rst_tree.postorder():
if (node.lnode is None) and (node.rnode is None):
action = ('Shift', None)
elif (node.lnode is not None) and (node.rnode is not None):
form = node.form
action = ('Reduce', form)
else:
raise ValueError("Can not decode Shift-Reduce action")
stack, queue = sr_parser.get_status()
# Generate features
action_feats = ActionFeatureGenerator(stack, queue, action_hist, rst_tree.doc, bcvocab).gen_features()
yield action_feats, action
# Change status of stack/queue
# action and relation are necessary here to avoid change rst_trees
sr_parser.operate(action)
action_hist.append(action) | en | 0.693413 | # ('variance', VarianceThreshold(threshold=0.0001)), # ('model', RandomForestClassifier(n_estimators=1000, max_depth=25, min_samples_split=5, min_samples_leaf=3, # random_state=0, n_jobs=-1)) Perform batch-learning on parsing models action classifier predict labels and rank the decision label with their confidence value, output labels and probabilities Save models Load models Generate action samples from an binary RST tree :type bcvocab: dict :param bcvocab: brown clusters of words # post_nodelist = RstTree.postorder_DFT(rst_tree.tree, []) # action_list = [] # relation_list = [] # Initialize queue and stack # Start simulating the shift-reduce parsing # Generate features # Change status of stack/queue # action and relation are necessary here to avoid change rst_trees | 2.305685 | 2 |
pyspedas/analysis/wavelet.py | pulupa/pyspedas | 75 | 6615414 | <gh_stars>10-100
"""
Apply a wavelet transformation to every component of a tplot variable.
Notes
-----
Similar to wav_data.pro in IDL SPEDAS.
For pywavelets library, see:
https://pywavelets.readthedocs.io/en/latest/ref/cwt.html
For an example, see:
http://spedas.org/wiki/index.php?title=Wavelet
"""
import numpy as np
import pywt
import pytplot
def wavelet(names, new_names=None, suffix='_pow', wavename='morl', scales=None,
method='fft', sampling_period=1.0):
"""
Find the wavelet transofrmation of a tplot variable.
Parameters
----------
names: str/list of str
List of pytplot names.
new_names: str/list of str, optional
List of new_names for pytplot variables.
If not given, then a suffix is applied.
suffix: str, optional
A suffix to apply. Default is '_pow'.
wavename: str, optional
The name of the continous wavelet function to apply.
Examples: 'gaus1', 'morl', 'cmorlB-C'.
scales: list of float, optional
The wavelet scales to use.
method: str, optional
Either ‘fft’ for frequency domain convolution,
or 'conv' for numpy.convolve.
sampling_period: float, optional
The sampling period for the frequencies output.
Returns
-------
A list of pytplot variables that contain the wavelet power.
"""
varnames = pytplot.split_vec(names)
powervar = []
if len(varnames) < 1:
print('wavelet error: No pytplot names were provided.')
return
if scales is None:
scales = np.arange(1, 128)
for i, old in enumerate(varnames):
old = varnames[i]
if (new_names is not None) and (len(new_names) == len(varnames)):
new = new_names[i]
else:
new = old + suffix
alldata = pytplot.get_data(old)
time = alldata[0]
len_time = len(time)
data = alldata[1]
if len_time < 2:
print('wavelet error: Not enought data points for ' + old)
continue
coef, freqs = pywt.cwt(data, scales=scales, wavelet=wavename,
method=method, sampling_period=sampling_period)
power = np.abs(coef)**2
power = power.transpose()
pytplot.store_data(new, data={'x': time, 'y': power, 'v': freqs})
pytplot.options(new, 'spec', 1)
powervar.append(new)
print('wavelet was applied to: ' + new)
return powervar
| """
Apply a wavelet transformation to every component of a tplot variable.
Notes
-----
Similar to wav_data.pro in IDL SPEDAS.
For pywavelets library, see:
https://pywavelets.readthedocs.io/en/latest/ref/cwt.html
For an example, see:
http://spedas.org/wiki/index.php?title=Wavelet
"""
import numpy as np
import pywt
import pytplot
def wavelet(names, new_names=None, suffix='_pow', wavename='morl', scales=None,
method='fft', sampling_period=1.0):
"""
Find the wavelet transofrmation of a tplot variable.
Parameters
----------
names: str/list of str
List of pytplot names.
new_names: str/list of str, optional
List of new_names for pytplot variables.
If not given, then a suffix is applied.
suffix: str, optional
A suffix to apply. Default is '_pow'.
wavename: str, optional
The name of the continous wavelet function to apply.
Examples: 'gaus1', 'morl', 'cmorlB-C'.
scales: list of float, optional
The wavelet scales to use.
method: str, optional
Either ‘fft’ for frequency domain convolution,
or 'conv' for numpy.convolve.
sampling_period: float, optional
The sampling period for the frequencies output.
Returns
-------
A list of pytplot variables that contain the wavelet power.
"""
varnames = pytplot.split_vec(names)
powervar = []
if len(varnames) < 1:
print('wavelet error: No pytplot names were provided.')
return
if scales is None:
scales = np.arange(1, 128)
for i, old in enumerate(varnames):
old = varnames[i]
if (new_names is not None) and (len(new_names) == len(varnames)):
new = new_names[i]
else:
new = old + suffix
alldata = pytplot.get_data(old)
time = alldata[0]
len_time = len(time)
data = alldata[1]
if len_time < 2:
print('wavelet error: Not enought data points for ' + old)
continue
coef, freqs = pywt.cwt(data, scales=scales, wavelet=wavename,
method=method, sampling_period=sampling_period)
power = np.abs(coef)**2
power = power.transpose()
pytplot.store_data(new, data={'x': time, 'y': power, 'v': freqs})
pytplot.options(new, 'spec', 1)
powervar.append(new)
print('wavelet was applied to: ' + new)
return powervar | en | 0.522018 | Apply a wavelet transformation to every component of a tplot variable. Notes ----- Similar to wav_data.pro in IDL SPEDAS. For pywavelets library, see: https://pywavelets.readthedocs.io/en/latest/ref/cwt.html For an example, see: http://spedas.org/wiki/index.php?title=Wavelet Find the wavelet transofrmation of a tplot variable. Parameters ---------- names: str/list of str List of pytplot names. new_names: str/list of str, optional List of new_names for pytplot variables. If not given, then a suffix is applied. suffix: str, optional A suffix to apply. Default is '_pow'. wavename: str, optional The name of the continous wavelet function to apply. Examples: 'gaus1', 'morl', 'cmorlB-C'. scales: list of float, optional The wavelet scales to use. method: str, optional Either ‘fft’ for frequency domain convolution, or 'conv' for numpy.convolve. sampling_period: float, optional The sampling period for the frequencies output. Returns ------- A list of pytplot variables that contain the wavelet power. | 3.438223 | 3 |
test_celltype_specificity/test_mark_specificity.py | ernstlab/full_stack_chromHMM | 0 | 6615415 | <gh_stars>0
import pandas as pd
import numpy as np
import os
import sys
import analysis_helper as helper
from scipy.stats import mannwhitneyu
emission_fn = "/Users/vuthaiha/Desktop/window_hoff/ROADMAP_aligned_reads/chromHMM_model/model_100_state/emissions_100.txt"
meta_fn = '/Users/vuthaiha/Desktop/window_hoff/ROADMAP_aligned_reads/ROADMAP_metadata_july2013.csv'
output_folder = "/Users/vuthaiha/Desktop/window_hoff/ROADMAP_aligned_reads/chromHMM_model/model_100_state/chrom_mark_spec_test/"
ALPHA = 0.01
def color_significant_pval(pval_row, threshold):
# pval_row: a rows of p-values
white_color = '#ffffff' # white
blue_color = '#85BCE5' # light blue
red_color = '#FF7F7F' # light red
results = pd.Series(['background-color: %s' % white_color for x in pval_row])
results.index = pval_row.index
# change colors to blue if below the thresholds
below_threshold_indices = (pval_row <= threshold)
results[below_threshold_indices] = 'background-color: %s' % blue_color
results[pval_row == pval_row.min()] = 'background-color: %s' % red_color
return results
def get_emission_matrix_df (emission_fn, meta_fn, num_state):
emission_df = pd.read_csv(emission_fn, header = 0, index_col = 0, sep = '\t')
emission_df = emission_df.transpose()
emission_df.reset_index(inplace = True)
emission_df.columns = ['experiment'] + map(lambda x: 'S' + str(x+1), range(num_state))
emission_df['chrom_mark'] = emission_df['experiment'].apply(lambda x: x.split('-')[1])
emission_df['ct'] = emission_df['experiment'].apply(lambda x: x.split('-')[0])
meta_df = pd.read_csv(meta_fn, header = 0, sep = ',')
meta_df = meta_df.rename(columns = {'Epigenome ID (EID)' : 'ct'})
meta_df = meta_df[['ct', 'GROUP', 'ANATOMY']]
emission_df = pd.merge(emission_df, meta_df, how = 'left', left_on = 'ct', right_on = 'ct')
return emission_df
def test_chrom_mark_specificity (emission_df, num_state):
total_number_test = 0
count_mark = emission_df.chrom_mark.value_counts()
marks_to_test = count_mark.index[count_mark > 15]
result_df = pd.DataFrame(columns = ['state'] + list(marks_to_test))
# result_df: columns: marks that we want to test the significance of higher emission probabilities, rows: different states
result_df['state'] = map(lambda x: 'S' + str(x+1), range(num_state))
num_tests = num_state * len(marks_to_test)
for chromM in marks_to_test:
this_chromM_df = emission_df[emission_df['chrom_mark'] == chromM]
other_chromM_df = emission_df[emission_df['chrom_mark'] != chromM]
this_mark_results = []
for state_index in range(num_state):
x = this_chromM_df['S' + str(state_index + 1)]
y = other_chromM_df['S' + str(state_index + 1)]
t = mannwhitneyu(x, y, use_continuity = False, alternative = 'greater')
this_mark_results.append(t.pvalue)
result_df[chromM] = this_mark_results
return result_df, num_tests
def paint_result_excel(result_df, num_tests, output_fn):
threshold = ALPHA / float(num_tests)
colored_df = result_df.style.apply(lambda x: color_significant_pval(x, threshold), axis = 1, subset = pd.IndexSlice[:, result_df.columns[1:]]) #exclude coloring the first column which is state annotation
writer = pd.ExcelWriter(output_fn, engine = 'xlsxwriter')
colored_df.to_excel(writer, sheet_name = 'mark_specificity')
writer.save()
def main():
num_state = 100
emission_df = get_emission_matrix_df(emission_fn, meta_fn, num_state)
print "Done getting emission_df"
result_df, num_tests = test_chrom_mark_specificity(emission_df, num_state)
print "Done getting result_df"
output_fn = os.path.join(output_folder, 'test_chrom_mark_specificity.xlsx')
paint_result_excel(result_df, num_tests, output_fn)
print "Done paint_result_excel!"
main() | import pandas as pd
import numpy as np
import os
import sys
import analysis_helper as helper
from scipy.stats import mannwhitneyu
emission_fn = "/Users/vuthaiha/Desktop/window_hoff/ROADMAP_aligned_reads/chromHMM_model/model_100_state/emissions_100.txt"
meta_fn = '/Users/vuthaiha/Desktop/window_hoff/ROADMAP_aligned_reads/ROADMAP_metadata_july2013.csv'
output_folder = "/Users/vuthaiha/Desktop/window_hoff/ROADMAP_aligned_reads/chromHMM_model/model_100_state/chrom_mark_spec_test/"
ALPHA = 0.01
def color_significant_pval(pval_row, threshold):
# pval_row: a rows of p-values
white_color = '#ffffff' # white
blue_color = '#85BCE5' # light blue
red_color = '#FF7F7F' # light red
results = pd.Series(['background-color: %s' % white_color for x in pval_row])
results.index = pval_row.index
# change colors to blue if below the thresholds
below_threshold_indices = (pval_row <= threshold)
results[below_threshold_indices] = 'background-color: %s' % blue_color
results[pval_row == pval_row.min()] = 'background-color: %s' % red_color
return results
def get_emission_matrix_df (emission_fn, meta_fn, num_state):
emission_df = pd.read_csv(emission_fn, header = 0, index_col = 0, sep = '\t')
emission_df = emission_df.transpose()
emission_df.reset_index(inplace = True)
emission_df.columns = ['experiment'] + map(lambda x: 'S' + str(x+1), range(num_state))
emission_df['chrom_mark'] = emission_df['experiment'].apply(lambda x: x.split('-')[1])
emission_df['ct'] = emission_df['experiment'].apply(lambda x: x.split('-')[0])
meta_df = pd.read_csv(meta_fn, header = 0, sep = ',')
meta_df = meta_df.rename(columns = {'Epigenome ID (EID)' : 'ct'})
meta_df = meta_df[['ct', 'GROUP', 'ANATOMY']]
emission_df = pd.merge(emission_df, meta_df, how = 'left', left_on = 'ct', right_on = 'ct')
return emission_df
def test_chrom_mark_specificity (emission_df, num_state):
total_number_test = 0
count_mark = emission_df.chrom_mark.value_counts()
marks_to_test = count_mark.index[count_mark > 15]
result_df = pd.DataFrame(columns = ['state'] + list(marks_to_test))
# result_df: columns: marks that we want to test the significance of higher emission probabilities, rows: different states
result_df['state'] = map(lambda x: 'S' + str(x+1), range(num_state))
num_tests = num_state * len(marks_to_test)
for chromM in marks_to_test:
this_chromM_df = emission_df[emission_df['chrom_mark'] == chromM]
other_chromM_df = emission_df[emission_df['chrom_mark'] != chromM]
this_mark_results = []
for state_index in range(num_state):
x = this_chromM_df['S' + str(state_index + 1)]
y = other_chromM_df['S' + str(state_index + 1)]
t = mannwhitneyu(x, y, use_continuity = False, alternative = 'greater')
this_mark_results.append(t.pvalue)
result_df[chromM] = this_mark_results
return result_df, num_tests
def paint_result_excel(result_df, num_tests, output_fn):
threshold = ALPHA / float(num_tests)
colored_df = result_df.style.apply(lambda x: color_significant_pval(x, threshold), axis = 1, subset = pd.IndexSlice[:, result_df.columns[1:]]) #exclude coloring the first column which is state annotation
writer = pd.ExcelWriter(output_fn, engine = 'xlsxwriter')
colored_df.to_excel(writer, sheet_name = 'mark_specificity')
writer.save()
def main():
num_state = 100
emission_df = get_emission_matrix_df(emission_fn, meta_fn, num_state)
print "Done getting emission_df"
result_df, num_tests = test_chrom_mark_specificity(emission_df, num_state)
print "Done getting result_df"
output_fn = os.path.join(output_folder, 'test_chrom_mark_specificity.xlsx')
paint_result_excel(result_df, num_tests, output_fn)
print "Done paint_result_excel!"
main() | en | 0.773057 | # pval_row: a rows of p-values # white # light blue # light red # change colors to blue if below the thresholds # result_df: columns: marks that we want to test the significance of higher emission probabilities, rows: different states #exclude coloring the first column which is state annotation | 2.546228 | 3 |
tf_fastmri_data/preprocessing_utils/size_adjustment.py | chaithyagr/tf-fastmri-data | 1 | 6615416 | import tensorflow as tf
def pad(x, size):
shape = tf.shape(x)[-1]
to_pad = size[-1] - shape
padding = [(0, 0) for _ in range(len(tf.shape(x)) - 1)]
padding.append((to_pad//2, to_pad//2))
padded_x = tf.pad(
x,
padding,
)
return padded_x
def crop(x, size):
shape = tf.shape(x)[-2:]
to_crop = shape - size
cropped_x = x[
...,
to_crop[0]//2:shape[0]-to_crop[0]//2,
to_crop[1]//2:shape[1]-to_crop[1]//2,
]
return cropped_x
def adjust_image_size(image, target_image_size, multicoil=False):
"""Resize an image to a target size using centered cropping or padding
Args:
- image (tf.Tensor): an image with dimensions (n_slices, n_coils, height, width)
- target_image_size (list or tuple): the height and width for the output image
- multicoil (bool): defaults to False. Whether the image has a coil dimension.
Returns:
- tf.Tensor: a size-adjusted image
"""
height = tf.shape(image)[-2]
width = tf.shape(image)[-1]
n_slices = tf.shape(image)[0]
transpose_axis = [1, 2, 0] if not multicoil else [2, 3, 0, 1]
transposed_image = tf.transpose(image, transpose_axis)
reshaped_image = tf.reshape(transposed_image, [height, width, -1]) # 3D tensors accepted
# with channels dimension last
target_height = target_image_size[0]
target_width = target_image_size[1]
padded_image = tf.image.resize_with_crop_or_pad(
reshaped_image,
target_height,
target_width,
)
if multicoil:
final_shape = [target_height, target_width, n_slices, -1]
else:
final_shape = [target_height, target_width, n_slices]
reshaped_padded_image = tf.reshape(padded_image, final_shape)
transpose_axis = [2, 0, 1] if not multicoil else [2, 3, 0, 1]
transpose_padded_image = tf.transpose(reshaped_padded_image, transpose_axis)
return transpose_padded_image
| import tensorflow as tf
def pad(x, size):
shape = tf.shape(x)[-1]
to_pad = size[-1] - shape
padding = [(0, 0) for _ in range(len(tf.shape(x)) - 1)]
padding.append((to_pad//2, to_pad//2))
padded_x = tf.pad(
x,
padding,
)
return padded_x
def crop(x, size):
shape = tf.shape(x)[-2:]
to_crop = shape - size
cropped_x = x[
...,
to_crop[0]//2:shape[0]-to_crop[0]//2,
to_crop[1]//2:shape[1]-to_crop[1]//2,
]
return cropped_x
def adjust_image_size(image, target_image_size, multicoil=False):
"""Resize an image to a target size using centered cropping or padding
Args:
- image (tf.Tensor): an image with dimensions (n_slices, n_coils, height, width)
- target_image_size (list or tuple): the height and width for the output image
- multicoil (bool): defaults to False. Whether the image has a coil dimension.
Returns:
- tf.Tensor: a size-adjusted image
"""
height = tf.shape(image)[-2]
width = tf.shape(image)[-1]
n_slices = tf.shape(image)[0]
transpose_axis = [1, 2, 0] if not multicoil else [2, 3, 0, 1]
transposed_image = tf.transpose(image, transpose_axis)
reshaped_image = tf.reshape(transposed_image, [height, width, -1]) # 3D tensors accepted
# with channels dimension last
target_height = target_image_size[0]
target_width = target_image_size[1]
padded_image = tf.image.resize_with_crop_or_pad(
reshaped_image,
target_height,
target_width,
)
if multicoil:
final_shape = [target_height, target_width, n_slices, -1]
else:
final_shape = [target_height, target_width, n_slices]
reshaped_padded_image = tf.reshape(padded_image, final_shape)
transpose_axis = [2, 0, 1] if not multicoil else [2, 3, 0, 1]
transpose_padded_image = tf.transpose(reshaped_padded_image, transpose_axis)
return transpose_padded_image
| en | 0.725692 | Resize an image to a target size using centered cropping or padding Args: - image (tf.Tensor): an image with dimensions (n_slices, n_coils, height, width) - target_image_size (list or tuple): the height and width for the output image - multicoil (bool): defaults to False. Whether the image has a coil dimension. Returns: - tf.Tensor: a size-adjusted image # 3D tensors accepted # with channels dimension last | 3.263193 | 3 |
matrix_monzo/http/__init__.py | babolivier/matrix-monzo | 2 | 6615417 | import logging
import os
from aiohttp import web
from matrix_monzo.http.handlers.auth_callback import AuthCallbackHandler
from matrix_monzo.utils.instance import Instance
logger = logging.getLogger(__name__)
async def start_http(instance: Instance):
app = web.Application()
current_dir = os.path.dirname(__file__)
static_path = os.path.join(current_dir, "../../res/static")
auth_callback = AuthCallbackHandler(instance)
app.add_routes([
web.static("/static", static_path),
web.get("/auth_callback", auth_callback.handler),
])
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, instance.config.http_address, instance.config.http_port)
await site.start()
logger.info("Started HTTP site")
| import logging
import os
from aiohttp import web
from matrix_monzo.http.handlers.auth_callback import AuthCallbackHandler
from matrix_monzo.utils.instance import Instance
logger = logging.getLogger(__name__)
async def start_http(instance: Instance):
app = web.Application()
current_dir = os.path.dirname(__file__)
static_path = os.path.join(current_dir, "../../res/static")
auth_callback = AuthCallbackHandler(instance)
app.add_routes([
web.static("/static", static_path),
web.get("/auth_callback", auth_callback.handler),
])
runner = web.AppRunner(app)
await runner.setup()
site = web.TCPSite(runner, instance.config.http_address, instance.config.http_port)
await site.start()
logger.info("Started HTTP site")
| none | 1 | 2.063775 | 2 | |
lokki/data_transform/__init__.py | bzhanglab/Lokki | 0 | 6615418 | <reponame>bzhanglab/Lokki
from .data_transformation_choice import DataTransformationChoice
from .log import Log
from .void import NoPreprocessing
from .zscore import ZScore
| from .data_transformation_choice import DataTransformationChoice
from .log import Log
from .void import NoPreprocessing
from .zscore import ZScore | none | 1 | 1.02159 | 1 | |
C++/May Long/Isolation Centers.py | Teja-09/CodeChef | 0 | 6615419 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Sat May 2 13:09:56 2020
@author: teja
"""
t = int(input())
while(t):
n, q = map(int, input().split())
li = list(input())
se = set(li)
dic = {}
for i in se:
dic[i] = li.count(i)
for i in range(q):
c = int(input())
ans = 0
for val in dic.values():
if val > c:
ans = ans + (val - c)
print(ans)
t-=1
| # -*- coding: utf-8 -*-
"""
Created on Sat May 2 13:09:56 2020
@author: teja
"""
t = int(input())
while(t):
n, q = map(int, input().split())
li = list(input())
se = set(li)
dic = {}
for i in se:
dic[i] = li.count(i)
for i in range(q):
c = int(input())
ans = 0
for val in dic.values():
if val > c:
ans = ans + (val - c)
print(ans)
t-=1 | en | 0.747116 | # -*- coding: utf-8 -*- Created on Sat May 2 13:09:56 2020 @author: teja | 2.770755 | 3 |
python/plrf.py | wittrup/crap | 1 | 6615420 | def csc(rawstr): #check sum control
checksum = ord(rawstr[0])
for char in rawstr[1:-2]:
checksum += ord(char)
return hex(checksum & 0xFF)[2:] == rawstr[-2:].lower()
def r2v(rawstr):
stchr = rawstr[0]
value = rawstr[1:-2]
if stchr in ['d', 'a', 'e']:
value = int(value) * 0.1
if stchr is 'd':
value = value * 0.1
elif stchr is 'e' and value > 3141.5:
value = value - 6283.2
return (stchr, value)
if __name__ == '__main__':
pattern = '(\w)(\d{2}[\dE]\d{3})([0-9A-Fa-f]{2})'
examples = ['d00575095', 'a03825699', 'e0614569B', 'R00E3018B', 'a04372495', 'e06204495', 'd01010086', 'C00E21782', 'C00E21782']
for rawstr in examples:
print(csc(rawstr), rawstr, r2v(rawstr))
| def csc(rawstr): #check sum control
checksum = ord(rawstr[0])
for char in rawstr[1:-2]:
checksum += ord(char)
return hex(checksum & 0xFF)[2:] == rawstr[-2:].lower()
def r2v(rawstr):
stchr = rawstr[0]
value = rawstr[1:-2]
if stchr in ['d', 'a', 'e']:
value = int(value) * 0.1
if stchr is 'd':
value = value * 0.1
elif stchr is 'e' and value > 3141.5:
value = value - 6283.2
return (stchr, value)
if __name__ == '__main__':
pattern = '(\w)(\d{2}[\dE]\d{3})([0-9A-Fa-f]{2})'
examples = ['d00575095', 'a03825699', 'e0614569B', 'R00E3018B', 'a04372495', 'e06204495', 'd01010086', 'C00E21782', 'C00E21782']
for rawstr in examples:
print(csc(rawstr), rawstr, r2v(rawstr))
| en | 0.477959 | #check sum control | 3.334414 | 3 |
galley/worker.py | pybee/galley | 21 | 6615421 | <filename>galley/worker.py<gh_stars>10-100
from collections import namedtuple
import re
import os
from sphinx.application import Sphinx
######################################################################
# Command types
######################################################################
ReloadConfig = namedtuple('ReloadConfig', [])
BuildAll = namedtuple('BuildAll', [])
BuildSpecific = namedtuple('BuildSpecific', ['filenames'])
Quit = namedtuple('Quit', [])
######################################################################
# Output message types
######################################################################
Output = namedtuple('Output', ['message'])
WarningOutput = namedtuple('Warning', ['filename', 'lineno', 'message'])
Progress = namedtuple('Progress', ['stage', 'progress', 'context'])
InitializationStart = namedtuple('InitializationStart', [])
InitializationEnd = namedtuple('InitializationEnd', ['extension'])
BuildStart = namedtuple('BuildStart', ['filenames'])
BuildEnd = namedtuple('BuildEnd', ['filenames'])
######################################################################
# Sphinx handler
######################################################################
class ANSIOutputHandler(object):
"A File-like object that puts output onto a queue, stripping ANSI codes."
def __init__(self, queue):
self.queue = queue
self.buffer = []
def write(self, data):
"Write the given data to the buffer"
start = 0
end = 0
# The data provided by Sphinx may contain ANSI escape sequences. Strip them out.
while end < len(data):
ch = data[end]
if ch == '\x1b':
# Insert any accumulated text with the current mode
self.buffer.append(data[start:end])
# Read the escape code
# mode = data[end + 1]
end = end + 2
params = []
while ord(data[end]) not in range(64, 127):
param = []
while ord(data[end]) not in range(64, 127) and data[end] != ';':
param.append(data[end])
end = end + 1
params.append(int(''.join(param)))
if data[end] == ';':
end = end + 1
# command = data[end]
end = end + 1
start = end
elif ch == '\r' or ch == '\n':
self.buffer.append(data[start:end])
self.flush()
start = end + 1
end = end + 1
self.buffer.append(data[start:end])
def flush(self):
"Flush the current buffer"
if self.buffer:
self.emit(''.join(self.buffer))
self.buffer = []
def emit(self, content):
"""Internal method to actually put the content onto the output queue
Override on subclasses to do parsed content handling.
"""
self.queue.put(Output(message=content))
SIMPLE_PROGRESS_RE = re.compile(r'(.+)\.\.\.$')
PERCENT_PROGRESS_RE = re.compile(r'([\w\s]+)\.\.\. \[([\s\d]{3})\%\] (.+)')
class SphinxStatusHandler(ANSIOutputHandler):
"A Sphinx output handler for normal status update, stripping ANSI codes."
def __init__(self, *args, **kwargs):
super(SphinxStatusHandler, self).__init__(*args, **kwargs)
self.task = None
def emit(self, content):
content = content.strip()
if content:
# Always output the status literally
self.queue.put(Output(content))
# Also check for certain key content, and output special messages
if self.task:
# There is an outstanding simple task. If we've got output, it
# means we've completed that task.
self.queue.put(Progress(stage=self.task, progress=100, context=None))
self.task = None
else:
# Check for simple progress: 'doing stuff...'
progress_match = SIMPLE_PROGRESS_RE.match(content)
if progress_match:
self.task = progress_match.group(1)
else:
# Check for percent progress: 'doing stuff...'
progress_match = PERCENT_PROGRESS_RE.match(content)
if progress_match:
self.queue.put(
Progress(
stage=progress_match.group(1),
progress=int(progress_match.group(2)),
context=progress_match.group(3)
)
)
class SphinxWarningHandler(ANSIOutputHandler):
"""A Sphinx output handler for, stripping ANSI codes..
Parses warning content to extract context.
"""
def emit(self, content):
content = content.strip()
if content:
if content.startswith('WARNING: '):
self.queue.put(WarningOutput(filename=None, lineno=None, message=content[9:]))
else:
parts = content.split(':')
self.queue.put(
WarningOutput(
filename=parts[0],
lineno=int(parts[1]) if parts[1] else None,
message=':'.join(parts[3:]).strip())
)
def sphinx_worker(base_path, work_queue, output_queue):
"A background worker thread performing Sphinx compilations"
# Set up the Sphinx instance
srcdir = base_path
confdir = srcdir
outdir = os.path.join(srcdir, '_build', 'json')
freshenv = False
warningiserror = False
buildername = 'json'
# verbosity = 0
# parallel = 0
status = SphinxStatusHandler(output_queue)
warning = SphinxWarningHandler(output_queue)
# error = sys.stderr
# warnfile = None
confoverrides = {}
tags = []
doctreedir = os.path.join(outdir, '.doctrees')
output_queue.put(InitializationStart())
sphinx = Sphinx(srcdir, confdir, outdir, doctreedir, buildername,
confoverrides, status, warning, freshenv,
warningiserror, tags)
output_queue.put(InitializationEnd(extension=sphinx.config.source_suffix))
quit = False
while not quit:
# Get the next command off the work queue
cmd = work_queue.get(block=True)
if isinstance(cmd, Quit):
quit = True
elif isinstance(cmd, ReloadConfig):
output_queue.put(InitializationStart())
freshenv = True
sphinx = Sphinx(srcdir, confdir, outdir, doctreedir, buildername,
confoverrides, status, warning, freshenv,
warningiserror, tags)
output_queue.put(InitializationEnd(extension=sphinx.config.source_suffix))
elif isinstance(cmd, BuildAll):
output_queue.put(BuildStart(filenames=None))
sphinx.builder.build_all()
output_queue.put(BuildEnd(filenames=None))
elif isinstance(cmd, BuildSpecific):
output_queue.put(BuildStart(filenames=cmd.filenames))
sphinx.builder.build_specific(cmd.filenames)
output_queue.put(BuildEnd(filenames=cmd.filenames))
# Reset the warning count so that they don't accumulate between builds.
sphinx._warncount = 0
| <filename>galley/worker.py<gh_stars>10-100
from collections import namedtuple
import re
import os
from sphinx.application import Sphinx
######################################################################
# Command types
######################################################################
ReloadConfig = namedtuple('ReloadConfig', [])
BuildAll = namedtuple('BuildAll', [])
BuildSpecific = namedtuple('BuildSpecific', ['filenames'])
Quit = namedtuple('Quit', [])
######################################################################
# Output message types
######################################################################
Output = namedtuple('Output', ['message'])
WarningOutput = namedtuple('Warning', ['filename', 'lineno', 'message'])
Progress = namedtuple('Progress', ['stage', 'progress', 'context'])
InitializationStart = namedtuple('InitializationStart', [])
InitializationEnd = namedtuple('InitializationEnd', ['extension'])
BuildStart = namedtuple('BuildStart', ['filenames'])
BuildEnd = namedtuple('BuildEnd', ['filenames'])
######################################################################
# Sphinx handler
######################################################################
class ANSIOutputHandler(object):
"A File-like object that puts output onto a queue, stripping ANSI codes."
def __init__(self, queue):
self.queue = queue
self.buffer = []
def write(self, data):
"Write the given data to the buffer"
start = 0
end = 0
# The data provided by Sphinx may contain ANSI escape sequences. Strip them out.
while end < len(data):
ch = data[end]
if ch == '\x1b':
# Insert any accumulated text with the current mode
self.buffer.append(data[start:end])
# Read the escape code
# mode = data[end + 1]
end = end + 2
params = []
while ord(data[end]) not in range(64, 127):
param = []
while ord(data[end]) not in range(64, 127) and data[end] != ';':
param.append(data[end])
end = end + 1
params.append(int(''.join(param)))
if data[end] == ';':
end = end + 1
# command = data[end]
end = end + 1
start = end
elif ch == '\r' or ch == '\n':
self.buffer.append(data[start:end])
self.flush()
start = end + 1
end = end + 1
self.buffer.append(data[start:end])
def flush(self):
"Flush the current buffer"
if self.buffer:
self.emit(''.join(self.buffer))
self.buffer = []
def emit(self, content):
"""Internal method to actually put the content onto the output queue
Override on subclasses to do parsed content handling.
"""
self.queue.put(Output(message=content))
SIMPLE_PROGRESS_RE = re.compile(r'(.+)\.\.\.$')
PERCENT_PROGRESS_RE = re.compile(r'([\w\s]+)\.\.\. \[([\s\d]{3})\%\] (.+)')
class SphinxStatusHandler(ANSIOutputHandler):
"A Sphinx output handler for normal status update, stripping ANSI codes."
def __init__(self, *args, **kwargs):
super(SphinxStatusHandler, self).__init__(*args, **kwargs)
self.task = None
def emit(self, content):
content = content.strip()
if content:
# Always output the status literally
self.queue.put(Output(content))
# Also check for certain key content, and output special messages
if self.task:
# There is an outstanding simple task. If we've got output, it
# means we've completed that task.
self.queue.put(Progress(stage=self.task, progress=100, context=None))
self.task = None
else:
# Check for simple progress: 'doing stuff...'
progress_match = SIMPLE_PROGRESS_RE.match(content)
if progress_match:
self.task = progress_match.group(1)
else:
# Check for percent progress: 'doing stuff...'
progress_match = PERCENT_PROGRESS_RE.match(content)
if progress_match:
self.queue.put(
Progress(
stage=progress_match.group(1),
progress=int(progress_match.group(2)),
context=progress_match.group(3)
)
)
class SphinxWarningHandler(ANSIOutputHandler):
"""A Sphinx output handler for, stripping ANSI codes..
Parses warning content to extract context.
"""
def emit(self, content):
content = content.strip()
if content:
if content.startswith('WARNING: '):
self.queue.put(WarningOutput(filename=None, lineno=None, message=content[9:]))
else:
parts = content.split(':')
self.queue.put(
WarningOutput(
filename=parts[0],
lineno=int(parts[1]) if parts[1] else None,
message=':'.join(parts[3:]).strip())
)
def sphinx_worker(base_path, work_queue, output_queue):
"A background worker thread performing Sphinx compilations"
# Set up the Sphinx instance
srcdir = base_path
confdir = srcdir
outdir = os.path.join(srcdir, '_build', 'json')
freshenv = False
warningiserror = False
buildername = 'json'
# verbosity = 0
# parallel = 0
status = SphinxStatusHandler(output_queue)
warning = SphinxWarningHandler(output_queue)
# error = sys.stderr
# warnfile = None
confoverrides = {}
tags = []
doctreedir = os.path.join(outdir, '.doctrees')
output_queue.put(InitializationStart())
sphinx = Sphinx(srcdir, confdir, outdir, doctreedir, buildername,
confoverrides, status, warning, freshenv,
warningiserror, tags)
output_queue.put(InitializationEnd(extension=sphinx.config.source_suffix))
quit = False
while not quit:
# Get the next command off the work queue
cmd = work_queue.get(block=True)
if isinstance(cmd, Quit):
quit = True
elif isinstance(cmd, ReloadConfig):
output_queue.put(InitializationStart())
freshenv = True
sphinx = Sphinx(srcdir, confdir, outdir, doctreedir, buildername,
confoverrides, status, warning, freshenv,
warningiserror, tags)
output_queue.put(InitializationEnd(extension=sphinx.config.source_suffix))
elif isinstance(cmd, BuildAll):
output_queue.put(BuildStart(filenames=None))
sphinx.builder.build_all()
output_queue.put(BuildEnd(filenames=None))
elif isinstance(cmd, BuildSpecific):
output_queue.put(BuildStart(filenames=cmd.filenames))
sphinx.builder.build_specific(cmd.filenames)
output_queue.put(BuildEnd(filenames=cmd.filenames))
# Reset the warning count so that they don't accumulate between builds.
sphinx._warncount = 0
| en | 0.349851 | ###################################################################### # Command types ###################################################################### ###################################################################### # Output message types ###################################################################### ###################################################################### # Sphinx handler ###################################################################### # The data provided by Sphinx may contain ANSI escape sequences. Strip them out. # Insert any accumulated text with the current mode # Read the escape code # mode = data[end + 1] # command = data[end] Internal method to actually put the content onto the output queue Override on subclasses to do parsed content handling. # Always output the status literally # Also check for certain key content, and output special messages # There is an outstanding simple task. If we've got output, it # means we've completed that task. # Check for simple progress: 'doing stuff...' # Check for percent progress: 'doing stuff...' A Sphinx output handler for, stripping ANSI codes.. Parses warning content to extract context. # Set up the Sphinx instance # verbosity = 0 # parallel = 0 # error = sys.stderr # warnfile = None # Get the next command off the work queue # Reset the warning count so that they don't accumulate between builds. | 2.216965 | 2 |
107_binary_tree_level_order_traversal_ii.py | gengwg/leetcode | 2 | 6615422 | """
107. Binary Tree Level Order Traversal II
Given a binary tree, return the bottom-up level order traversal of its nodes' values.
(ie, from left to right, level by level from leaf to root).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its bottom-up level order traversal as:
[
[15,7],
[9,20],
[3]
]
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
# same as 102. just add reverse the result.
def levelOrderBottom(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
res = []
self.preorder(root, 0, res)
# this not work, because reverse() modifies list in place, returns null.
# ans = res.reverse()
res.reverse()
# this not work, because reversed() returns list_reverseiterator not a list.
# return reversed(res)
return res
def preorder(self, root, level, res):
if root:
if len(res) < level + 1:
res.append([])
res[level].append(root.val)
self.preorder(root.left, level + 1, res)
self.preorder(root.right, level + 1, res)
# not using reverse
def levelOrderBottom(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
res = []
self.preorder(root, 0, res)
return res
def preorder(self, root, level, res):
if root:
if len(res) <= level:
res.insert(0, [])
res[len(res)-level-1].append(root.val)
self.preorder(root.left, level+1, res)
self.preorder(root.right, level+1, res)
# use queue
def levelOrder(self, root):
res = []
if not root:
return res
queue = [root] # store nodes at next level
while queue:
level = [] # store values at each level
for _ in range(len(queue)):
x = queue.pop(0)
level.append(x.val)
if x.left:
queue.append(x.left)
if x.right:
queue.append(x.right)
res.insert(0,level)
return res
~
| """
107. Binary Tree Level Order Traversal II
Given a binary tree, return the bottom-up level order traversal of its nodes' values.
(ie, from left to right, level by level from leaf to root).
For example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its bottom-up level order traversal as:
[
[15,7],
[9,20],
[3]
]
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
# same as 102. just add reverse the result.
def levelOrderBottom(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
res = []
self.preorder(root, 0, res)
# this not work, because reverse() modifies list in place, returns null.
# ans = res.reverse()
res.reverse()
# this not work, because reversed() returns list_reverseiterator not a list.
# return reversed(res)
return res
def preorder(self, root, level, res):
if root:
if len(res) < level + 1:
res.append([])
res[level].append(root.val)
self.preorder(root.left, level + 1, res)
self.preorder(root.right, level + 1, res)
# not using reverse
def levelOrderBottom(self, root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
res = []
self.preorder(root, 0, res)
return res
def preorder(self, root, level, res):
if root:
if len(res) <= level:
res.insert(0, [])
res[len(res)-level-1].append(root.val)
self.preorder(root.left, level+1, res)
self.preorder(root.right, level+1, res)
# use queue
def levelOrder(self, root):
res = []
if not root:
return res
queue = [root] # store nodes at next level
while queue:
level = [] # store values at each level
for _ in range(len(queue)):
x = queue.pop(0)
level.append(x.val)
if x.left:
queue.append(x.left)
if x.right:
queue.append(x.right)
res.insert(0,level)
return res
~
| en | 0.610431 | 107. Binary Tree Level Order Traversal II Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root). For example: Given binary tree [3,9,20,null,null,15,7], 3 / \ 9 20 / \ 15 7 return its bottom-up level order traversal as: [ [15,7], [9,20], [3] ] # Definition for a binary tree node. # same as 102. just add reverse the result. :type root: TreeNode :rtype: List[List[int]] # this not work, because reverse() modifies list in place, returns null. # ans = res.reverse() # this not work, because reversed() returns list_reverseiterator not a list. # return reversed(res) # not using reverse :type root: TreeNode :rtype: List[List[int]] # use queue # store nodes at next level # store values at each level | 3.922708 | 4 |
myapp/routes.py | NCAR/benchmarking-web | 0 | 6615423 | <filename>myapp/routes.py
import pathlib
from .views import index
PROJECT_ROOT = pathlib.Path(__file__).parent
def setup_routes(app):
app.router.add_get('/', index)
setup_static_routes(app)
def setup_static_routes(app):
app.router.add_static('/static/', path=PROJECT_ROOT / 'static', name='static')
| <filename>myapp/routes.py
import pathlib
from .views import index
PROJECT_ROOT = pathlib.Path(__file__).parent
def setup_routes(app):
app.router.add_get('/', index)
setup_static_routes(app)
def setup_static_routes(app):
app.router.add_static('/static/', path=PROJECT_ROOT / 'static', name='static')
| none | 1 | 2.107235 | 2 | |
enthought/envisage/ui/single_project/action/close_project_action.py | enthought/etsproxy | 3 | 6615424 | # proxy module
from __future__ import absolute_import
from envisage.ui.single_project.action.close_project_action import *
| # proxy module
from __future__ import absolute_import
from envisage.ui.single_project.action.close_project_action import *
| es | 0.125187 | # proxy module | 1.126211 | 1 |
wavepytools/imaging/single_grating/relative_phase.py | APS-XSD-OPT-Group/wavepytools | 3 | 6615425 | '''
this program is used to calculate the relative phase change of different wavepy reconstruction results.
'''
import os
import sys
import numpy as np
import glob
import tkinter as tk
from tkinter import filedialog
from matplotlib import pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
def save_sdf_file(array, pixelsize=[1, 1], fname='output.sdf', extraHeader={}):
'''
Save an 2D array in the `Surface Data File Format (SDF)
<https://physics.nist.gov/VSC/jsp/DataFormat.jsp#a>`_ , which can be
viewed
with the program `Gwyddion
<http://gwyddion.net/documentation/user-guide-en/>`_ .
It is also useful because it is a plain
ASCII file
Parameters
----------
array: 2D ndarray
data to be saved as *sdf*
pixelsize: list
list in the format [pixel_size_i, pixel_size_j]
fname: str
output file name
extraHeader: dict
dictionary with extra fields to be added to the header. Note that this
extra header have not effect when using Gwyddion. It is used only for
the asc file and when loaded by :py:func:`wavepy.utils.load_sdf`
as *headerdic*.
See Also
--------
:py:func:`wavepy.utils.load_sdf`
'''
if len(array.shape) != 2:
prColor('ERROR: function save_sdf: array must be 2-dimensional', 'red')
raise TypeError
header = 'relative phase\n' + \
'NumPoints\t=\t' + str(array.shape[1]) + '\n' + \
'NumProfiles\t=\t' + str(array.shape[0]) + '\n' + \
'Xscale\t=\t' + str(pixelsize[1]) + '\n' + \
'Yscale\t=\t' + str(pixelsize[0]) + '\n' + \
'Zscale\t=\t1\n' + \
'Zresolution\t=\t0\n' + \
'Compression\t=\t0\n' + \
'DataType\t=\t7 \n' + \
'CheckType\t=\t0\n' + \
'NumDataSet\t=\t1\n' + \
'NanPresent\t=\t0\n'
for key in extraHeader.keys():
header += key + '\t=\t' + extraHeader[key] + '\n'
header += '*'
if array.dtype == 'float64':
fmt = '%1.8g'
elif array.dtype == 'int64':
fmt = '%d'
else:
fmt = '%f'
np.savetxt(fname, array.flatten(), fmt=fmt, header=header, comments='')
prColor('MESSAGE: ' + fname + ' saved!', 'green')
def load_sdf_file(fname, printHeader=False):
'''
Load an 2D array in the `Surface Data File Format (SDF)
<https://physics.nist.gov/VSC/jsp/DataFormat.jsp#a>`_ . The SDF format
is useful because it can be viewed with the program `Gwyddion
<http://gwyddion.net/documentation/user-guide-en/>`_ .
It is also useful because it is a plain
ASCII file
Parameters
----------
fname: str
output file name
Returns
-------
array: 2D ndarray
data loaded from the ``sdf`` file
pixelsize: list
list in the format [pixel_size_i, pixel_size_j]
headerdic
dictionary with the header
Example
-------
>>> import wavepy.utils as wpu
>>> data, pixelsize, headerdic = wpu.load_sdf('test_file.sdf')
See Also
--------
:py:func:`wavepy.utils.save_sdf`
'''
with open(fname) as input_file:
nline = 0
header = ''
if printHeader:
print('########## HEADER from ' + fname)
for line in input_file:
nline += 1
if printHeader:
print(line, end='')
if 'NumPoints' in line:
xpoints = int(line.split('=')[-1])
if 'NumProfiles' in line:
ypoints = int(line.split('=')[-1])
if 'Xscale' in line:
xscale = float(line.split('=')[-1])
if 'Yscale' in line:
yscale = float(line.split('=')[-1])
if 'Zscale' in line:
zscale = float(line.split('=')[-1])
if '*' in line:
break
else:
header += line
if printHeader:
print('########## END HEADER from ' + fname)
# Load data as numpy array
data = np.loadtxt(fname, skiprows=nline)
data = data.reshape(ypoints, xpoints)*zscale
# Load header as a dictionary
headerdic = {}
header = header.replace('\t', '')
for item in header.split('\n'):
items = item.split('=')
if len(items) > 1:
headerdic[items[0]] = items[1]
return data, [yscale, xscale], headerdic
def prColor(word, color_type):
''' function to print color text in terminal
input:
word: word to print
color_type: which color
'red', 'green', 'yellow'
'light_purple', 'purple'
'cyan', 'light_gray'
'black'
'''
end_c = '\033[00m'
if color_type == 'red':
start_c = '\033[91m'
elif color_type == 'green':
start_c = '\033[92m'
elif color_type == 'yellow':
start_c = '\033[93m'
elif color_type == 'light_purple':
start_c = '\033[94m'
elif color_type == 'purple':
start_c = '\033[95m'
elif color_type == 'cyan':
start_c = '\033[96m'
elif color_type == 'light_gray':
start_c = '\033[97m'
elif color_type == 'black':
start_c = '\033[98m'
else:
print('color not right')
sys.exit()
print(start_c + str(word) + end_c)
def gui_load_data_finename(directory='', title="File name with Data"):
originalDir = os.getcwd()
if directory != '':
if os.path.isdir(directory):
os.chdir(directory)
else:
prColor("WARNING: Directory " + directory + " doesn't exist.", 'red')
prColor("MESSAGE: Using current working directory " + originalDir, 'yellow')
root = tk.Tk(title)
root.withdraw()
fname1 = filedialog.askopenfilename()
# fname1 = easyqt.get_file_names(title)
if len(fname1) == 0:
fname_last = None
else:
fname_last = fname1[0]
os.chdir(originalDir)
return fname_last
def gui_load_data_directory(directory='', title="File name with Data"):
originalDir = os.getcwd()
if directory != '':
if os.path.isdir(directory):
os.chdir(directory)
else:
prColor("WARNING: Directory " + directory + " doesn't exist.", 'red')
prColor("MESSAGE: Using current working directory " + originalDir, 'yellow')
root = tk.Tk(title)
root.withdraw()
fname1 = filedialog.askdirectory()
# fname1 = easyqt.get_directory_name(title)
if len(fname1) == 0:
fname_last = None
else:
fname_last = fname1
os.chdir(originalDir)
return fname_last
if __name__ == "__main__":
# define the path to the data folder
file_path = gui_load_data_directory('', 'Path to the phase data folder')
data_path = glob.glob(file_path+'/**/*_phase_*.sdf')
prColor(data_path,'green')
prColor(str(len(data_path))+' data are found', 'green')
listOfData = []
filename_origin = []
for fname in data_path:
prColor('MESSAGE: Open File ' + fname, 'green')
temp_data, pixel_size, headerdic = load_sdf_file(fname)
listOfData.append(temp_data)
filename_origin.append(os.path.basename(os.path.dirname(fname)))
prColor('subdir:' + filename_origin[-1], 'yellow')
origin_data = np.array(listOfData)
phase_data = origin_data - origin_data[0]
y_axis = np.arange(phase_data.shape[1]) * pixel_size[0] * 1e3
x_axis = np.arange(phase_data.shape[2]) * pixel_size[1] * 1e3
YY, XX = np.meshgrid(y_axis, x_axis, indexing='ij')
if not os.path.exists(file_path+'/processed/'):
os.makedirs(file_path+'/processed/')
for kk, phase in enumerate(phase_data):
ax1 = plt.figure()
im = plt.imshow(phase*1e9, cmap=cm.get_cmap('hot'))
plt.colorbar(im, label='surface [nm]')
plt.savefig(file_path+'/processed/'+filename_origin[kk]+'_2D.png')
fig = plt.figure()
ax2 = fig.gca(projection='3d')
surf = ax2.plot_surface(XX, YY, phase*1e9, cmap=cm.get_cmap('hot'),
linewidth=0, antialiased=False)
plt.xlabel('X [mm]')
plt.ylabel('Y [mm]')
fig.colorbar(surf, label='surface [nm]')
plt.savefig(file_path+'/processed/'+filename_origin[kk]+'_3D.png')
save_sdf_file(phase, pixel_size, file_path+'/processed/'+filename_origin[kk]+'_3D.sdf')
if not os.path.exists(file_path+'/origin/'):
os.makedirs(file_path+'/origin/')
for kk, phase in enumerate(origin_data):
ax1 = plt.figure()
im = plt.imshow(phase*1e9, cmap=cm.get_cmap('hot'))
plt.colorbar(im, label='surface [nm]')
plt.savefig(file_path+'/origin/'+filename_origin[kk]+'_2D.png')
fig = plt.figure()
ax2 = fig.gca(projection='3d')
surf = ax2.plot_surface(XX, YY, phase*1e9, cmap=cm.get_cmap('hot'),
linewidth=0, antialiased=False)
plt.xlabel('X [mm]')
plt.ylabel('Y [mm]')
fig.colorbar(surf, label='surface [nm]')
plt.savefig(file_path+'/origin/'+filename_origin[kk]+'_3D.png')
| '''
this program is used to calculate the relative phase change of different wavepy reconstruction results.
'''
import os
import sys
import numpy as np
import glob
import tkinter as tk
from tkinter import filedialog
from matplotlib import pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
def save_sdf_file(array, pixelsize=[1, 1], fname='output.sdf', extraHeader={}):
'''
Save an 2D array in the `Surface Data File Format (SDF)
<https://physics.nist.gov/VSC/jsp/DataFormat.jsp#a>`_ , which can be
viewed
with the program `Gwyddion
<http://gwyddion.net/documentation/user-guide-en/>`_ .
It is also useful because it is a plain
ASCII file
Parameters
----------
array: 2D ndarray
data to be saved as *sdf*
pixelsize: list
list in the format [pixel_size_i, pixel_size_j]
fname: str
output file name
extraHeader: dict
dictionary with extra fields to be added to the header. Note that this
extra header have not effect when using Gwyddion. It is used only for
the asc file and when loaded by :py:func:`wavepy.utils.load_sdf`
as *headerdic*.
See Also
--------
:py:func:`wavepy.utils.load_sdf`
'''
if len(array.shape) != 2:
prColor('ERROR: function save_sdf: array must be 2-dimensional', 'red')
raise TypeError
header = 'relative phase\n' + \
'NumPoints\t=\t' + str(array.shape[1]) + '\n' + \
'NumProfiles\t=\t' + str(array.shape[0]) + '\n' + \
'Xscale\t=\t' + str(pixelsize[1]) + '\n' + \
'Yscale\t=\t' + str(pixelsize[0]) + '\n' + \
'Zscale\t=\t1\n' + \
'Zresolution\t=\t0\n' + \
'Compression\t=\t0\n' + \
'DataType\t=\t7 \n' + \
'CheckType\t=\t0\n' + \
'NumDataSet\t=\t1\n' + \
'NanPresent\t=\t0\n'
for key in extraHeader.keys():
header += key + '\t=\t' + extraHeader[key] + '\n'
header += '*'
if array.dtype == 'float64':
fmt = '%1.8g'
elif array.dtype == 'int64':
fmt = '%d'
else:
fmt = '%f'
np.savetxt(fname, array.flatten(), fmt=fmt, header=header, comments='')
prColor('MESSAGE: ' + fname + ' saved!', 'green')
def load_sdf_file(fname, printHeader=False):
'''
Load an 2D array in the `Surface Data File Format (SDF)
<https://physics.nist.gov/VSC/jsp/DataFormat.jsp#a>`_ . The SDF format
is useful because it can be viewed with the program `Gwyddion
<http://gwyddion.net/documentation/user-guide-en/>`_ .
It is also useful because it is a plain
ASCII file
Parameters
----------
fname: str
output file name
Returns
-------
array: 2D ndarray
data loaded from the ``sdf`` file
pixelsize: list
list in the format [pixel_size_i, pixel_size_j]
headerdic
dictionary with the header
Example
-------
>>> import wavepy.utils as wpu
>>> data, pixelsize, headerdic = wpu.load_sdf('test_file.sdf')
See Also
--------
:py:func:`wavepy.utils.save_sdf`
'''
with open(fname) as input_file:
nline = 0
header = ''
if printHeader:
print('########## HEADER from ' + fname)
for line in input_file:
nline += 1
if printHeader:
print(line, end='')
if 'NumPoints' in line:
xpoints = int(line.split('=')[-1])
if 'NumProfiles' in line:
ypoints = int(line.split('=')[-1])
if 'Xscale' in line:
xscale = float(line.split('=')[-1])
if 'Yscale' in line:
yscale = float(line.split('=')[-1])
if 'Zscale' in line:
zscale = float(line.split('=')[-1])
if '*' in line:
break
else:
header += line
if printHeader:
print('########## END HEADER from ' + fname)
# Load data as numpy array
data = np.loadtxt(fname, skiprows=nline)
data = data.reshape(ypoints, xpoints)*zscale
# Load header as a dictionary
headerdic = {}
header = header.replace('\t', '')
for item in header.split('\n'):
items = item.split('=')
if len(items) > 1:
headerdic[items[0]] = items[1]
return data, [yscale, xscale], headerdic
def prColor(word, color_type):
''' function to print color text in terminal
input:
word: word to print
color_type: which color
'red', 'green', 'yellow'
'light_purple', 'purple'
'cyan', 'light_gray'
'black'
'''
end_c = '\033[00m'
if color_type == 'red':
start_c = '\033[91m'
elif color_type == 'green':
start_c = '\033[92m'
elif color_type == 'yellow':
start_c = '\033[93m'
elif color_type == 'light_purple':
start_c = '\033[94m'
elif color_type == 'purple':
start_c = '\033[95m'
elif color_type == 'cyan':
start_c = '\033[96m'
elif color_type == 'light_gray':
start_c = '\033[97m'
elif color_type == 'black':
start_c = '\033[98m'
else:
print('color not right')
sys.exit()
print(start_c + str(word) + end_c)
def gui_load_data_finename(directory='', title="File name with Data"):
originalDir = os.getcwd()
if directory != '':
if os.path.isdir(directory):
os.chdir(directory)
else:
prColor("WARNING: Directory " + directory + " doesn't exist.", 'red')
prColor("MESSAGE: Using current working directory " + originalDir, 'yellow')
root = tk.Tk(title)
root.withdraw()
fname1 = filedialog.askopenfilename()
# fname1 = easyqt.get_file_names(title)
if len(fname1) == 0:
fname_last = None
else:
fname_last = fname1[0]
os.chdir(originalDir)
return fname_last
def gui_load_data_directory(directory='', title="File name with Data"):
originalDir = os.getcwd()
if directory != '':
if os.path.isdir(directory):
os.chdir(directory)
else:
prColor("WARNING: Directory " + directory + " doesn't exist.", 'red')
prColor("MESSAGE: Using current working directory " + originalDir, 'yellow')
root = tk.Tk(title)
root.withdraw()
fname1 = filedialog.askdirectory()
# fname1 = easyqt.get_directory_name(title)
if len(fname1) == 0:
fname_last = None
else:
fname_last = fname1
os.chdir(originalDir)
return fname_last
if __name__ == "__main__":
# define the path to the data folder
file_path = gui_load_data_directory('', 'Path to the phase data folder')
data_path = glob.glob(file_path+'/**/*_phase_*.sdf')
prColor(data_path,'green')
prColor(str(len(data_path))+' data are found', 'green')
listOfData = []
filename_origin = []
for fname in data_path:
prColor('MESSAGE: Open File ' + fname, 'green')
temp_data, pixel_size, headerdic = load_sdf_file(fname)
listOfData.append(temp_data)
filename_origin.append(os.path.basename(os.path.dirname(fname)))
prColor('subdir:' + filename_origin[-1], 'yellow')
origin_data = np.array(listOfData)
phase_data = origin_data - origin_data[0]
y_axis = np.arange(phase_data.shape[1]) * pixel_size[0] * 1e3
x_axis = np.arange(phase_data.shape[2]) * pixel_size[1] * 1e3
YY, XX = np.meshgrid(y_axis, x_axis, indexing='ij')
if not os.path.exists(file_path+'/processed/'):
os.makedirs(file_path+'/processed/')
for kk, phase in enumerate(phase_data):
ax1 = plt.figure()
im = plt.imshow(phase*1e9, cmap=cm.get_cmap('hot'))
plt.colorbar(im, label='surface [nm]')
plt.savefig(file_path+'/processed/'+filename_origin[kk]+'_2D.png')
fig = plt.figure()
ax2 = fig.gca(projection='3d')
surf = ax2.plot_surface(XX, YY, phase*1e9, cmap=cm.get_cmap('hot'),
linewidth=0, antialiased=False)
plt.xlabel('X [mm]')
plt.ylabel('Y [mm]')
fig.colorbar(surf, label='surface [nm]')
plt.savefig(file_path+'/processed/'+filename_origin[kk]+'_3D.png')
save_sdf_file(phase, pixel_size, file_path+'/processed/'+filename_origin[kk]+'_3D.sdf')
if not os.path.exists(file_path+'/origin/'):
os.makedirs(file_path+'/origin/')
for kk, phase in enumerate(origin_data):
ax1 = plt.figure()
im = plt.imshow(phase*1e9, cmap=cm.get_cmap('hot'))
plt.colorbar(im, label='surface [nm]')
plt.savefig(file_path+'/origin/'+filename_origin[kk]+'_2D.png')
fig = plt.figure()
ax2 = fig.gca(projection='3d')
surf = ax2.plot_surface(XX, YY, phase*1e9, cmap=cm.get_cmap('hot'),
linewidth=0, antialiased=False)
plt.xlabel('X [mm]')
plt.ylabel('Y [mm]')
fig.colorbar(surf, label='surface [nm]')
plt.savefig(file_path+'/origin/'+filename_origin[kk]+'_3D.png')
| en | 0.664226 | this program is used to calculate the relative phase change of different wavepy reconstruction results. Save an 2D array in the `Surface Data File Format (SDF) <https://physics.nist.gov/VSC/jsp/DataFormat.jsp#a>`_ , which can be viewed with the program `Gwyddion <http://gwyddion.net/documentation/user-guide-en/>`_ . It is also useful because it is a plain ASCII file Parameters ---------- array: 2D ndarray data to be saved as *sdf* pixelsize: list list in the format [pixel_size_i, pixel_size_j] fname: str output file name extraHeader: dict dictionary with extra fields to be added to the header. Note that this extra header have not effect when using Gwyddion. It is used only for the asc file and when loaded by :py:func:`wavepy.utils.load_sdf` as *headerdic*. See Also -------- :py:func:`wavepy.utils.load_sdf` Load an 2D array in the `Surface Data File Format (SDF) <https://physics.nist.gov/VSC/jsp/DataFormat.jsp#a>`_ . The SDF format is useful because it can be viewed with the program `Gwyddion <http://gwyddion.net/documentation/user-guide-en/>`_ . It is also useful because it is a plain ASCII file Parameters ---------- fname: str output file name Returns ------- array: 2D ndarray data loaded from the ``sdf`` file pixelsize: list list in the format [pixel_size_i, pixel_size_j] headerdic dictionary with the header Example ------- >>> import wavepy.utils as wpu >>> data, pixelsize, headerdic = wpu.load_sdf('test_file.sdf') See Also -------- :py:func:`wavepy.utils.save_sdf` ######### HEADER from ' + fname) ######### END HEADER from ' + fname) # Load data as numpy array # Load header as a dictionary function to print color text in terminal input: word: word to print color_type: which color 'red', 'green', 'yellow' 'light_purple', 'purple' 'cyan', 'light_gray' 'black' # fname1 = easyqt.get_file_names(title) # fname1 = easyqt.get_directory_name(title) # define the path to the data folder | 3.199902 | 3 |
djpaddle/fields.py | chrisgrande/dj-paddle | 32 | 6615426 | <gh_stars>10-100
from django.db import models
class PaddleCurrencyCodeField(models.CharField):
"""
A field used to store a three-letter currency code (eg. USD, EUR, ...)
"""
def __init__(self, *args, **kwargs):
defaults = {"max_length": 3, "help_text": "Three-letter ISO currency code"}
defaults.update(kwargs)
super().__init__(*args, **defaults)
| from django.db import models
class PaddleCurrencyCodeField(models.CharField):
"""
A field used to store a three-letter currency code (eg. USD, EUR, ...)
"""
def __init__(self, *args, **kwargs):
defaults = {"max_length": 3, "help_text": "Three-letter ISO currency code"}
defaults.update(kwargs)
super().__init__(*args, **defaults) | en | 0.530029 | A field used to store a three-letter currency code (eg. USD, EUR, ...) | 2.671172 | 3 |
app/resources/root.py | vvitsenets/VICTORIA | 1 | 6615427 | import os
import uuid
import mimetypes
import falcon
import cgi
import app.util.json as json
import requests
params = (
('recognize_vehicle', '1'),
('country', 'us'),
('secret_key', 'sk_02ecc708a736670ef7210224'), # TODO secret key to parameters
)
def _recognize(image_file):
files = {
'image': (image_file, open(image_file, 'rb')),
}
response = requests.post('https://api.openalpr.com/v2/recognize', params=params, files=files)
return json.dumps(response)
class RootResources(object):
def on_get(self, req, resp):
resp.body = json.dumps({
"message": "GAF!",
})
class Resource(object): # TODO queue
_CHUNK_SIZE_BYTES = 4096
def __init__(self, storage_path):
self._storage_path = storage_path
def on_post(self, req, resp):
image = req.get_param("image")
ext = mimetypes.guess_extension(req.content_type)
filename = "{uuid}{ext}".format(uuid=uuid.uuid4(), ext=ext)
image_path = os.path.join(self._storage_path, filename)
with open(image_path, "wb") as image_file:
while True:
chunk = image.file.read(4096)
image_file.write(chunk)
if not chunk:
break
resp.status = falcon.HTTP_200
resp.location = filename
# resp.body = json.dumps("{name:" + image_path + "}")
resp.body = json.dumps(_recognize(image_path)) # TODO try except and read from DB
| import os
import uuid
import mimetypes
import falcon
import cgi
import app.util.json as json
import requests
params = (
('recognize_vehicle', '1'),
('country', 'us'),
('secret_key', 'sk_02ecc708a736670ef7210224'), # TODO secret key to parameters
)
def _recognize(image_file):
files = {
'image': (image_file, open(image_file, 'rb')),
}
response = requests.post('https://api.openalpr.com/v2/recognize', params=params, files=files)
return json.dumps(response)
class RootResources(object):
def on_get(self, req, resp):
resp.body = json.dumps({
"message": "GAF!",
})
class Resource(object): # TODO queue
_CHUNK_SIZE_BYTES = 4096
def __init__(self, storage_path):
self._storage_path = storage_path
def on_post(self, req, resp):
image = req.get_param("image")
ext = mimetypes.guess_extension(req.content_type)
filename = "{uuid}{ext}".format(uuid=uuid.uuid4(), ext=ext)
image_path = os.path.join(self._storage_path, filename)
with open(image_path, "wb") as image_file:
while True:
chunk = image.file.read(4096)
image_file.write(chunk)
if not chunk:
break
resp.status = falcon.HTTP_200
resp.location = filename
# resp.body = json.dumps("{name:" + image_path + "}")
resp.body = json.dumps(_recognize(image_path)) # TODO try except and read from DB
| en | 0.614436 | # TODO secret key to parameters # TODO queue # resp.body = json.dumps("{name:" + image_path + "}") # TODO try except and read from DB | 2.466621 | 2 |
2126.py | heltonricardo/URI | 6 | 6615428 | <gh_stars>1-10
i = 1
while True:
try:
a = str(input())
b = str(input())
print('Caso #{}:'.format(i))
s = b.count(a)
if s:
print('Qtd.Subsequencias:', b.count(a))
print('Pos:', b.rfind(a) + 1)
else:
print('Nao existe subsequencia')
print()
i += 1
except EOFError:
break
| i = 1
while True:
try:
a = str(input())
b = str(input())
print('Caso #{}:'.format(i))
s = b.count(a)
if s:
print('Qtd.Subsequencias:', b.count(a))
print('Pos:', b.rfind(a) + 1)
else:
print('Nao existe subsequencia')
print()
i += 1
except EOFError:
break | ar | 0.216553 | #{}:'.format(i)) | 3.19191 | 3 |
plotting/aggregators.py | axelbr/dreamer | 1 | 6615429 | import warnings
from typing import Tuple
import numpy as np
from abc import abstractmethod
class Aggregator:
@property
@abstractmethod
def reducer(self):
pass
def __call__(self, runs, binning, minx=0, maxx=8000000) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Aggregate data according to the given binning. If `binning` is None aggregate over the all range.
:param runs: list of Run structs
:param binning: binning size
:return: tuple (x, mean, min, max)
"""
all_x, all_y = np.concatenate([r.x for r in runs]), np.concatenate([r.y for r in runs])
order = np.argsort(all_x)
all_x, all_y = all_x[order], all_y[order]
if binning is None:
binned_x = [np.max(all_x)]
else:
minx = max(minx, all_x.min())
maxx = min(maxx, all_x.max()+binning)
binned_x = np.arange(minx, maxx, binning)
binned_mean, binned_min, binned_max = [], [], []
for start, stop in zip([-np.inf] + list(binned_x), list(binned_x)):
left = (all_x <= start).sum()
right = (all_x <= stop).sum()
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mean, min_margin, max_margin = self.reducer(all_y[left:right])
except RuntimeWarning as wrn:
print(f'[WARNING] {wrn}. Consider to increase the binning')
exit(1)
binned_mean.append(mean)
binned_min.append(min_margin)
binned_max.append(max_margin)
return np.array(binned_x), np.array(binned_mean), np.array(binned_min), np.array(binned_max)
class MeanStd(Aggregator):
@property
def reducer(self):
return lambda x: (np.nanmean(np.array(x)), np.nanmean(np.array(x)) - np.nanstd(np.array(x)),
np.nanmean(np.array(x)) + np.nanstd(np.array(x)))
class MeanMinMax(Aggregator):
@property
def reducer(self):
return lambda x: (np.nanmean(np.array(x)), np.nanmin(np.array(x)), np.nanmax(np.array(x)))
| import warnings
from typing import Tuple
import numpy as np
from abc import abstractmethod
class Aggregator:
@property
@abstractmethod
def reducer(self):
pass
def __call__(self, runs, binning, minx=0, maxx=8000000) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Aggregate data according to the given binning. If `binning` is None aggregate over the all range.
:param runs: list of Run structs
:param binning: binning size
:return: tuple (x, mean, min, max)
"""
all_x, all_y = np.concatenate([r.x for r in runs]), np.concatenate([r.y for r in runs])
order = np.argsort(all_x)
all_x, all_y = all_x[order], all_y[order]
if binning is None:
binned_x = [np.max(all_x)]
else:
minx = max(minx, all_x.min())
maxx = min(maxx, all_x.max()+binning)
binned_x = np.arange(minx, maxx, binning)
binned_mean, binned_min, binned_max = [], [], []
for start, stop in zip([-np.inf] + list(binned_x), list(binned_x)):
left = (all_x <= start).sum()
right = (all_x <= stop).sum()
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
mean, min_margin, max_margin = self.reducer(all_y[left:right])
except RuntimeWarning as wrn:
print(f'[WARNING] {wrn}. Consider to increase the binning')
exit(1)
binned_mean.append(mean)
binned_min.append(min_margin)
binned_max.append(max_margin)
return np.array(binned_x), np.array(binned_mean), np.array(binned_min), np.array(binned_max)
class MeanStd(Aggregator):
@property
def reducer(self):
return lambda x: (np.nanmean(np.array(x)), np.nanmean(np.array(x)) - np.nanstd(np.array(x)),
np.nanmean(np.array(x)) + np.nanstd(np.array(x)))
class MeanMinMax(Aggregator):
@property
def reducer(self):
return lambda x: (np.nanmean(np.array(x)), np.nanmin(np.array(x)), np.nanmax(np.array(x)))
| en | 0.782752 | Aggregate data according to the given binning. If `binning` is None aggregate over the all range. :param runs: list of Run structs :param binning: binning size :return: tuple (x, mean, min, max) | 2.506914 | 3 |
tests/test_tupledict_request.py | moctarjallo/pythonapi | 0 | 6615430 | import unittest
import pythonapi as api
class TestTupleDictRequest(unittest.TestCase):
def test_simple(self):
data = ({'a':1}, {'b':2}, {'c':3})
req = api.TupleDictRequest(data)
self.assertEqual(req.data, {
'a': 1,
'b': 2,
'c': 3
})
def test_imbricated_tuple_in_dict_value(self):
data = ({'a':({'d': 4}, {'e': 5})}, {'b':2}, {'c':3})
req = api.TupleDictRequest(data)
self.assertEqual(req.data, {
'a': {
'd':4,
'e':5
},
'b': 2,
'c': 3
})
def test_triply_imbricated_tuple_in_dict_value(self):
data = ({'a':1}, {'b':({'f':({'d':4}, {'e':5})},)}, {'c':3})
req = api.TupleDictRequest(data)
self.assertEqual(req.data, {
'a': 1,
'b': {
'f':{
'd':4,
'e':5
}
},
'c': 3
})
if __name__ == '__main__':
    unittest.main() | none | 1 | 2.930781 | 3
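The `pythonapi` package itself is not part of this dump, but the tests above fully pin down the expected behavior: a tuple of dicts collapses into a single dict, recursing into tuple-valued entries. One implementation that would satisfy them (a sketch, not the package's actual code):

class TupleDictRequest:
    """Recursively flattens a tuple of dicts into a single dict."""
    def __init__(self, data):
        self.data = self._merge(data)

    def _merge(self, value):
        if isinstance(value, tuple):
            merged = {}
            for item in value:  # each tuple element is expected to be a dict
                for k, v in item.items():
                    merged[k] = self._merge(v)
            return merged
        return value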
services/umessage/errorcodes.py | focusonecc/notification | 0 | 6615431 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last modified: <NAME> (<EMAIL>)
__revision__ = '0.1'
__all__ = [
'HTTPStatusCode',
'APIServerErrorCode',
'UMPushError',
'UMHTTPError'
]
class UMPushError(Exception):
error_code = 0
error_msg = ''
    def __init__(self, error_code, params):
        error_msg = '{} ==== {}'.format(API_SERVER_ERROR_CODE_MESSAGE[error_code], params)
        super(UMPushError, self).__init__(error_msg)
        # error codes are plain ints on APIServerErrorCode, so store the code directly
        self.error_code = error_code
        self.error_msg = API_SERVER_ERROR_CODE_MESSAGE[error_code]
class UMHTTPError(Exception):
def __init__(self, http_code):
super(UMHTTPError, self).__init__("HTTP Code {}".format(http_code))
class HTTPStatusCode(object):
OK = 200
CREATED = 201
ACCEPTED = 202
BAD_REQUEST = 400
UNAUTHORIZED = 401
FORBIDDEN = 403
NOT_FOUND = 404
INTERNAL_SERVER_ERROR = 500
class APIServerErrorCode(object):
NO_APP_KEY = 1000
NO_PAY_LOAD = 1001
NO_PAY_LOAD_BODY = 1002
NO_CUSTOM = 1003
NO_DISPLAY_TYPE = 1004
WRONG_IMG_URL = 1005
WRONG_SOUND_URL = 1006
WRONG_URL = 1007
NO_TICKER_IN_BODY = 1008
NO_TITLE_IN_BODY = 1009
NO_TEXT_IN_BODY = 1010
WRONG_PLAY_VIBRATE = 1011
WRONG_PLAY_LIGHTS = 1012
WRONG_PLAY_SOUND = 1013
NO_TASK_ID = 1014
NO_DEVICE_TOKENS = 1015
NO_TYPE = 1016
WRONG_PRODUCTION_MODE = 1017
WRONG_APP_KEY_NO_PUSH = 1018
WRONG_DISPLAY_TYPE = 1019
NO_APPLICATION_ADDED = 1020
DISABLED_APPLICATION = 2000
WRONG_EXPIRATION_TIME = 2001
WRONG_REGULAR_TIME = 2002
WRONG_EXPIRATION_TIME_FOR_REGULAR_TIME = 2003
NO_WHITE_LIST = 2004
NO_MESSAGE_EXIST = 2005
    WRONG_VALIDATION_TOKEN = 2006
WRONG_KEY_OR_MASTER_SECRET = 2007
WRONG_JSON = 2008
FILL_IN_ALIAS_OR_FILE_ID = 2009
NULL_DEVICE_TOKEN = 2010
ALIAS_GREATER_THAN_FIFTY = 2011
APP_KEY_GREATER_THAN_THREE = 2012
MESSAGE_IN_LINE = 2013
MESSAGE_CANCEL_FAILED = 2014
DEVICE_TOKENS_GREATER_THAN_FIFTY = 2015
FILL_IN_FILTER = 2016
ADDED_TAG_FAILED = 2017
FILL_IN_FILE_ID = 2018
NO_FILE_EXIST = 2019
SERVICE_UPGRADE = 2020
NO_APP_KEY_EXIST = 2021
PAY_LOAD_TOO_LONG = 2022
FILE_UPLOAD_FAILED = 2023
SPEED_LIMIT_NO_POSITIVE_INTEGER = 2024
NULL_APS = 2025
SEND_MORE_THAN_TEN_PER_MINUTE = 2026
WRONG_SIGNATURE = 2027
TIMESTAMP_EXPIRED = 2028
NULL_CONTENT = 2029
WRONG_LAUNCH_FROM_DATE = 2030
WRONG_FILTER_FORMAT = 2031
NULL_RPODUCTION_IOSCERT = 2032
NULL_DEVELOPMENT_IOSCERT = 2033
CERTIFICATE_EXPIRED = 2034
CERTIFICATE_EXPIRED_TIMER = 2035
WRONG_TIMESPAN_FORMAT = 2036
WRONG_UPLOAD_FILE = 2038
WRONG_TIME_FORMAT = 2039
EXPIRED_TIME_TOLONG = 2040
DATABASE_ERROR_ONE = 3000
DATABASE_ERROR_TWO = 3001
DATABASE_ERROR_THREE = 3002
DATABASE_ERROR_FOUR = 3003
DATABASE_ERROR_FIVE = 3004
SYSTEM_ERROR = 4000
SYSTEM_BUSY = 4001
OPERATION_FAILED = 4002
WRONG_APP_KEY_FORMAT = 4003
WRONG_MESSAGE_TYPE_FORMAT = 4004
WRONG_MSG_FORMAT = 4005
WRONG_BODY_FORMAT = 4006
WRONG_DELIVER_POLICY_FORMAT = 4007
WRONG_INVALID_TIME_FORMAT = 4008
FULL_QUEUE = 4009
WRONG_DEVICE_NUMBER_FORMAT = 4010
INVALID_MESSAGE_EXPANDED_FIELD = 4011
NO_ACCESS_AUTHORITY = 4012
ASYNCHRONOUS_SEND_MESSAGE_FAILED = 4013
WRONG_APP_KEY_TO_DEVICE_TOKENS = 4014
NO_APPLICATION_INFO = 4015
WRONG_FILE_CODE = 4016
WRONG_FILE_TYPE = 4017
WRONG_FILE_REMOTE_ADDRESS = 4018
WRONG_FILE_DESCRIPTION = 4019
WRONG_DEVICE_TOKEN = 4020
HSF_TIME_OUT = 4021
APP_KEY_REGISTER = 4022
SERVER_NET_ERROR = 4023
ILLEGAL_ACCESS = 4024
DEVICE_TOKEN_ALL_FAILED = 4025
DEVICE_TOKEN_PART_FAILED = 4026
PULL_FILE_FAILED = 4027
DEVICE_TOKEN_ERROR = 5000
NO_CERTIFICATE = 5001
UMENG_RESERVED_FIELD = 5002
NULL_ALERT = 5003
WRONG_ALERT = 5004
WRONG_DEVICE_TOKEN_FORMAT = 5005
CREATE_SOCKET_ERROR = 5006
WRONG_CERTIFICATE_REVOKED = 5007
WRONG_CERTIFICATE_UNKOWN = 5008
WRONG_HANDSHAKE_FAILURE = 5009
@classmethod
    def errorMessage(cls, errorCode):
if errorCode in API_SERVER_ERROR_CODE_MESSAGE:
return API_SERVER_ERROR_CODE_MESSAGE[errorCode]
else:
return "error code not defined"
API_SERVER_ERROR_CODE_MESSAGE = {
# NO.1000~1020
APIServerErrorCode.NO_APP_KEY: '请求参数没有appkey',
APIServerErrorCode.NO_PAY_LOAD: '请求参数没有payload',
APIServerErrorCode.NO_PAY_LOAD_BODY: '请求参数payload中没有body',
APIServerErrorCode.NO_CUSTOM: 'display_type为message时,请求参数没有custom',
APIServerErrorCode.NO_DISPLAY_TYPE: '请求参数没有display_type',
APIServerErrorCode.WRONG_IMG_URL: 'img url格式不对,请以https或者http开始',
APIServerErrorCode.WRONG_SOUND_URL: 'sound url格式不对,请以https或者http开始',
APIServerErrorCode.WRONG_URL: 'url格式不对,请以https或者http开始',
APIServerErrorCode.NO_TICKER_IN_BODY: 'display_type为notification时,body中ticker不能为空',
APIServerErrorCode.NO_TITLE_IN_BODY: 'display_type为notification时,body中title不能为空',
APIServerErrorCode.NO_TEXT_IN_BODY: 'display_type为notification时,body中text不能为空',
APIServerErrorCode.WRONG_PLAY_VIBRATE: 'play_vibrate的值只能为true或者false',
APIServerErrorCode.WRONG_PLAY_LIGHTS: 'play_lights的值只能为true或者false',
APIServerErrorCode.WRONG_PLAY_SOUND: 'play_sound的值只能为true或者false',
APIServerErrorCode.NO_TASK_ID: 'task-id没有找到',
APIServerErrorCode.NO_DEVICE_TOKENS: '请求参数中没有device_tokens',
APIServerErrorCode.NO_TYPE: '请求参数没有type',
APIServerErrorCode.WRONG_PRODUCTION_MODE: 'production_mode只能为true或者false',
APIServerErrorCode.WRONG_APP_KEY_NO_PUSH: 'appkey错误:指定的appkey尚未开通推送服务',
APIServerErrorCode.WRONG_DISPLAY_TYPE: 'display_type填写错误',
APIServerErrorCode.NO_APPLICATION_ADDED: '应用组中尚未添加应用',
# NO.2000~2040
APIServerErrorCode.DISABLED_APPLICATION: '该应用已被禁用',
APIServerErrorCode.WRONG_EXPIRATION_TIME: '过期时间必须大于当前时间',
APIServerErrorCode.WRONG_REGULAR_TIME: '定时发送时间必须大于当前时间',
APIServerErrorCode.WRONG_EXPIRATION_TIME_FOR_REGULAR_TIME: '过期时间必须大于定时发送时间',
APIServerErrorCode.NO_WHITE_LIST: 'IP白名单尚未添加, 请到网站后台添加您的服务器IP白名单',
APIServerErrorCode.NO_MESSAGE_EXIST: '该消息不存在',
APIServerErrorCode.WRONG_VALIDATION_TOKEN: 'validation token错误',
APIServerErrorCode.WRONG_KEY_OR_MASTER_SECRET: 'appkey或app_master_secret错误',
APIServerErrorCode.WRONG_JSON: 'json解析错误',
APIServerErrorCode.FILL_IN_ALIAS_OR_FILE_ID: '请填写alias或者file_id',
APIServerErrorCode.NULL_DEVICE_TOKEN: '与alias对应的device_tokens为空',
APIServerErrorCode.ALIAS_GREATER_THAN_FIFTY: 'alias个数已超过50',
APIServerErrorCode.APP_KEY_GREATER_THAN_THREE: '此appkey今天的广播数已超过3次',
APIServerErrorCode.MESSAGE_IN_LINE: '消息还在排队,请稍候再查询',
APIServerErrorCode.MESSAGE_CANCEL_FAILED: '消息取消失败,请稍候再试',
APIServerErrorCode.DEVICE_TOKENS_GREATER_THAN_FIFTY: 'device_tokens个数已超过50',
APIServerErrorCode.FILL_IN_FILTER: '请填写filter',
APIServerErrorCode.ADDED_TAG_FAILED: '添加tag失败',
APIServerErrorCode.FILL_IN_FILE_ID: '请填写file_id',
APIServerErrorCode.NO_FILE_EXIST: '与此file_id对应的文件不存在',
APIServerErrorCode.SERVICE_UPGRADE: '服务正在升级中,请稍候再试',
APIServerErrorCode.NO_APP_KEY_EXIST: 'appkey不存在',
APIServerErrorCode.PAY_LOAD_TOO_LONG: 'payload长度过长',
APIServerErrorCode.FILE_UPLOAD_FAILED: '文件上传失败,请重试',
APIServerErrorCode.SPEED_LIMIT_NO_POSITIVE_INTEGER: '限速值必须为正整数',
APIServerErrorCode.NULL_APS: 'aps字段不能为空',
APIServerErrorCode.SEND_MORE_THAN_TEN_PER_MINUTE: '1分钟内发送次数超出10次',
APIServerErrorCode.WRONG_SIGNATURE: '签名不正确',
APIServerErrorCode.TIMESTAMP_EXPIRED: '时间戳已过期',
APIServerErrorCode.NULL_CONTENT: 'content内容不能为空',
APIServerErrorCode.WRONG_LAUNCH_FROM_DATE: 'launch_from/not_launch_from条件中的日期须小于发送日期',
APIServerErrorCode.WRONG_FILTER_FORMAT: 'filter格式不正确 ',
APIServerErrorCode.NULL_RPODUCTION_IOSCERT: '未上传生产证书,请到Web后台上传',
APIServerErrorCode.NULL_DEVELOPMENT_IOSCERT: '未上传开发证书,请到Web后台上传',
APIServerErrorCode.CERTIFICATE_EXPIRED: '证书已过期',
APIServerErrorCode.CERTIFICATE_EXPIRED_TIMER: '定时任务证书过期',
APIServerErrorCode.WRONG_TIMESPAN_FORMAT: '时间戳格式错误',
APIServerErrorCode.WRONG_UPLOAD_FILE: '文件上传失败',
    APIServerErrorCode.WRONG_TIME_FORMAT: '时间格式必须是yyyy-MM-dd HH:mm:ss',
APIServerErrorCode.EXPIRED_TIME_TOLONG: '过期时间不能超过7天',
# NO.3000~3004
APIServerErrorCode.DATABASE_ERROR_ONE: '数据库错误',
APIServerErrorCode.DATABASE_ERROR_TWO: '数据库错误',
APIServerErrorCode.DATABASE_ERROR_THREE: '数据库错误',
APIServerErrorCode.DATABASE_ERROR_FOUR: '数据库错误',
APIServerErrorCode.DATABASE_ERROR_FIVE: '数据库错误',
# NO.4000~4027
APIServerErrorCode.SYSTEM_ERROR: '系统错误',
APIServerErrorCode.SYSTEM_BUSY: '系统忙',
APIServerErrorCode.OPERATION_FAILED: '操作失败',
APIServerErrorCode.WRONG_APP_KEY_FORMAT: 'appkey格式错误',
APIServerErrorCode.WRONG_MESSAGE_TYPE_FORMAT: '消息类型格式错误',
APIServerErrorCode.WRONG_MSG_FORMAT: 'msg格式错误',
APIServerErrorCode.WRONG_BODY_FORMAT: 'body格式错误',
APIServerErrorCode.WRONG_DELIVER_POLICY_FORMAT: 'deliverPolicy格式错误',
APIServerErrorCode.WRONG_INVALID_TIME_FORMAT: '失效时间格式错误',
APIServerErrorCode.FULL_QUEUE: '单个服务器队列已满',
APIServerErrorCode.WRONG_DEVICE_NUMBER_FORMAT: '设备号格式错误',
APIServerErrorCode.INVALID_MESSAGE_EXPANDED_FIELD: '消息扩展字段无效',
APIServerErrorCode.NO_ACCESS_AUTHORITY: '没有权限访问',
APIServerErrorCode.ASYNCHRONOUS_SEND_MESSAGE_FAILED: '异步发送消息失败',
APIServerErrorCode.WRONG_APP_KEY_TO_DEVICE_TOKENS: 'appkey和device_tokens不对应',
APIServerErrorCode.NO_APPLICATION_INFO: '没有找到应用信息',
APIServerErrorCode.WRONG_FILE_CODE: '文件编码有误',
APIServerErrorCode.WRONG_FILE_TYPE: '文件类型有误',
APIServerErrorCode.WRONG_FILE_REMOTE_ADDRESS: '文件远程地址有误',
APIServerErrorCode.WRONG_FILE_DESCRIPTION: '文件描述信息有误',
APIServerErrorCode.WRONG_DEVICE_TOKEN: 'device_token有误(注意,友盟的device_token是严格的44位字符串)',
APIServerErrorCode.HSF_TIME_OUT: 'HSF异步服务超时',
APIServerErrorCode.APP_KEY_REGISTER: 'appkey已经注册',
APIServerErrorCode.SERVER_NET_ERROR: '服务器网络异常',
APIServerErrorCode.ILLEGAL_ACCESS: '非法访问',
APIServerErrorCode.DEVICE_TOKEN_ALL_FAILED: 'device-token全部失败',
APIServerErrorCode.DEVICE_TOKEN_PART_FAILED: 'device-token部分失败',
APIServerErrorCode.PULL_FILE_FAILED: '拉取文件失败',
# NO.5000~5009
APIServerErrorCode.DEVICE_TOKEN_ERROR: 'device_token错误',
APIServerErrorCode.NO_CERTIFICATE: '证书不存在',
APIServerErrorCode.UMENG_RESERVED_FIELD: 'p,d是umeng保留字段',
APIServerErrorCode.NULL_ALERT: 'alert字段不能为空',
APIServerErrorCode.WRONG_ALERT: 'alert只能是String类型',
APIServerErrorCode.WRONG_DEVICE_TOKEN_FORMAT: 'device_token格式错误',
APIServerErrorCode.CREATE_SOCKET_ERROR: '创建socket错误',
APIServerErrorCode.WRONG_CERTIFICATE_REVOKED: 'certificate_revoked错误',
APIServerErrorCode.WRONG_CERTIFICATE_UNKOWN: 'certificate_unkown错误',
APIServerErrorCode.WRONG_HANDSHAKE_FAILURE: 'handshake_failure错误',
}
 | en | 0.297701 | #!/usr/bin/env python # -*- coding: utf-8 -*- # Last modified: <NAME> (<EMAIL>) # NO.1000~1020 # NO.2000~2040 # NO.3000~3004 # NO.4000~4027 # NO.5000~5009 | 2.177773 | 2
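A sketch of how calling code might surface these errors when parsing an Umeng push response. The response shape ("ret", "data", "error_code") is an assumption about the Umeng API, not something this module defines.

def raise_for_umeng_response(return_data):
    if return_data.get("ret") != "SUCCESS":
        raise UMPushError(int(return_data["data"]["error_code"]), return_data["data"])

try:
    raise_for_umeng_response({"ret": "FAIL", "data": {"error_code": "2004"}})
except UMPushError as err:
    print(err.error_code, err.error_msg)  # 2004 plus the NO_WHITE_LIST message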
python/remove-duplicate-letters.py | alirezaghey/leetcode-solutions | 3 | 6615432 | class Solution:
    # Time complexity: O(n), where n is the length of the input string
    # Space complexity: O(1), since `res` and `last_index` hold at most 26 lowercase letters
def removeDuplicateLetters(self, s: str) -> str:
last_index = {c:i for i, c in enumerate(s)}
res = []
for i, c in enumerate(s):
if c not in res:
while res and c < res[-1] and i < last_index[res[-1]]:
res.pop()
res.append(c)
return "".join(res) | class Solution:
# Time complexity: O(n) where n the length of the input string is
# Space complexity: O(1)
def removeDuplicateLetters(self, s: str) -> str:
last_index = {c:i for i, c in enumerate(s)}
res = []
for i, c in enumerate(s):
if c not in res:
while res and c < res[-1] and i < last_index[res[-1]]:
res.pop()
res.append(c)
return "".join(res) | en | 0.761723 | # Time complexity: O(n) where n the length of the input string is # Space complexity: O(1) | 3.631087 | 4 |
abstract_nas/abstract/depth_test.py | dumpmemory/google-research | 0 | 6615433 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for depth."""
from absl.testing import absltest as test
from abstract_nas.abstract import depth
from abstract_nas.model.concrete import new_graph
from abstract_nas.model.concrete import new_op
from abstract_nas.model.concrete import OpType
from abstract_nas.model.subgraph import replace_subgraph
from abstract_nas.model.subgraph import SubgraphModel
from abstract_nas.model.subgraph import SubgraphNode
from abstract_nas.zoo import cnn
class DepthTest(test.TestCase):
def setUp(self):
super().setUp()
self.graph, self.constants, _ = cnn.CifarNet()
self.subgraph_model = SubgraphModel(self.graph, self.constants, {}, {})
self.dp = depth.DepthProperty().infer(self.subgraph_model)
def test_infer(self):
depth_map = self.dp.depth_map
self.assertLen(depth_map, 1)
self.assertIn("input:0", depth_map)
self.assertLen(depth_map["input:0"], 1)
self.assertIn("fc/logits:0", depth_map["input:0"])
self.assertEqual(depth_map["input:0"]["fc/logits:0"], 7)
def test_mutate(self):
delta_max = 5
dp = depth.DepthProperty(p=1.0, delta_max=delta_max)
dp = dp.infer(self.subgraph_model).mutate()
depth_map = dp.depth_map
self.assertLen(depth_map, 1)
self.assertIn("input:0", depth_map)
self.assertLen(depth_map["input:0"], 1)
self.assertIn("fc/logits:0", depth_map["input:0"])
self.assertEqual(abs(7 - depth_map["input:0"]["fc/logits:0"]), delta_max)
def test_self_satisfy(self):
# The graph which generated the property should always satisfy the property.
self.assertTrue(self.dp.verify(self.subgraph_model))
def test_unsatisfy(self):
# This test removes the last dense layer, so the new graph should be less
# deep.
graph = new_graph(
input_names=["input"], output_names=["fc/relu"], ops=self.graph.ops)
subgraph_model = SubgraphModel(graph, self.constants, {}, {})
self.assertFalse(self.dp.verify(subgraph_model))
def test_satisfy(self):
# This test removes the last dense layer, so the old graph should be more
# deep.
ops = self.graph.ops[:-1]
ops[-1].name = "fc/logits"
graph = new_graph(
input_names=["input"], output_names=["fc/logits"], ops=ops)
subgraph_model = SubgraphModel(graph, self.constants, {}, {})
dp = depth.DepthProperty().infer(subgraph_model)
self.assertTrue(dp.verify(self.subgraph_model))
def test_rewire(self):
# orig: conv, relu, pool, conv, relu, pool, flatten, dense, relu, dense
# new: conv, relu, pool, conv, gelu, pool, flatten, dense, relu, dense
subgraph_spec = [
SubgraphNode(
op=new_op(
op_name="conv_layer1/conv/1",
op_type=OpType.CONV,
op_kwargs={
"features": 64,
"kernel_size": [1, 1]
},
input_names=["conv_layer0/avg_pool"]),),
SubgraphNode(
op=new_op(
op_name="conv_layer1/gelu/1",
op_type=OpType.GELU,
input_names=["conv_layer1/conv/1"]),
output_names=["conv_layer1/relu"])
]
graph = replace_subgraph(self.graph, subgraph_spec)
subgraph_model = SubgraphModel(graph, self.constants, {}, {}, subgraph_spec)
dp = depth.DepthProperty().infer(subgraph_model)
depth_map = dp.depth_map
self.assertLen(depth_map, 1)
self.assertIn("conv_layer0/avg_pool:0", depth_map)
self.assertLen(depth_map["conv_layer0/avg_pool:0"], 2)
self.assertIn("conv_layer1/relu:0", depth_map["conv_layer0/avg_pool:0"])
self.assertEqual(
depth_map["conv_layer0/avg_pool:0"]["conv_layer1/relu:0"], 1)
self.assertIn("conv_layer1/gelu/1:0", depth_map["conv_layer0/avg_pool:0"])
self.assertEqual(
depth_map["conv_layer0/avg_pool:0"]["conv_layer1/gelu/1:0"], 1)
def test_multi_input(self):
ops = [
new_op(
op_name="dense0",
op_type=OpType.DENSE,
op_kwargs={"features": 32},
input_names=["input"]),
new_op(
op_name="relu0",
op_type=OpType.RELU,
input_names=["dense0"]),
new_op(
op_name="dense1",
op_type=OpType.DENSE,
op_kwargs={"features": 32},
input_names=["input"]),
new_op(
op_name="relu1",
op_type=OpType.RELU,
input_names=["dense1"]),
new_op(
op_name="dense2",
op_type=OpType.DENSE,
op_kwargs={"features": 32},
input_names=["input"]),
new_op(
op_name="relu2",
op_type=OpType.RELU,
input_names=["dense2"]),
new_op(
op_name="add0",
op_type=OpType.ADD,
input_names=["relu0", "relu1"]),
new_op(
op_name="add1",
op_type=OpType.ADD,
input_names=["relu1", "relu2"]),
]
graph = new_graph(
input_names=["input"], output_names=["add0", "add1"], ops=ops)
subgraph_spec = [
SubgraphNode(
op=new_op(
op_name="relu0",
op_type=OpType.RELU,
input_names=["dense0"])),
SubgraphNode(
op=new_op(
op_name="relu1",
op_type=OpType.RELU,
input_names=["dense1"])),
SubgraphNode(
op=new_op(
op_name="relu2",
op_type=OpType.RELU,
input_names=["dense2"])),
SubgraphNode(
op=new_op(
op_name="add0",
op_type=OpType.ADD,
input_names=["relu0", "relu1"]),
output_names=["add0"]),
SubgraphNode(
op=new_op(
op_name="add1",
op_type=OpType.ADD,
input_names=["relu1", "relu2"]),
output_names=["add1"]),
]
replaced_graph = replace_subgraph(graph, subgraph_spec)
subgraph_model = SubgraphModel(replaced_graph, {}, {}, {}, subgraph_spec)
dp = depth.DepthProperty().infer(subgraph_model)
depth_map = dp.depth_map
self.assertLen(depth_map, 3)
self.assertIn("dense0:0", depth_map)
self.assertIn("dense1:0", depth_map)
self.assertIn("dense2:0", depth_map)
self.assertLen(depth_map["dense0:0"], 1)
self.assertEqual(depth_map["dense0:0"]["add0:0"], 2)
self.assertLen(depth_map["dense1:0"], 2)
self.assertEqual(depth_map["dense1:0"]["add0:0"], 2)
self.assertEqual(depth_map["dense1:0"]["add1:0"], 2)
self.assertLen(depth_map["dense2:0"], 1)
self.assertEqual(depth_map["dense2:0"]["add1:0"], 2)
if __name__ == "__main__":
test.main()
| # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for depth."""
from absl.testing import absltest as test
from abstract_nas.abstract import depth
from abstract_nas.model.concrete import new_graph
from abstract_nas.model.concrete import new_op
from abstract_nas.model.concrete import OpType
from abstract_nas.model.subgraph import replace_subgraph
from abstract_nas.model.subgraph import SubgraphModel
from abstract_nas.model.subgraph import SubgraphNode
from abstract_nas.zoo import cnn
class DepthTest(test.TestCase):
def setUp(self):
super().setUp()
self.graph, self.constants, _ = cnn.CifarNet()
self.subgraph_model = SubgraphModel(self.graph, self.constants, {}, {})
self.dp = depth.DepthProperty().infer(self.subgraph_model)
def test_infer(self):
depth_map = self.dp.depth_map
self.assertLen(depth_map, 1)
self.assertIn("input:0", depth_map)
self.assertLen(depth_map["input:0"], 1)
self.assertIn("fc/logits:0", depth_map["input:0"])
self.assertEqual(depth_map["input:0"]["fc/logits:0"], 7)
def test_mutate(self):
delta_max = 5
dp = depth.DepthProperty(p=1.0, delta_max=delta_max)
dp = dp.infer(self.subgraph_model).mutate()
depth_map = dp.depth_map
self.assertLen(depth_map, 1)
self.assertIn("input:0", depth_map)
self.assertLen(depth_map["input:0"], 1)
self.assertIn("fc/logits:0", depth_map["input:0"])
self.assertEqual(abs(7 - depth_map["input:0"]["fc/logits:0"]), delta_max)
def test_self_satisfy(self):
# The graph which generated the property should always satisfy the property.
self.assertTrue(self.dp.verify(self.subgraph_model))
def test_unsatisfy(self):
# This test removes the last dense layer, so the new graph should be less
# deep.
graph = new_graph(
input_names=["input"], output_names=["fc/relu"], ops=self.graph.ops)
subgraph_model = SubgraphModel(graph, self.constants, {}, {})
self.assertFalse(self.dp.verify(subgraph_model))
def test_satisfy(self):
# This test removes the last dense layer, so the old graph should be more
# deep.
ops = self.graph.ops[:-1]
ops[-1].name = "fc/logits"
graph = new_graph(
input_names=["input"], output_names=["fc/logits"], ops=ops)
subgraph_model = SubgraphModel(graph, self.constants, {}, {})
dp = depth.DepthProperty().infer(subgraph_model)
self.assertTrue(dp.verify(self.subgraph_model))
def test_rewire(self):
# orig: conv, relu, pool, conv, relu, pool, flatten, dense, relu, dense
# new: conv, relu, pool, conv, gelu, pool, flatten, dense, relu, dense
subgraph_spec = [
SubgraphNode(
op=new_op(
op_name="conv_layer1/conv/1",
op_type=OpType.CONV,
op_kwargs={
"features": 64,
"kernel_size": [1, 1]
},
input_names=["conv_layer0/avg_pool"]),),
SubgraphNode(
op=new_op(
op_name="conv_layer1/gelu/1",
op_type=OpType.GELU,
input_names=["conv_layer1/conv/1"]),
output_names=["conv_layer1/relu"])
]
graph = replace_subgraph(self.graph, subgraph_spec)
subgraph_model = SubgraphModel(graph, self.constants, {}, {}, subgraph_spec)
dp = depth.DepthProperty().infer(subgraph_model)
depth_map = dp.depth_map
self.assertLen(depth_map, 1)
self.assertIn("conv_layer0/avg_pool:0", depth_map)
self.assertLen(depth_map["conv_layer0/avg_pool:0"], 2)
self.assertIn("conv_layer1/relu:0", depth_map["conv_layer0/avg_pool:0"])
self.assertEqual(
depth_map["conv_layer0/avg_pool:0"]["conv_layer1/relu:0"], 1)
self.assertIn("conv_layer1/gelu/1:0", depth_map["conv_layer0/avg_pool:0"])
self.assertEqual(
depth_map["conv_layer0/avg_pool:0"]["conv_layer1/gelu/1:0"], 1)
def test_multi_input(self):
ops = [
new_op(
op_name="dense0",
op_type=OpType.DENSE,
op_kwargs={"features": 32},
input_names=["input"]),
new_op(
op_name="relu0",
op_type=OpType.RELU,
input_names=["dense0"]),
new_op(
op_name="dense1",
op_type=OpType.DENSE,
op_kwargs={"features": 32},
input_names=["input"]),
new_op(
op_name="relu1",
op_type=OpType.RELU,
input_names=["dense1"]),
new_op(
op_name="dense2",
op_type=OpType.DENSE,
op_kwargs={"features": 32},
input_names=["input"]),
new_op(
op_name="relu2",
op_type=OpType.RELU,
input_names=["dense2"]),
new_op(
op_name="add0",
op_type=OpType.ADD,
input_names=["relu0", "relu1"]),
new_op(
op_name="add1",
op_type=OpType.ADD,
input_names=["relu1", "relu2"]),
]
graph = new_graph(
input_names=["input"], output_names=["add0", "add1"], ops=ops)
subgraph_spec = [
SubgraphNode(
op=new_op(
op_name="relu0",
op_type=OpType.RELU,
input_names=["dense0"])),
SubgraphNode(
op=new_op(
op_name="relu1",
op_type=OpType.RELU,
input_names=["dense1"])),
SubgraphNode(
op=new_op(
op_name="relu2",
op_type=OpType.RELU,
input_names=["dense2"])),
SubgraphNode(
op=new_op(
op_name="add0",
op_type=OpType.ADD,
input_names=["relu0", "relu1"]),
output_names=["add0"]),
SubgraphNode(
op=new_op(
op_name="add1",
op_type=OpType.ADD,
input_names=["relu1", "relu2"]),
output_names=["add1"]),
]
replaced_graph = replace_subgraph(graph, subgraph_spec)
subgraph_model = SubgraphModel(replaced_graph, {}, {}, {}, subgraph_spec)
dp = depth.DepthProperty().infer(subgraph_model)
depth_map = dp.depth_map
self.assertLen(depth_map, 3)
self.assertIn("dense0:0", depth_map)
self.assertIn("dense1:0", depth_map)
self.assertIn("dense2:0", depth_map)
self.assertLen(depth_map["dense0:0"], 1)
self.assertEqual(depth_map["dense0:0"]["add0:0"], 2)
self.assertLen(depth_map["dense1:0"], 2)
self.assertEqual(depth_map["dense1:0"]["add0:0"], 2)
self.assertEqual(depth_map["dense1:0"]["add1:0"], 2)
self.assertLen(depth_map["dense2:0"], 1)
self.assertEqual(depth_map["dense2:0"]["add1:0"], 2)
if __name__ == "__main__":
test.main()
| en | 0.855671 | # coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for depth. # The graph which generated the property should always satisfy the property. # This test removes the last dense layer, so the new graph should be less # deep. # This test removes the last dense layer, so the old graph should be more # deep. # orig: conv, relu, pool, conv, relu, pool, flatten, dense, relu, dense # new: conv, relu, pool, conv, gelu, pool, flatten, dense, relu, dense | 2.314386 | 2 |
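The suite above treats DepthProperty's depth map as pairwise input-to-output distances over a graph of named tensors. As a rough illustration of that idea only (the library's actual algorithm is not shown here), a depth map over a DAG can be computed as longest paths between names:

from collections import defaultdict
from functools import lru_cache

def longest_path_depths(edges, inputs, outputs):
    """Pairwise longest-path lengths in a DAG (illustrative helper only)."""
    adj = defaultdict(list)
    for src, dst in edges:
        adj[src].append(dst)

    @lru_cache(maxsize=None)
    def longest(node, target):
        if node == target:
            return 0
        hops = [d for d in (longest(n, target) for n in adj[node]) if d is not None]
        return max(hops) + 1 if hops else None

    return {i: {o: longest(i, o) for o in outputs if longest(i, o) is not None}
            for i in inputs}

print(longest_path_depths([("dense1", "relu1"), ("relu1", "add0")], ["dense1"], ["add0"]))
# {'dense1': {'add0': 2}} -- whether DepthProperty counts hops exactly this way is an assumption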
telegram_alert.py | shubhadeepmandal394/remote-temperature-monitoring | 1 | 6615434 | import requests # for making HTTP requests
import json # library for handling JSON data
import time # module for sleep operation
from boltiot import Bolt # importing Bolt from boltiot module
import conf # config file
mybolt = Bolt(conf.bolt_api_key, conf.device_id)
def get_sensor_value_from_pin(pin):
"""Returns the sensor value. Returns -999 if request fails"""
try:
response = mybolt.analogRead(pin)
data = json.loads(response)
if data["success"] != 1:
print("Request not successfull")
print("This is the response->", data)
return -999
sensor_value = int(data["value"])
return sensor_value
except Exception as e:
print("Something went wrong when returning the sensor value")
print(e)
return -999
def send_telegram_message(message):
"""Sends message via Telegram"""
url = "https://api.telegram.org/" + conf.telegram_bot_id + "/sendMessage"
data = {
"chat_id": conf.telegram_chat_id,
"text": message
}
try:
response = requests.request(
"POST",
url,
params=data
)
print("This is the Telegram URL")
print(url)
print("This is the Telegram response")
print(response.text)
telegram_data = json.loads(response.text)
return telegram_data["ok"]
except Exception as e:
print("An error occurred in sending the alert message via Telegram")
print(e)
return False
while True:
# Step 1
sensor_value = get_sensor_value_from_pin("A0")
print("The current sensor value is:", sensor_value)
# Step 2
if sensor_value == -999:
print("Request was unsuccessfull. Skipping.")
time.sleep(10)
continue
# Step 3
if sensor_value >= conf.threshold:
print("Sensor value has exceeded threshold")
message = "Alert! Sensor value has exceeded " + str(conf.threshold) + \
". The current value is " + str(sensor_value)
telegram_status = send_telegram_message(message)
print("This is the Telegram status:", telegram_status)
# Step 4
time.sleep(10)
 | en | 0.402022 | # for making HTTP requests # library for handling JSON data # module for sleep operation # importing Bolt from boltiot module # config file Returns the sensor value. Returns -999 if request fails Sends message via Telegram # Step 1 # Step 2 # Step 3 # Step 4 | 3.087362 | 3
tracemap/management/commands/importasmspecieslist.py | parsingphase/batbox | 0 | 6615435 | import csv
from django.core.management.base import BaseCommand
from tracemap.models import Species
KEY_ORDER = 'order'
KEY_GENUS = 'genus'
KEY_SPECIES = 'specificepithet'
KEY_COMMON_NAME = 'maincommonname'
KEY_INTERNAL_ID = 'id'
# Use various prior knowledge if heuristics aren't adequate
canon_genus_map = {
'myotis': {
'canon_genus_3code': 'MYO'
},
'nyctalus': {
'canon_genus_3code': 'NYC'
},
}
class Command(BaseCommand):
help = 'Load CSV file from https://mammaldiversity.org into database'
def add_arguments(self, parser):
parser.add_argument('filename', type=str, help='Name of the file or directory to import')
def handle(self, *args, **kwargs):
required_fields = [KEY_GENUS, KEY_SPECIES, KEY_COMMON_NAME, KEY_ORDER, KEY_INTERNAL_ID]
filename = kwargs['filename']
column_map = {}
i = 0
with open(filename, newline='', encoding='utf-8') as csvfile:
reader = csv.reader(csvfile)
row = []
while len(row) == 0:
row = next(reader)
for index in range(0, len(row)):
column_map[row[index].lower()] = index
print(column_map)
for field in required_fields:
if field not in column_map.keys():
print(f"Can't find all required headers in CSV: {', '.join(required_fields)}")
exit(1)
o = column_map[KEY_ORDER]
g = column_map[KEY_GENUS]
s = column_map[KEY_SPECIES]
c = column_map[KEY_COMMON_NAME]
m = column_map[KEY_INTERNAL_ID]
new = 0
for row in reader:
i += 1
if len(row) > o and row[o].lower() == 'chiroptera': # non-blank
species_record = None
genus = row[g]
species = row[s]
common_name = row[c]
mdd_id = row[m]
existing_species = Species.objects.filter(genus=genus, species=species)
hits = len(existing_species)
if hits == 0:
species_record = Species()
species_record.species = species
species_record.genus = genus
species_record.common_name = common_name
print(f'Added {genus} {species} ({common_name})')
new += 1
elif hits == 1:
species_record = existing_species[0]
print(f'Found {genus} {species}, potentially updating')
else:
print(f'Already have multiple hits for {genus} {species}')
if species_record is not None:
if len(mdd_id):
species_record.mdd_id = mdd_id
genus_lower = genus.lower()
if genus_lower in canon_genus_map:
for key in canon_genus_map[genus_lower]:
                                # Django model instances don't support item assignment; use setattr
                                setattr(species_record, key, canon_genus_map[genus_lower][key])
species_record.save()
print(f'Read {i} rows, found {new} new bats')
 | en | 0.773896 | # Use various prior knowledge if heuristics aren't adequate # non-blank | 2.472606 | 2
src/results_view.py | tobiasvanderwerff/fingerspelling-detection | 0 | 6615436 | from typing import List, Dict
from collections import Counter
def format_secs_nicely(secs):
mins = int(secs // 60)
secs = secs % 60
return "{}m {:.3f}s".format(mins, secs)
def show_predicted_segments(predicted_segments, video_fps):
segment_timeslots = timeslots_from_segments(predicted_segments, video_fps)
print("{} possible fingerspelling segments detected:".format(len(segment_timeslots)))
for i, segment in enumerate(segment_timeslots):
start_time = format_secs_nicely(segment[0])
end_time = format_secs_nicely(segment[1])
print("Segment {}: {} - {}".format(i+1, start_time, end_time))
def show_video_results(video_path, video_signer, n_correct_segments, n_true_segments, n_clusters, pr1, rec1, pr2, rec2,
tp, fp, fn):
print("{} ({}): Pr: {:.4f}, Rec: {:.4f} -- TP: {}, FP: {}, FN: {} -- {} segments detected, "
"{}/{} correctly detected ({:.1f}%)".format(video_path, video_signer, pr1, rec1, tp, fp, fn,
n_clusters - 1, n_correct_segments, n_true_segments,
rec1 * 100))
def summarize_results(recall_per_video1: Dict[str, List[float]], precision_per_video1: Dict[str, List[float]],
recall_per_video2: Dict[str, List[float]], precision_per_video2: Dict[str, List[float]],
template_match_cnt: Counter, no_match_cnt: int, preselection_rejection_cnt: int,
total_n_frames: int, eps, min_samples, movement_threshold, template_match_thresh, input_path_name,
template_dir_name):
print("******************************************************")
print("Input set:\t{}".format(input_path_name))
print("Template set :\t{}\n".format(template_dir_name))
print(
"Parameters:\n"
"eps: %d\n"
"min_samples: %d\n"
"movement_threshold: %.3f\n"
"template_match_thresh: %d \n" % (eps, min_samples, movement_threshold, template_match_thresh)
)
print("Metric 1: Detected segments")
print("s1 -- AP: %.3f, AR: %.3f" % (sum(precision_per_video1["s1"]) / len(precision_per_video1["s1"]),
sum(recall_per_video1["s1"]) / len(recall_per_video1["s1"])))
print("s2 -- AP: %.3f, AR: %.3f" % (sum(precision_per_video1["s2"]) / len(precision_per_video1["s2"]),
sum(recall_per_video1["s2"]) / len(recall_per_video1["s2"])))
print("total -- AP: %.3f, AR: %.3f\n" % ((sum(precision_per_video1["s2"]) + sum(precision_per_video1["s1"])) /
(len(precision_per_video1["s2"]) + len(precision_per_video1["s1"])),
(sum(recall_per_video1["s2"]) + sum(recall_per_video1["s1"])) /
(len(recall_per_video1["s2"]) + len(recall_per_video1["s1"]))))
print("Metric 2: Detected frames")
print("s1 -- AP: %.3f, AR: %.3f" % (sum(precision_per_video2["s1"]) / len(precision_per_video2["s1"]),
sum(recall_per_video2["s1"]) / len(recall_per_video2["s1"])))
print("s2 -- AP: %.3f, AR: %.3f" % (sum(precision_per_video2["s2"]) / len(precision_per_video2["s2"]),
sum(recall_per_video2["s2"]) / len(recall_per_video2["s2"])))
print("AP: %.3f, AR: %.3f\n" % ((sum(precision_per_video2["s2"]) + sum(precision_per_video2["s1"])) /
(len(precision_per_video2["s2"]) + len(precision_per_video2["s1"])),
(sum(recall_per_video2["s2"]) + sum(recall_per_video2["s1"])) /
(len(recall_per_video2["s2"]) + len(recall_per_video2["s1"]))))
total_matches = sum(template_match_cnt.values())
print("Matches per template:")
for k, v in sorted(template_match_cnt.items(), key=lambda x: x[1]):
print("{}: {} ({:.2f}%)".format(k, v, int(v / total_matches * 100)))
print("\nNo of rejected frames (preselection): {} ({}%)".format(preselection_rejection_cnt,
int(preselection_rejection_cnt /
total_n_frames * 100)))
print("No of rejected frames (template_match_thresh): {} ({}%)".format(no_match_cnt,
int(no_match_cnt / total_n_frames * 100)))
print("******************************************************\n")
def timeslots_from_segments(segments: List[int], fps):
""" Returns time slots (start and end of segments) for each segment in the given list.
Segments is a list of binary numbers indicating for each frame whether it is a segment frame (1) or not (0). """
segment_timeslots, segment_found = [], False
ln = len(segments)
for i, frame_no in enumerate(segments):
if frame_no == 1: # Segment frame
if not segment_found:
segment_start_idx = i
segment_found = True
            if i == ln - 1 or segments[i + 1] == 0:  # close the segment; a plain "if" also catches single-frame segments
segment_end_idx = i
start_time = segment_start_idx / fps
end_time = segment_end_idx / fps
segment_timeslots.append((start_time, end_time))
segment_found = False
return segment_timeslots
 | en | 0.728156 | Returns time slots (start and end of segments) for each segment in the given list. Segments is a list of binary numbers indicating for each frame whether it is a segment frame (1) or not (0). # Segment frame | 2.833522 | 3
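A quick check of `timeslots_from_segments` on a toy prediction array, assuming 25 fps; the second tuple is the single-frame segment at the end.

segments = [0, 1, 1, 0, 1]
print(timeslots_from_segments(segments, fps=25))
# [(0.04, 0.08), (0.16, 0.16)]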
examples/supla_example.py | balloob/pychromecast | 1,874 | 6615437 | """
Example on how to use the Supla Controller
"""
# pylint: disable=invalid-name
import logging
from time import sleep
import sys
import requests
from bs4 import BeautifulSoup # pylint: disable=import-error
import pychromecast
from pychromecast import quick_play
# Change to the name of your Chromecast
CAST_NAME = "Kitchen Speaker"
# Change to the slug of the Supla program you want to play
# the slug is the last part of the url https://www.supla.fi/ohjelmat/<slug>
PROGRAM = "aamulypsy"
result = requests.get(f"https://www.supla.fi/ohjelmat/{PROGRAM}")
soup = BeautifulSoup(result.content, "html.parser")  # explicit parser avoids bs4's guessing warning
MEDIA_ID = soup.select('a[title*="Koko Shitti"]')[0]["href"].split("/")[-1]
print(MEDIA_ID)
logging.basicConfig(level=logging.DEBUG)
chromecasts, browser = pychromecast.get_listed_chromecasts(friendly_names=[CAST_NAME])
if not chromecasts:
print(f'No chromecast with name "{CAST_NAME}" discovered')
sys.exit(1)
cast = chromecasts[0]
# Start socket client's worker thread and wait for initial status update
cast.wait()
app_name = "supla"
app_data = {
"media_id": MEDIA_ID,
}
quick_play.quick_play(cast, app_name, app_data)
sleep(10)
| """
Example on how to use the Supla Controller
"""
# pylint: disable=invalid-name
import logging
from time import sleep
import sys
import requests
from bs4 import BeautifulSoup # pylint: disable=import-error
import pychromecast
from pychromecast import quick_play
# Change to the name of your Chromecast
CAST_NAME = "Kitchen Speaker"
# Change to the video id of the YouTube video
# video id is the last part of the url http://youtube.com/watch?v=video_id
PROGRAM = "aamulypsy"
result = requests.get(f"https://www.supla.fi/ohjelmat/{PROGRAM}")
soup = BeautifulSoup(result.content)
MEDIA_ID = soup.select('a[title*="Koko Shitti"]')[0]["href"].split("/")[-1]
print(MEDIA_ID)
logging.basicConfig(level=logging.DEBUG)
chromecasts, browser = pychromecast.get_listed_chromecasts(friendly_names=[CAST_NAME])
if not chromecasts:
print(f'No chromecast with name "{CAST_NAME}" discovered')
sys.exit(1)
cast = chromecasts[0]
# Start socket client's worker thread and wait for initial status update
cast.wait()
app_name = "supla"
app_data = {
"media_id": MEDIA_ID,
}
quick_play.quick_play(cast, app_name, app_data)
sleep(10) | en | 0.648666 | Example on how to use the Supla Controller # pylint: disable=invalid-name # pylint: disable=import-error # Change to the name of your Chromecast # Change to the video id of the YouTube video # video id is the last part of the url http://youtube.com/watch?v=video_id # Start socket client's worker thread and wait for initial status update | 2.647843 | 3 |
floris/wake_combination.py | tonyinme/FLORIS | 0 | 6615438 | # Copyright 2017 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import numpy as np
class WakeCombination():
def __init__(self, typeString):
self.typeString = typeString
typeMap = {
"fls": self._fls,
"sosfs": self._sosfs,
}
self.__combinationFunction = typeMap.get(self.typeString, None)
def combine(self, u_field, u_wake):
return self.__combinationFunction(u_field, u_wake)
# private functions defining the wake combinations
# u_field: the modified flow field without u_wake
# u_wake: the wake to add into the rest of the flow field
#
# the following functions return u_field with u_wake incorporated
# freestream linear superposition
def _fls(self, u_field, u_wake):
return u_field + u_wake
# sum of squares freestream superposition
def _sosfs(self, u_field, u_wake):
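        # np.hypot(a, b) computes sqrt(a**2 + b**2) element-wise, which is the
        # sum-of-squares combination of the two velocity fields.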
return np.hypot(u_wake, u_field)
| # Copyright 2017 NREL
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import numpy as np
class WakeCombination():
def __init__(self, typeString):
self.typeString = typeString
typeMap = {
"fls": self._fls,
"sosfs": self._sosfs,
}
self.__combinationFunction = typeMap.get(self.typeString, None)
def combine(self, u_field, u_wake):
return self.__combinationFunction(u_field, u_wake)
# private functions defining the wake combinations
# u_field: the modified flow field without u_wake
# u_wake: the wake to add into the rest of the flow field
#
# the following functions return u_field with u_wake incorporated
# freestream linear superposition
def _fls(self, u_field, u_wake):
return u_field + u_wake
# sum of squares freestream superposition
def _sosfs(self, u_field, u_wake):
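        # np.hypot(a, b) computes sqrt(a**2 + b**2) element-wise, which is the
        # sum-of-squares combination of the two velocity fields.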
return np.hypot(u_wake, u_field) | en | 0.810801 | # Copyright 2017 NREL # Licensed under the Apache License, Version 2.0 (the "License"); you may not use # this file except in compliance with the License. You may obtain a copy of the # License at http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # private functions defining the wake combinations # u_field: the modified flow field without u_wake # u_wake: the wake to add into the rest of the flow field # # the following functions return u_field with u_wake incorporated # freestream linear superposition # sum of squares freestream superposition | 2.301161 | 2 |
oops_fhir/r4/code_system/document_relationship_type.py | Mikuana/oops_fhir | 0 | 6615439 | from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["DocumentRelationshipType"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class DocumentRelationshipType:
"""
DocumentRelationshipType
The type of relationship between documents.
Status: draft - Version: 4.0.1
Copyright None
http://hl7.org/fhir/document-relationship-type
"""
replaces = CodeSystemConcept(
{
"code": "replaces",
"definition": "This document logically replaces or supersedes the target document.",
"display": "Replaces",
}
)
"""
Replaces
This document logically replaces or supersedes the target document.
"""
transforms = CodeSystemConcept(
{
"code": "transforms",
"definition": "This document was generated by transforming the target document (e.g. format or language conversion).",
"display": "Transforms",
}
)
"""
Transforms
This document was generated by transforming the target document (e.g. format or language conversion).
"""
signs = CodeSystemConcept(
{
"code": "signs",
"definition": "This document is a signature of the target document.",
"display": "Signs",
}
)
"""
Signs
This document is a signature of the target document.
"""
appends = CodeSystemConcept(
{
"code": "appends",
"definition": "This document adds additional information to the target document.",
"display": "Appends",
}
)
"""
Appends
This document adds additional information to the target document.
"""
class Meta:
resource = _resource
| from pathlib import Path
from fhir.resources.codesystem import CodeSystem
from oops_fhir.utils import CodeSystemConcept
__all__ = ["DocumentRelationshipType"]
_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))
class DocumentRelationshipType:
"""
DocumentRelationshipType
The type of relationship between documents.
Status: draft - Version: 4.0.1
Copyright None
http://hl7.org/fhir/document-relationship-type
"""
replaces = CodeSystemConcept(
{
"code": "replaces",
"definition": "This document logically replaces or supersedes the target document.",
"display": "Replaces",
}
)
"""
Replaces
This document logically replaces or supersedes the target document.
"""
transforms = CodeSystemConcept(
{
"code": "transforms",
"definition": "This document was generated by transforming the target document (e.g. format or language conversion).",
"display": "Transforms",
}
)
"""
Transforms
This document was generated by transforming the target document (e.g. format or language conversion).
"""
signs = CodeSystemConcept(
{
"code": "signs",
"definition": "This document is a signature of the target document.",
"display": "Signs",
}
)
"""
Signs
This document is a signature of the target document.
"""
appends = CodeSystemConcept(
{
"code": "appends",
"definition": "This document adds additional information to the target document.",
"display": "Appends",
}
)
"""
Appends
This document adds additional information to the target document.
"""
class Meta:
resource = _resource
| en | 0.724352 | DocumentRelationshipType The type of relationship between documents. Status: draft - Version: 4.0.1 Copyright None http://hl7.org/fhir/document-relationship-type Replaces This document logically replaces or supersedes the target document. Transforms This document was generated by transforming the target document (e.g. format or language conversion). Signs This document is a signature of the target document. Appends This document adds additional information to the target document. | 2.433215 | 2 |
hack_itau_quant/optimization/hrp.py | turing-usp/hack-itau-quant | 2 | 6615440 | <reponame>turing-usp/hack-itau-quant
# Based on https://medium.com/turing-talks/otimiza%C3%A7%C3%A3o-de-investimentos-com-intelig%C3%AAncia-artificial-548cf34dad4d
import seaborn as sns
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
import numpy as np
class HRP:
def __init__(self, cov_matrix: pd.DataFrame):
self._cov_matriz = cov_matrix
self._columns = cov_matrix.columns.to_list()
def optimize(self):
seriation_columns = self._matrix_seriation()
weights = self._get_weights(seriation_columns)
return weights
def _matrix_seriation(self):
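        # Ward-linkage hierarchical clustering reorders the assets so that
        # similar ones sit next to each other (quasi-diagonalization of the
        # covariance matrix); the clustermap plot itself is only a side effect.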
dendogram = sns.clustermap(
self._cov_matriz, method='ward', metric='euclidean')
seriation_columns = dendogram.dendrogram_col.reordered_ind
seriation_columns = [self._columns[index]
for index in seriation_columns]
return seriation_columns
def _get_weights(self, seriation_columns):
        # Initialize weights
weights = pd.Series(1, index=seriation_columns)
parities = [seriation_columns]
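        # Recursive bisection: repeatedly split each cluster in half and shift
        # weight toward the half whose inverse-variance portfolio shows the
        # lower volatility.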
while len(parities) > 0:
parities = [cluster[start:end]
for cluster in parities
for start, end in ((0, len(cluster) // 2), (len(cluster) // 2, len(cluster)))
if len(cluster) > 1]
for subcluster in range(0, len(parities), 2):
left_cluster = parities[subcluster]
right_cluster = parities[subcluster + 1]
left_cov_matrix = self._cov_matriz[left_cluster].loc[left_cluster]
inversa_diagonal = 1 / np.diag(left_cov_matrix.values)
weights_left_cluster = inversa_diagonal / \
np.sum(inversa_diagonal)
vol_left_cluster = np.dot(weights_left_cluster, np.dot(
left_cov_matrix, weights_left_cluster))
right_cov_matrix = self._cov_matriz[right_cluster].loc[right_cluster]
inversa_diagonal = 1 / np.diag(right_cov_matrix.values)
weights_right_cluster = inversa_diagonal / \
np.sum(inversa_diagonal)
vol_right_cluster = np.dot(weights_right_cluster, np.dot(
right_cov_matrix, weights_right_cluster))
alocation_factor = 1 - vol_left_cluster / \
(vol_left_cluster + vol_right_cluster)
weights[left_cluster] *= alocation_factor
weights[right_cluster] *= 1 - alocation_factor
weights = weights[self._columns].to_numpy()
return weights | # Based on https://medium.com/turing-talks/otimiza%C3%A7%C3%A3o-de-investimentos-com-intelig%C3%AAncia-artificial-548cf34dad4d
import seaborn as sns
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
import numpy as np
class HRP:
def __init__(self, cov_matrix: pd.DataFrame):
self._cov_matriz = cov_matrix
self._columns = cov_matrix.columns.to_list()
def optimize(self):
seriation_columns = self._matrix_seriation()
weights = self._get_weights(seriation_columns)
return weights
def _matrix_seriation(self):
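        # Ward-linkage hierarchical clustering reorders the assets so that
        # similar ones sit next to each other (quasi-diagonalization of the
        # covariance matrix); the clustermap plot itself is only a side effect.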
dendogram = sns.clustermap(
self._cov_matriz, method='ward', metric='euclidean')
seriation_columns = dendogram.dendrogram_col.reordered_ind
seriation_columns = [self._columns[index]
for index in seriation_columns]
return seriation_columns
def _get_weights(self, seriation_columns):
        # Initialize weights
weights = pd.Series(1, index=seriation_columns)
parities = [seriation_columns]
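        # Recursive bisection: repeatedly split each cluster in half and shift
        # weight toward the half whose inverse-variance portfolio shows the
        # lower volatility.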
while len(parities) > 0:
parities = [cluster[start:end]
for cluster in parities
for start, end in ((0, len(cluster) // 2), (len(cluster) // 2, len(cluster)))
if len(cluster) > 1]
for subcluster in range(0, len(parities), 2):
left_cluster = parities[subcluster]
right_cluster = parities[subcluster + 1]
left_cov_matrix = self._cov_matriz[left_cluster].loc[left_cluster]
inversa_diagonal = 1 / np.diag(left_cov_matrix.values)
weights_left_cluster = inversa_diagonal / \
np.sum(inversa_diagonal)
vol_left_cluster = np.dot(weights_left_cluster, np.dot(
left_cov_matrix, weights_left_cluster))
right_cov_matrix = self._cov_matriz[right_cluster].loc[right_cluster]
inversa_diagonal = 1 / np.diag(right_cov_matrix.values)
weights_right_cluster = inversa_diagonal / \
np.sum(inversa_diagonal)
vol_right_cluster = np.dot(weights_right_cluster, np.dot(
right_cov_matrix, weights_right_cluster))
alocation_factor = 1 - vol_left_cluster / \
(vol_left_cluster + vol_right_cluster)
weights[left_cluster] *= alocation_factor
weights[right_cluster] *= 1 - alocation_factor
weights = weights[self._columns].to_numpy()
return weights | en | 0.625187 | # Based on https://medium.com/turing-talks/otimiza%C3%A7%C3%A3o-de-investimentos-com-intelig%C3%AAncia-artificial-548cf34dad4d # Initialize weights | 2.467187 | 2 |
python-algorithm/leetcode/problem_1217.py | isudox/leetcode-solution | 5 | 6615441 | <filename>python-algorithm/leetcode/problem_1217.py
"""1217. Minimum Cost to Move Chips to The Same Position
https://leetcode.com/problems/minimum-cost-to-move-chips-to-the-same-position/
"""
from typing import List
class Solution:
def minCostToMoveChips(self, position: List[int]) -> int:
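        # Moving a chip by 2 is free, so only the parity of each position
        # matters; the answer is the smaller of the two parity counts.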
cnt = [0, 0]
for pos in position:
cnt[pos % 2] += 1
return min(cnt)
if __name__ == '__main__':
sol = Solution()
print(sol.minCostToMoveChips([1, 2, 3]))
| <filename>python-algorithm/leetcode/problem_1217.py
"""1217. Minimum Cost to Move Chips to The Same Position
https://leetcode.com/problems/minimum-cost-to-move-chips-to-the-same-position/
"""
from typing import List
class Solution:
def minCostToMoveChips(self, position: List[int]) -> int:
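        # Moving a chip by 2 is free, so only the parity of each position
        # matters; the answer is the smaller of the two parity counts.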
cnt = [0, 0]
for pos in position:
cnt[pos % 2] += 1
return min(cnt)
if __name__ == '__main__':
sol = Solution()
print(sol.minCostToMoveChips([1, 2, 3]))
| en | 0.660963 | 1217. Minimum Cost to Move Chips to The Same Position https://leetcode.com/problems/minimum-cost-to-move-chips-to-the-same-position/ | 3.74408 | 4 |
splitmotion.py | DVS-Lab/ppi-effect-sizes | 0 | 6615442 | #!/usr/bin/env python
#this script takes the text file with 12 columns of movement (Movement_regressors.txt) and copies the first 6 motion parameters into a new file (motionfile_6columns.txt).
import sys, os
inmotionf = sys.argv[1]
outmotionf = sys.argv[2]
motionfile=open(inmotionf, 'r')
motionfile_6columns = open(outmotionf, 'w')
for row in motionfile:
columns = row.strip().split()
motionfile_6columns.write("\t".join(columns[:6])+"\n")
motionfile.close()
motionfile_6columns.close()
| #!/usr/bin/env python
#this script takes the text file with 12 columns of movement (Movement_regressors.txt) and copies the first 6 motion parameters into a new file (motionfile_6columns.txt).
import sys, os
inmotionf = sys.argv[1]
outmotionf = sys.argv[2]
motionfile=open(inmotionf, 'r')
motionfile_6columns = open(outmotionf, 'w')
for row in motionfile:
columns = row.strip().split()
motionfile_6columns.write("\t".join(columns[:6])+"\n")
motionfile.close()
motionfile_6columns.close()
| en | 0.625959 | #!/usr/bin/env python #this script takes the text file with 12 columns of movement (Movement_regressors.txt) and copies the first 6 motion parameters into a new file (motionfile_6columns.txt). | 3.115014 | 3 |
bin/md5sum_main.py | minersoft/miner | 1 | 6615443 | #
# Copyright <NAME>, 2014
#
import optparse
import sys
import hashlib
from bin_utils import reopenFileInBinMode, expandFiles
usage = "Usage: %prog [<options>] <file>..."
parser = optparse.OptionParser(usage=usage, version="1.0", prog="md5sum")
def parseOptions():
parser.add_option("-b", "--binary", dest="binary", action="store_true",
help="read files in binary mode (default)")
parser.add_option("-t", "--text", dest="binary", action="store_false",
help="read files in text mode")
parser.set_defaults(binary=True)
(options, files) = parser.parse_args()
return (options, files)
def md5sum(fileObj, name):
READ_BUF_SIZE = 128*1024
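    # Hash the file in 128 KiB chunks so large files never have to fit in memory.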
md5Obj = hashlib.md5()
while True:
buf = fileObj.read(READ_BUF_SIZE)
if not buf:
break
md5Obj.update(buf)
    print md5Obj.hexdigest() + "\t" + name
(options, files) = parseOptions()
files = expandFiles(files)
if not files:
if options.binary:
reopenFileInBinMode(sys.stdin)
md5sum(sys.stdin, "stdin")
sys.exit()
def openFile(options):
if options.file:
try:
out = open(options.file, "wb")
except Exception as e:
print >>sys.stderr, "Failed to open %s for writing" % options.file
print str(e)
sys.exit(1)
else:
out = sys.stdout
return out
openMode = "rb" if options.binary else "r"
ec = 0
for fileName in files:
try:
fileObj = open(fileName, openMode)
except Exception as e:
print >> sys.stderr, str(e)
ec = 1
continue
md5sum(fileObj, fileName)
fileObj.close()
sys.exit(ec)
| #
# Copyright <NAME>, 2014
#
import optparse
import sys
import hashlib
from bin_utils import reopenFileInBinMode, expandFiles
usage = "Usage: %prog [<options>] <file>..."
parser = optparse.OptionParser(usage=usage, version="1.0", prog="md5sum")
def parseOptions():
parser.add_option("-b", "--binary", dest="binary", action="store_true",
help="read files in binary mode (default)")
parser.add_option("-t", "--text", dest="binary", action="store_false",
help="read files in text mode")
parser.set_defaults(binary=True)
(options, files) = parser.parse_args()
return (options, files)
def md5sum(fileObj, name):
READ_BUF_SIZE = 128*1024
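    # Hash the file in 128 KiB chunks so large files never have to fit in memory.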
md5Obj = hashlib.md5()
while True:
buf = fileObj.read(READ_BUF_SIZE)
if not buf:
break
md5Obj.update(buf)
    print md5Obj.hexdigest() + "\t" + name
(options, files) = parseOptions()
files = expandFiles(files)
if not files:
if options.binary:
reopenFileInBinMode(sys.stdin)
md5sum(sys.stdin, "stdin")
sys.exit()
def openFile(options):
if options.file:
try:
out = open(options.file, "wb")
except Exception as e:
print >>sys.stderr, "Failed to open %s for writing" % options.file
print str(e)
sys.exit(1)
else:
out = sys.stdout
return out
openMode = "rb" if options.binary else "r"
ec = 0
for fileName in files:
try:
fileObj = open(fileName, openMode)
except Exception as e:
print >> sys.stderr, str(e)
ec = 1
continue
md5sum(fileObj, fileName)
fileObj.close()
sys.exit(ec)
| en | 0.611836 | # # Copyright <NAME>, 2014 # | 2.438674 | 2 |
get_colors.py | Anastasia-Paliy/Dominant_Colors | 0 | 6615444 | <gh_stars>0
from bs4 import BeautifulSoup
import requests
import json
url = 'https://colorscheme.ru/color-names.html'
page = requests.get(url)
print(page.status_code)
soup = BeautifulSoup(page.text, 'html.parser')
list_0 = soup.findAll('td')
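# Each color row appears to span six <td> cells: every sixth cell starting at
# index 1 holds a color name and every sixth starting at index 0 its value;
# the slicing below strips the surrounding tag text from the str()-rendered cells.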
names = list_0[1::6]
for i in range(len(names)):
names[i] = str(names[i])[4::][:-5:]
slots = list_0[::6]
for i in range(len(slots)):
slots[i] = str(slots[i])[13::][:-7:]
dictionary = dict(zip(slots, names))
with open("color_names.json", 'w') as file:
json.dump(dictionary, file, indent = 4)
| from bs4 import BeautifulSoup
import requests
import json
url = 'https://colorscheme.ru/color-names.html'
page = requests.get(url)
print(page.status_code)
soup = BeautifulSoup(page.text, 'html.parser')
list_0 = soup.findAll('td')
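# Each color row appears to span six <td> cells: every sixth cell starting at
# index 1 holds a color name and every sixth starting at index 0 its value;
# the slicing below strips the surrounding tag text from the str()-rendered cells.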
names = list_0[1::6]
for i in range(len(names)):
names[i] = str(names[i])[4::][:-5:]
slots = list_0[::6]
for i in range(len(slots)):
slots[i] = str(slots[i])[13::][:-7:]
dictionary = dict(zip(slots, names))
with open("color_names.json", 'w') as file:
json.dump(dictionary, file, indent = 4) | none | 1 | 2.81574 | 3 | |
benchmarks/code_splice/csf.py | PSSF23/SPDT | 3 | 6615445 | """
Author: <NAME>
"""
import time
import numpy as np
import pandas as pd
from numpy.random import permutation
from sklearn.model_selection import train_test_split
from sdtf import CascadeStreamForest
def write_result(filename, acc_ls):
"""Writes results to specified text file"""
    with open(filename, "w") as output:  # ensure the file is flushed and closed
        for acc in acc_ls:
            output.write(str(acc) + "\n")
def prediction(classifier):
"""Generates predictions from model"""
predictions = classifier.predict(X_test)
p_t = 0
for i in range(X_test.shape[0]):
if predictions[i] == y_test[i]:
p_t += 1
return p_t / X_test.shape[0]
def experiment_csf():
"""Runs experiments for Cascade Stream Forest"""
csf_l = []
train_time_l = []
test_time_l = []
csf = CascadeStreamForest()
for i in range(23):
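        # Feed the shuffled training data to the forest in 23 batches of 100
        # samples each.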
X_t = X_r[i * 100 : (i + 1) * 100]
y_t = y_r[i * 100 : (i + 1) * 100]
# Train the model
start_time = time.perf_counter()
csf.partial_fit(X_t, y_t, classes=[0, 1, 2])
end_time = time.perf_counter()
train_time_l.append(end_time - start_time)
# Test the model
start_time = time.perf_counter()
csf_l.append(prediction(csf))
end_time = time.perf_counter()
test_time_l.append(end_time - start_time)
# Reformat the train times
for i in range(1, 23):
train_time_l[i] += train_time_l[i - 1]
return csf_l, train_time_l, test_time_l
# prepare splice DNA data
df = pd.read_csv("../dna.csv")
X = df.drop(["Label"], axis=1).values
y = df["Label"].values
X_train, X_test, y_train, y_test = train_test_split(X, y)
# Perform experiments
csf_acc_l = []
csf_train_t_l = []
csf_test_t_l = []
for i in range(10):
p = permutation(X_train.shape[0])
X_r = X_train[p]
y_r = y_train[p]
csf_acc, csf_train_t, csf_test_t = experiment_csf()
csf_acc_l.append(csf_acc)
csf_train_t_l.append(csf_train_t)
csf_test_t_l.append(csf_test_t)
write_result("../csf/splice_acc.txt", csf_acc_l)
write_result("../csf/splice_train_t.txt", csf_train_t_l)
write_result("../csf/splice_test_t.txt", csf_test_t_l)
| """
Author: <NAME>
"""
import time
import numpy as np
import pandas as pd
from numpy.random import permutation
from sklearn.model_selection import train_test_split
from sdtf import CascadeStreamForest
def write_result(filename, acc_ls):
"""Writes results to specified text file"""
    with open(filename, "w") as output:  # ensure the file is flushed and closed
        for acc in acc_ls:
            output.write(str(acc) + "\n")
def prediction(classifier):
"""Generates predictions from model"""
predictions = classifier.predict(X_test)
p_t = 0
for i in range(X_test.shape[0]):
if predictions[i] == y_test[i]:
p_t += 1
return p_t / X_test.shape[0]
def experiment_csf():
"""Runs experiments for Cascade Stream Forest"""
csf_l = []
train_time_l = []
test_time_l = []
csf = CascadeStreamForest()
for i in range(23):
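        # Feed the shuffled training data to the forest in 23 batches of 100
        # samples each.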
X_t = X_r[i * 100 : (i + 1) * 100]
y_t = y_r[i * 100 : (i + 1) * 100]
# Train the model
start_time = time.perf_counter()
csf.partial_fit(X_t, y_t, classes=[0, 1, 2])
end_time = time.perf_counter()
train_time_l.append(end_time - start_time)
# Test the model
start_time = time.perf_counter()
csf_l.append(prediction(csf))
end_time = time.perf_counter()
test_time_l.append(end_time - start_time)
# Reformat the train times
for i in range(1, 23):
train_time_l[i] += train_time_l[i - 1]
return csf_l, train_time_l, test_time_l
# prepare splice DNA data
df = pd.read_csv("../dna.csv")
X = df.drop(["Label"], axis=1).values
y = df["Label"].values
X_train, X_test, y_train, y_test = train_test_split(X, y)
# Perform experiments
csf_acc_l = []
csf_train_t_l = []
csf_test_t_l = []
for i in range(10):
p = permutation(X_train.shape[0])
X_r = X_train[p]
y_r = y_train[p]
csf_acc, csf_train_t, csf_test_t = experiment_csf()
csf_acc_l.append(csf_acc)
csf_train_t_l.append(csf_train_t)
csf_test_t_l.append(csf_test_t)
write_result("../csf/splice_acc.txt", csf_acc_l)
write_result("../csf/splice_train_t.txt", csf_train_t_l)
write_result("../csf/splice_test_t.txt", csf_test_t_l)
| en | 0.722734 | Author: <NAME> Writes results to specified text file Returns the model's accuracy on the held-out test set Runs experiments for Cascade Stream Forest # Train the model # Test the model # Reformat the train times # prepare splice DNA data # Perform experiments | 2.621423 | 3 |
python/setup.py | mookerji/pl2 | 1 | 6615446 | <gh_stars>1-10
#! /usr/bin/env python
from distutils.core import setup
setup(name = 'rtree',
version = '0.0.1',
description = 'Python RTree implementation',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/mookerji/pl2',
packages = ['rtree',
'rtree.rtree',
'rtree.types',
'rtree.utils',
'rtree.tests.test_bench',
'rtree.tests.test_rtree',
'rtree.tests.test_types',
'rtree.tests.test_utils'],
classifiers = ["Development Status :: 2 - Pre-Alpha",
"Environment :: Console"]
)
| #! /usr/bin/env python
from distutils.core import setup
setup(name = 'rtree',
version = '0.0.1',
description = 'Python RTree implementation',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/mookerji/pl2',
packages = ['rtree',
'rtree.rtree',
'rtree.types',
'rtree.utils',
'rtree.tests.test_bench',
'rtree.tests.test_rtree',
'rtree.tests.test_types',
'rtree.tests.test_utils'],
classifiers = ["Development Status :: 2 - Pre-Alpha",
"Environment :: Console"]
) | ru | 0.148623 | #! /usr/bin/env python | 1.085238 | 1 |
kanimysql/func.py | fx-kirin/kanimysql | 0 | 6615447 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 zenbook <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
"""
def count(column):
return '#COUNT(`%s`)' % (column)
def sum(column):
return '#SUM(`%s`)' % (column)
def desc(column):
return '#`%s` DESC' % (column)
def escape(query):
return '#%s' % (query)
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2018 zenbook <<EMAIL>>
#
# Distributed under terms of the MIT license.
"""
"""
def count(column):
return '#COUNT(`%s`)' % (column)
def sum(column):
return '#SUM(`%s`)' % (column)
def desc(column):
return '#`%s` DESC' % (column)
def escape(query):
return '#%s' % (query)
| en | 0.618121 | #! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright © 2018 zenbook <<EMAIL>> # # Distributed under terms of the MIT license. | 2.400355 | 2 |
module/multimedia/configuration/subtitle.py | moonwatcher/tubo | 0 | 6615448 | # -*- coding: utf-8 -*-
{
'subtitle filter':{
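    # Each named filter below declares a scope ('line' or 'slide'), an action
    # ('replace' or 'drop'), case sensitivity, and a list of regular
    # expressions (pattern/replacement pairs for 'replace' filters).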
'punctuation':{
'scope':'line',
'action':'replace',
'ignore case':False,
'expression':[
(ur'^[-?\.,!:;"\'\s]+(.*)$', '\\1'),
(ur'^(.*)[-?\.,!:;"\'\s]+$', '\\1'),
],
},
'leftover':{
'scope':'line',
'action':'drop',
'ignore case':True,
'expression':[
ur'^\([^\)]+\)$',
ur'^[\[\]\(\)]*$',
ur'^[-?\.,!:;"\'\s]*$',
],
},
'hebrew noise':{
'scope':'slide',
'action':'drop',
'ignore case':True,
'expression':[
ur':סנכרון',
ur':תרגום',
ur':שיפוץ',
ur':לפרטים',
ur'סונכרן',
ur'תורגם על ידי',
ur'תורגם חלקית',
ur'סנכרן לגרסה זו',
ur'תורגם ע"י',
ur'שופץ ע"י',
ur'תורגם משמיעה',
ur'קריעה וסינכרון',
ur'תוקן על ידי',
ur'תורגם על-ידי',
ur'תורגם ע"י',
ur'תוקן ע"י',
ur'הובא והוכן ע"י',
ur'תורגם וסוכנרן',
ur'תורגם וסונכרן',
ur'תוקן קלות ע"י',
ur'תרגום זה בוצע על ידי',
ur'סונכרן לגירסא זו ע"י',
ur'תרגום זה נעשה על ידי',
ur'תורגם עם הרבה זיעה על ידי',
ur'תורגם מספרדית ע"י אסף פארי',
ur'כתוביות ע"י',
ur'הגהה וסנכרון ע"י',
ur'שנוכל להמשיך לתרגם',
ur'הפרק מוקדש',
ur'מצוות טורק',
ur'shayx ע"י',
ur'pusel :סנכרון',
ur'תרגום: רותם ושמעון',
ur'שיפוץ: השייח\' הסעודי',
ur'שופץ ע"י השייח\' הסעודי',
ur'תרגום: שמעון ורותם אברג\'יל',
ur'כתובית זו הובאה',
ur'שופצה, נערכה וסונכרנה לגרסה זו',
ur'ברוכה הבאה אלוירה',
ur'לצוות מתרגמי האוס',
ur'אלוירה ברוכה הבאה',
ur'עמוס נמני',
ur'אינדיאנית שלי',
ur'יומולדת שמח',
ur'מוקדש לך',
ur'מונחים רפואיים - ג\'ון דו',
ur'מפורום תפוז',
ur'מוקדש לפולי שלי',
ur':כתוביות',
ur'^בלעדית עבור$',
ur'הורד מהאתר',
ur'על ההגהה workbook',
ur'מוקדש לכל אוהבי האוס אי שם',
ur'theterminator נערך ותוקן בשיתוף עם',
ur'התרגום נעשה על ידי המוריד',
ur'תורגם וסונוכרן משמיעה ע"י',
ur'\bצפייה מהנה\b',
ur'\bצפיה מהנה\b',
ur'נקרע ותוקן',
ur'אבי דניאלי',
ur'אוהבים את התרגומים שלנו',
ur'נקלענו למאבק',
ur'משפטי מתמשך',
ur'לבילד המתקשה בהבנת קרדיטים',
ur'אנא תרמו לנו כדי',
ur'הגהה על-ידי',
ur'^עריכה לשונית$',
ur'^white fang-תרגום: עמית יקיר ו$',
ur'ערן טלמור',
ur'\bעדי-בלי-בצל\b',
ur'\bבקרו אותנו בפורום\b',
ur'הודה בוז',
ur'\b-תודה מיוחדת ל\b',
ur'^extreme מקבוצת$',
ur'ialfan-ו mb0:עברית',
ur'י ביצה קשה',
ur'^ב$',
ur'^בי$',
ur'^ביצ$',
ur'^ביצה$',
ur'^ביצה ק$',
ur'^ביצה קש$',
ur'^ביצה קשה$',
ur'ליונהארט',
ur'\bמצוות פושל\b',
ur'\bassem נקרע ע"י\b',
ur'\bkawa: סנכרון\b',
ur'אוהבת לנצח, שרון',
],
},
'noise':{
'scope':'slide',
'action':'drop',
'ignore case':True,
'expression':[
ur'www\.allsubs\.org',
ur'\bswsub\b',
ur'\bresync\b',
ur'\b[a-za-z0-9\.]+@gmail.\s*com\b',
ur'cync\sby\slanmao',
ur'www\.1000fr\.com',
ur'www\.tvsubtitles\.net',
ur'ytet-vicky8800',
ur'www\.ydy\.com',
ur'sync:gagegao',
ur'frm-lanma',
ur'nowa\swizja',
ur'ssmink',
ur'\blinx\b',
ur'torec',
ur'\byanx26\b',
ur'\bgreenscorpion\b',
ur'\bneotrix\b',
ur'\bglfinish\b',
ur'\bshloogy\b',
ur'\.co\.il',
ur'\by0natan\b',
ur'\belad\b',
ur'sratim',
ur'donkey cr3w',
ur'r-subs',
ur'\[d-s\]',
ur'ponkoit',
ur'\bsubbie\b',
ur'\bxsesa\b',
ur'napisy pobrane',
ur'\bphaelox\b',
ur'divxstation',
ur'\bpetabit\b',
ur'\bronkey\b',
ur'chococat3@walla',
ur'warez',
ur'\bdrsub\b',
ur'\beliavgold\b',
ur'^elvira$',
ur'\blob93\b',
ur'\belvir\b',
ur'\boofir\b',
ur'\bkrok\b',
ur'\bqsubd\b',
ur'\bariel046\b',
ur'\bzipc\b',
ur'\btecnodrom\b',
ur'visiontext subtitles',
ur'english sdh',
ur'srulikg',
ur'lh translators team',
ur'[-=\s]+sub-zero[-=\s]+',
ur'lionetwork',
ur'^eric$',
ur'subz3ro',
ur'^david-z$',
ur'<EMAIL>',
ur'elran_o',
ur'mcsnagel',
ur'\boutwit\b',
ur'^gimly$',
ur'\btinyurl\b',
ur'\bfoxriver\b',
ur'\bextremesubs\b',
ur'megalomania tree',
ur'xmonwow',
ur'\bciwan\b',
ur'\bnata4ever\b',
ur'\byosefff\b',
ur'\bhentaiman\b',
ur'\bfoxi9\b',
ur'\bgamby\b',
ur'\bbrassica nigra\b',
ur'\bqsubs\b',
ur'\bsharetw\b',
ur'\bserethd\b',
ur'hazy7868',
                ur'subscenter\.org',
                ur'\blakota\b',
                ur'\bnzigi\b',
                ur'\bqwer90\b',
ur'roni_eliav',
ur'subscenter',
ur'\bkuniva\b',
ur'hdbits.org',
ur'addic7ed',
ur'hdsubs',
ur'corrected by elderman',
],
},
'typo':{
'scope':'line',
'action':'replace',
'ignore case':False,
'expression':[
(ur'♪', ''),
(ur'¶', ''),
(ur'\b +(,|\.|\?|%|!)\b', '\\1 '),
(ur'\b(,|\.|\?|%|!) +\b', '\\1 '),
(ur'\.\s*\.\s*\.\.?', '...'),
(ur'</?[^>]+/?>', ''),
(ur'\'{2}', '"'),
(ur'\s+\)', ')'),
(ur'\(\s+', '('),
(ur'\s+\]', ']'),
(ur'\[\s+', '['),
(ur'\[[^\]]+\]\s*', ''),
(ur'^[^\]]+\]', ''),
(ur'\[[^\]]+$', ''),
(ur'\([#a-zA-Z0-9l\s]+\)', ''),
(ur'\([#a-zA-Z0-9l\s]+$', ''),
(ur'^[#a-zA-Z0-9l\s]+\)', ''),
(ur'^[-\s]+', ''),
(ur'[-\s]+$', ''),
(ur'\b^[-A-Z\s]+[0-9]*:\s*', ''),
(ur'(?<=[a-zA-Z\'])I', 'l'),
(ur'^[-\s]*$', ''),
],
},
'english typo':{
'scope':'line',
'action':'replace',
'ignore case':False,
'expression':[
(ur'Theysaid', u'They said'),
(ur'\bIast\b', u'last'),
(ur'\bIook\b', u'look'),
(ur'\bIetting\b', u'letting'),
(ur'\bIet\b', u'let'),
(ur'\bIooking\b', u'looking'),
(ur'\bIife\b', u'life'),
(ur'\bIeft\b', u'left'),
(ur'\bIike\b', u'like'),
(ur'\bIittle\b', u'little'),
(ur'\bIadies\b', u'ladies'),
(ur'\bIearn\b', u'learn'),
(ur'\bIanded\b', u'landed'),
(ur'\bIocked\b', u'locked'),
(ur'\bIie\b', u'lie'),
(ur'\bIong\b', u'long'),
(ur'\bIine\b', u'line'),
(ur'\bIives\b', u'lives'),
(ur'\bIeave\b', u'leave'),
(ur'\bIawyer\b', u'lawyer'),
(ur'\bIogs\b', u'logs'),
(ur'\bIack\b', u'lack'),
(ur'\bIove\b', u'love'),
(ur'\bIot\b', u'lot'),
(ur'\bIanding\b', u'landing'),
(ur'\bIet\'s\b', u'let\'s'),
(ur'\bIand\b', u'land'),
(ur'\bIying\b', u'lying'),
(ur'\bIist\b', u'list'),
(ur'\bIoved\b', u'loved'),
(ur'\bIoss\b', u'loss'),
(ur'\bIied\b', u'lied'),
(ur'\bIaugh\b', u'laugh'),
(ur'\b(h|H)avert\b', u'\\1aven\'t'),
(ur'\b(w|W)asrt\b', u'\\1asn\'t'),
(ur'\b(d|D)oesrt\b', u'\\1oesn\'t'),
(ur'\b(d|D)ort\b', u'\\1on\'t'),
(ur'\b(d|D)idrt\b', u'\\1idn\'t'),
(ur'\b(a|A)irt\b', u'\\1in\'t'),
(ur'\b(i|I)srt\b', u'\\1sn\'t'),
(ur'\b(w|W)ort\b', u'\\1on\'t'),
(ur'\b(c|C|w|W|s|S)ouldrt\b', u'\\1ouldn\'t'),
(ur'\barert\b', u'aren\'t'),
(ur'\bls\b', u'Is'),
(ur'\b(L|l)f\b', u'If'),
(ur'\blt\b', u'It'),
(ur'\blt\'s\b', u'It\'s'),
(ur'\bl\'m\b', u'I\'m'),
(ur'\bl\'ll\b', u'I\'ll'),
(ur'\bl\'ve\b', u'I\'ve'),
(ur'\bl\b', u'I'),
(ur'\bln\b', u'In'),
(ur'\blmpossible\b', u'Impossible'),
(ur'\bIight\b', u'light'),
(ur'\bIevitation\b', u'levitation'),
(ur'\bIeaving\b', u'leaving'),
(ur'\bIooked\b', u'looked'),
(ur'\bIucky\b', u'lucky'),
(ur'\bIuck\b', u'luck'),
(ur'\bIater\b', u'later'),
(ur'\bIift\b', u'lift'),
(ur'\bIip\b', u'lip'),
(ur'\bIooks\b', u'looks'),
(ur'\bIaid\b', u'laid'),
(ur'\bIikely\b', u'likely'),
(ur'\bIow\b', u'low'),
(ur'\bIeast\b', u'least'),
(ur'\bIeader\b', u'leader'),
(ur'\bIocate\b', u'locate'),
(ur'\bIaw\b', u'law'),
(ur'\bIately\b', u'lately'),
(ur'\bIiar\b', u'liar'),
(ur'\bIate\b', u'late'),
(ur'\bIonger\b', u'longer'),
(ur'\bIive\b', u'live'),
],
},
},
}
| # -*- coding: utf-8 -*-
{
'subtitle filter':{
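    # Each named filter below declares a scope ('line' or 'slide'), an action
    # ('replace' or 'drop'), case sensitivity, and a list of regular
    # expressions (pattern/replacement pairs for 'replace' filters).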
'punctuation':{
'scope':'line',
'action':'replace',
'ignore case':False,
'expression':[
(ur'^[-?\.,!:;"\'\s]+(.*)$', '\\1'),
(ur'^(.*)[-?\.,!:;"\'\s]+$', '\\1'),
],
},
'leftover':{
'scope':'line',
'action':'drop',
'ignore case':True,
'expression':[
ur'^\([^\)]+\)$',
ur'^[\[\]\(\)]*$',
ur'^[-?\.,!:;"\'\s]*$',
],
},
'hebrew noise':{
'scope':'slide',
'action':'drop',
'ignore case':True,
'expression':[
ur':סנכרון',
ur':תרגום',
ur':שיפוץ',
ur':לפרטים',
ur'סונכרן',
ur'תורגם על ידי',
ur'תורגם חלקית',
ur'סנכרן לגרסה זו',
ur'תורגם ע"י',
ur'שופץ ע"י',
ur'תורגם משמיעה',
ur'קריעה וסינכרון',
ur'תוקן על ידי',
ur'תורגם על-ידי',
ur'תורגם ע"י',
ur'תוקן ע"י',
ur'הובא והוכן ע"י',
ur'תורגם וסוכנרן',
ur'תורגם וסונכרן',
ur'תוקן קלות ע"י',
ur'תרגום זה בוצע על ידי',
ur'סונכרן לגירסא זו ע"י',
ur'תרגום זה נעשה על ידי',
ur'תורגם עם הרבה זיעה על ידי',
ur'תורגם מספרדית ע"י אסף פארי',
ur'כתוביות ע"י',
ur'הגהה וסנכרון ע"י',
ur'שנוכל להמשיך לתרגם',
ur'הפרק מוקדש',
ur'מצוות טורק',
ur'shayx ע"י',
ur'pusel :סנכרון',
ur'תרגום: רותם ושמעון',
ur'שיפוץ: השייח\' הסעודי',
ur'שופץ ע"י השייח\' הסעודי',
ur'תרגום: שמעון ורותם אברג\'יל',
ur'כתובית זו הובאה',
ur'שופצה, נערכה וסונכרנה לגרסה זו',
ur'ברוכה הבאה אלוירה',
ur'לצוות מתרגמי האוס',
ur'אלוירה ברוכה הבאה',
ur'עמוס נמני',
ur'אינדיאנית שלי',
ur'יומולדת שמח',
ur'מוקדש לך',
ur'מונחים רפואיים - ג\'ון דו',
ur'מפורום תפוז',
ur'מוקדש לפולי שלי',
ur':כתוביות',
ur'^בלעדית עבור$',
ur'הורד מהאתר',
ur'על ההגהה workbook',
ur'מוקדש לכל אוהבי האוס אי שם',
ur'theterminator נערך ותוקן בשיתוף עם',
ur'התרגום נעשה על ידי המוריד',
ur'תורגם וסונוכרן משמיעה ע"י',
ur'\bצפייה מהנה\b',
ur'\bצפיה מהנה\b',
ur'נקרע ותוקן',
ur'אבי דניאלי',
ur'אוהבים את התרגומים שלנו',
ur'נקלענו למאבק',
ur'משפטי מתמשך',
ur'לבילד המתקשה בהבנת קרדיטים',
ur'אנא תרמו לנו כדי',
ur'הגהה על-ידי',
ur'^עריכה לשונית$',
ur'^white fang-תרגום: עמית יקיר ו$',
ur'ערן טלמור',
ur'\bעדי-בלי-בצל\b',
ur'\bבקרו אותנו בפורום\b',
ur'הודה בוז',
ur'\b-תודה מיוחדת ל\b',
ur'^extreme מקבוצת$',
ur'ialfan-ו mb0:עברית',
ur'י ביצה קשה',
ur'^ב$',
ur'^בי$',
ur'^ביצ$',
ur'^ביצה$',
ur'^ביצה ק$',
ur'^ביצה קש$',
ur'^ביצה קשה$',
ur'ליונהארט',
ur'\bמצוות פושל\b',
ur'\bassem נקרע ע"י\b',
ur'\bkawa: סנכרון\b',
ur'אוהבת לנצח, שרון',
],
},
'noise':{
'scope':'slide',
'action':'drop',
'ignore case':True,
'expression':[
ur'www\.allsubs\.org',
ur'\bswsub\b',
ur'\bresync\b',
ur'\b[a-za-z0-9\.]+@gmail.\s*com\b',
ur'cync\sby\slanmao',
ur'www\.1000fr\.com',
ur'www\.tvsubtitles\.net',
ur'ytet-vicky8800',
ur'www\.ydy\.com',
ur'sync:gagegao',
ur'frm-lanma',
ur'nowa\swizja',
ur'ssmink',
ur'\blinx\b',
ur'torec',
ur'\byanx26\b',
ur'\bgreenscorpion\b',
ur'\bneotrix\b',
ur'\bglfinish\b',
ur'\bshloogy\b',
ur'\.co\.il',
ur'\by0natan\b',
ur'\belad\b',
ur'sratim',
ur'donkey cr3w',
ur'r-subs',
ur'\[d-s\]',
ur'ponkoit',
ur'\bsubbie\b',
ur'\bxsesa\b',
ur'napisy pobrane',
ur'\bphaelox\b',
ur'divxstation',
ur'\bpetabit\b',
ur'\bronkey\b',
ur'chococat3@walla',
ur'warez',
ur'\bdrsub\b',
ur'\beliavgold\b',
ur'^elvira$',
ur'\blob93\b',
ur'\belvir\b',
ur'\boofir\b',
ur'\bkrok\b',
ur'\bqsubd\b',
ur'\bariel046\b',
ur'\bzipc\b',
ur'\btecnodrom\b',
ur'visiontext subtitles',
ur'english sdh',
ur'srulikg',
ur'lh translators team',
ur'[-=\s]+sub-zero[-=\s]+',
ur'lionetwork',
ur'^eric$',
ur'subz3ro',
ur'^david-z$',
ur'<EMAIL>',
ur'elran_o',
ur'mcsnagel',
ur'\boutwit\b',
ur'^gimly$',
ur'\btinyurl\b',
ur'\bfoxriver\b',
ur'\bextremesubs\b',
ur'megalomania tree',
ur'xmonwow',
ur'\bciwan\b',
ur'\bnata4ever\b',
ur'\byosefff\b',
ur'\bhentaiman\b',
ur'\bfoxi9\b',
ur'\bgamby\b',
ur'\bbrassica nigra\b',
ur'\bqsubs\b',
ur'\bsharetw\b',
ur'\bserethd\b',
ur'hazy7868',
                ur'subscenter\.org',
                ur'\blakota\b',
                ur'\bnzigi\b',
                ur'\bqwer90\b',
ur'roni_eliav',
ur'subscenter',
ur'\bkuniva\b',
ur'hdbits.org',
ur'addic7ed',
ur'hdsubs',
ur'corrected by elderman',
],
},
'typo':{
'scope':'line',
'action':'replace',
'ignore case':False,
'expression':[
(ur'♪', ''),
(ur'¶', ''),
(ur'\b +(,|\.|\?|%|!)\b', '\\1 '),
(ur'\b(,|\.|\?|%|!) +\b', '\\1 '),
(ur'\.\s*\.\s*\.\.?', '...'),
(ur'</?[^>]+/?>', ''),
(ur'\'{2}', '"'),
(ur'\s+\)', ')'),
(ur'\(\s+', '('),
(ur'\s+\]', ']'),
(ur'\[\s+', '['),
(ur'\[[^\]]+\]\s*', ''),
(ur'^[^\]]+\]', ''),
(ur'\[[^\]]+$', ''),
(ur'\([#a-zA-Z0-9l\s]+\)', ''),
(ur'\([#a-zA-Z0-9l\s]+$', ''),
(ur'^[#a-zA-Z0-9l\s]+\)', ''),
(ur'^[-\s]+', ''),
(ur'[-\s]+$', ''),
(ur'\b^[-A-Z\s]+[0-9]*:\s*', ''),
(ur'(?<=[a-zA-Z\'])I', 'l'),
(ur'^[-\s]*$', ''),
],
},
'english typo':{
'scope':'line',
'action':'replace',
'ignore case':False,
'expression':[
(ur'Theysaid', u'They said'),
(ur'\bIast\b', u'last'),
(ur'\bIook\b', u'look'),
(ur'\bIetting\b', u'letting'),
(ur'\bIet\b', u'let'),
(ur'\bIooking\b', u'looking'),
(ur'\bIife\b', u'life'),
(ur'\bIeft\b', u'left'),
(ur'\bIike\b', u'like'),
(ur'\bIittle\b', u'little'),
(ur'\bIadies\b', u'ladies'),
(ur'\bIearn\b', u'learn'),
(ur'\bIanded\b', u'landed'),
(ur'\bIocked\b', u'locked'),
(ur'\bIie\b', u'lie'),
(ur'\bIong\b', u'long'),
(ur'\bIine\b', u'line'),
(ur'\bIives\b', u'lives'),
(ur'\bIeave\b', u'leave'),
(ur'\bIawyer\b', u'lawyer'),
(ur'\bIogs\b', u'logs'),
(ur'\bIack\b', u'lack'),
(ur'\bIove\b', u'love'),
(ur'\bIot\b', u'lot'),
(ur'\bIanding\b', u'landing'),
(ur'\bIet\'s\b', u'let\'s'),
(ur'\bIand\b', u'land'),
(ur'\bIying\b', u'lying'),
(ur'\bIist\b', u'list'),
(ur'\bIoved\b', u'loved'),
(ur'\bIoss\b', u'loss'),
(ur'\bIied\b', u'lied'),
(ur'\bIaugh\b', u'laugh'),
(ur'\b(h|H)avert\b', u'\\1aven\'t'),
(ur'\b(w|W)asrt\b', u'\\1asn\'t'),
(ur'\b(d|D)oesrt\b', u'\\1oesn\'t'),
(ur'\b(d|D)ort\b', u'\\1on\'t'),
(ur'\b(d|D)idrt\b', u'\\1idn\'t'),
(ur'\b(a|A)irt\b', u'\\1in\'t'),
(ur'\b(i|I)srt\b', u'\\1sn\'t'),
(ur'\b(w|W)ort\b', u'\\1on\'t'),
(ur'\b(c|C|w|W|s|S)ouldrt\b', u'\\1ouldn\'t'),
(ur'\barert\b', u'aren\'t'),
(ur'\bls\b', u'Is'),
(ur'\b(L|l)f\b', u'If'),
(ur'\blt\b', u'It'),
(ur'\blt\'s\b', u'It\'s'),
(ur'\bl\'m\b', u'I\'m'),
(ur'\bl\'ll\b', u'I\'ll'),
(ur'\bl\'ve\b', u'I\'ve'),
(ur'\bl\b', u'I'),
(ur'\bln\b', u'In'),
(ur'\blmpossible\b', u'Impossible'),
(ur'\bIight\b', u'light'),
(ur'\bIevitation\b', u'levitation'),
(ur'\bIeaving\b', u'leaving'),
(ur'\bIooked\b', u'looked'),
(ur'\bIucky\b', u'lucky'),
(ur'\bIuck\b', u'luck'),
(ur'\bIater\b', u'later'),
(ur'\bIift\b', u'lift'),
(ur'\bIip\b', u'lip'),
(ur'\bIooks\b', u'looks'),
(ur'\bIaid\b', u'laid'),
(ur'\bIikely\b', u'likely'),
(ur'\bIow\b', u'low'),
(ur'\bIeast\b', u'least'),
(ur'\bIeader\b', u'leader'),
(ur'\bIocate\b', u'locate'),
(ur'\bIaw\b', u'law'),
(ur'\bIately\b', u'lately'),
(ur'\bIiar\b', u'liar'),
(ur'\bIate\b', u'late'),
(ur'\bIonger\b', u'longer'),
(ur'\bIive\b', u'live'),
],
},
},
}
| en | 0.285244 | # -*- coding: utf-8 -*- #a-zA-Z0-9l\s]+\)', ''), #a-zA-Z0-9l\s]+$', ''), #a-zA-Z0-9l\s]+\)', ''), | 1.880343 | 2 |
comm.py | mindhog/mawb | 4 | 6615449 | <filename>comm.py
"""MAWB python communication module.
Contains code for communicating to the MAWB daemon.
"""
import struct
import subprocess
import time
import traceback  # used by process() when a message callback raises
from spug.io.proactor import getProactor, DataHandler, INETAddress
from mawb_pb2 import PBTrack, SetInitialState, SetInputParams, Response, RPC, \
RECORD, IDLE, PLAY
class BufferedDataHandler(DataHandler):
"""
The proactor data handler that manages our connection to the daemon.
This class is mostly pretty general, and could be refactored out into
the proactor library. The process() method should become abstract.
"""
def __init__(self):
self.__outputBuffer = b''
self._inputBuffer = b''
self.closeFlag = False
self.control = getProactor().makeControlQueue(self.__onControlEvent)
self.__messageCallbacks = {}
def readyToGet(self):
return self.__outputBuffer
def readyToPut(self):
return True
def readyToClose(self):
return self.closeFlag
def peek(self, size):
return self.__outputBuffer[:size]
def get(self, size):
self.__outputBuffer = self.__outputBuffer[size:]
def put(self, data):
self._inputBuffer += data
self.process()
def process(self):
"""
This gets called every time data is added to the input buffer.
It consumes a complete RPC message if there is one and dispatches
it to the appropriate handler.
"""
# Return if we don't have a complete message in the buffer.
if len(self._inputBuffer) < 4:
return
size, = struct.unpack('<I', self._inputBuffer[:4])
if len(self._inputBuffer) < size + 4:
return
# Now parse the message.
serializedMessage = self._inputBuffer[4:size + 4]
self._inputBuffer = self._inputBuffer[size + 4:]
resp = Response()
resp.ParseFromString(serializedMessage)
# Find the registered callback and call it.
try:
callback = self.__messageCallbacks[resp.msg_id]
except KeyError:
print('Response received with unknown message id %s' % resp.msg_id)
return
try:
callback(resp)
except:
print('Exception in callback:')
traceback.print_exc()
def __onControlEvent(self, event):
"""
Handler for events coming in on the control queue.
parms:
event: [str] Currently this is just data to be added to the
out-buffer.
"""
self.__outputBuffer += event
# External interface.
def queueForOutput(self, data):
"""
Queues a piece of data to be sent over the connection.
parms:
data: [str]
"""
self.control.add(data)
def registerMessageCallback(self, msgId, callback):
"""
Registers the function to be called when the response to the
message with the specified id is received.
parms:
msgId: [int] xxx
"""
self.__messageCallbacks[msgId] = callback
def close(self):
"""Close the connection."""
self.control.close()
self.control.add(b'')
self.closeFlag = True
class Comm:
"""The communicator. Sends RPCs to the daemon."""
def __init__(self, addr = '127.0.0.1', port = 8193):
self.handler = BufferedDataHandler()
self.conn = getProactor().makeConnection(
INETAddress(addr, port),
self.handler
)
self.__nextMsgId = 0
def close(self):
self.handler.close()
def __getMsgId(self):
msgId = self.__nextMsgId
self.__nextMsgId += 1
return msgId
def sendRPC(self, **kwargs):
rpc = RPC()
if 'callback' in kwargs:
rpc.msg_id = msgId = self.__getMsgId()
callback = kwargs['callback']
self.handler.registerMessageCallback(msgId, callback)
del kwargs['callback']
for attr, val in kwargs.items():
getattr(rpc, attr).CopyFrom(val)
parcel = rpc.SerializeToString()
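        # Frame the request the same way process() expects replies: a 4-byte
        # little-endian length prefix, then the serialized RPC message.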
data = struct.pack('<I', len(parcel)) + parcel
self.handler.queueForOutput(data)
class DaemonManager:
"""Lets you control the daemon."""
def __init__(self, awbdCmd = ['./awbd']):
self.daemon = None
self.awbdCmd = awbdCmd
self.proxy = None
def start(self):
if self.daemon:
print('Daemon already started.')
return
self.daemon = subprocess.Popen(self.awbdCmd)
self.proxy = Comm()
# TODO: repeatedly attempt to connect until we can get an "echo" back.
time.sleep(2)
def stop(self):
if self.daemon:
self.daemon.kill()
self.daemon.wait()
self.daemon = None
else:
print('Daemon not started.')
def __del__(self):
if self.daemon:
self.stop()
| <filename>comm.py
"""MAWB python communication module.
Contains code for communicating to the MAWB daemon.
"""
import struct
import subprocess
import time
import traceback  # used by process() when a message callback raises
from spug.io.proactor import getProactor, DataHandler, INETAddress
from mawb_pb2 import PBTrack, SetInitialState, SetInputParams, Response, RPC, \
RECORD, IDLE, PLAY
class BufferedDataHandler(DataHandler):
"""
The proactor data handler that manages our connection to the daemon.
This class is mostly pretty general, and could be refactored out into
the proactor library. The process() method should become abstract.
"""
def __init__(self):
self.__outputBuffer = b''
self._inputBuffer = b''
self.closeFlag = False
self.control = getProactor().makeControlQueue(self.__onControlEvent)
self.__messageCallbacks = {}
def readyToGet(self):
return self.__outputBuffer
def readyToPut(self):
return True
def readyToClose(self):
return self.closeFlag
def peek(self, size):
return self.__outputBuffer[:size]
def get(self, size):
self.__outputBuffer = self.__outputBuffer[size:]
def put(self, data):
self._inputBuffer += data
self.process()
def process(self):
"""
This gets called every time data is added to the input buffer.
It consumes a complete RPC message if there is one and dispatches
it to the appropriate handler.
"""
# Return if we don't have a complete message in the buffer.
if len(self._inputBuffer) < 4:
return
size, = struct.unpack('<I', self._inputBuffer[:4])
if len(self._inputBuffer) < size + 4:
return
# Now parse the message.
serializedMessage = self._inputBuffer[4:size + 4]
self._inputBuffer = self._inputBuffer[size + 4:]
resp = Response()
resp.ParseFromString(serializedMessage)
# Find the registered callback and call it.
try:
callback = self.__messageCallbacks[resp.msg_id]
except KeyError:
print('Response received with unknown message id %s' % resp.msg_id)
return
try:
callback(resp)
except:
print('Exception in callback:')
traceback.print_exc()
def __onControlEvent(self, event):
"""
Handler for events coming in on the control queue.
parms:
event: [str] Currently this is just data to be added to the
out-buffer.
"""
self.__outputBuffer += event
# External interface.
def queueForOutput(self, data):
"""
Queues a piece of data to be sent over the connection.
parms:
data: [str]
"""
self.control.add(data)
def registerMessageCallback(self, msgId, callback):
"""
Registers the function to be called when the response to the
message with the specified id is received.
parms:
msgId: [int] xxx
"""
self.__messageCallbacks[msgId] = callback
def close(self):
"""Close the connection."""
self.control.close()
self.control.add(b'')
self.closeFlag = True
class Comm:
"""The communicator. Sends RPCs to the daemon."""
def __init__(self, addr = '127.0.0.1', port = 8193):
self.handler = BufferedDataHandler()
self.conn = getProactor().makeConnection(
INETAddress(addr, port),
self.handler
)
self.__nextMsgId = 0
def close(self):
self.handler.close()
def __getMsgId(self):
msgId = self.__nextMsgId
self.__nextMsgId += 1
return msgId
def sendRPC(self, **kwargs):
rpc = RPC()
if 'callback' in kwargs:
rpc.msg_id = msgId = self.__getMsgId()
callback = kwargs['callback']
self.handler.registerMessageCallback(msgId, callback)
del kwargs['callback']
for attr, val in kwargs.items():
getattr(rpc, attr).CopyFrom(val)
parcel = rpc.SerializeToString()
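        # Frame the request the same way process() expects replies: a 4-byte
        # little-endian length prefix, then the serialized RPC message.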
data = struct.pack('<I', len(parcel)) + parcel
self.handler.queueForOutput(data)
class DaemonManager:
"""Lets you control the daemon."""
def __init__(self, awbdCmd = ['./awbd']):
self.daemon = None
self.awbdCmd = awbdCmd
self.proxy = None
def start(self):
if self.daemon:
print('Daemon already started.')
return
self.daemon = subprocess.Popen(self.awbdCmd)
self.proxy = Comm()
# TODO: repeatedly attempt to connect until we can get an "echo" back.
time.sleep(2)
def stop(self):
if self.daemon:
self.daemon.kill()
self.daemon.wait()
self.daemon = None
else:
print('Daemon not started.')
def __del__(self):
if self.daemon:
self.stop()
| en | 0.873997 | MAWB python communication module. Contains code for communicating to the MAWB daemon. The proactor data handler that manages our connection to the daemon. This class is mostly pretty general, and could be refactored out into the proactor library. The process() method should become abstract. This gets called every time data is added to the input buffer. It consumes a complete RPC message if there is one and dispatches it to the appropriate handler. # Return if we don't have a complete message in the buffer. # Now parse the message. # Find the registered callback and call it. Handler for events coming in on the control queue. parms: event: [str] Currently this is just data to be added to the out-buffer. # External interface. Queues a piece of data to be sent over the connection. parms: data: [str] Registers the function to be called when the response to the message with the specified id is received. parms: msgId: [int] xxx Close the connection. The communicator. Sends RPCs to the daemon. Lets you control the daemon. # TODO: repeatedly attempt to connect until we can get an "echo" back. | 2.415111 | 2 |
YusukeKaihara/program/api_count.py | YusukeKaihara/Graduate-Research-2021 | 0 | 6615450 | import json
import glob
import os
import pickle
import random
import collections
api_list=[]
api_count = collections.Counter()
paths = glob.glob("C:/Users/Kaihara/python/familygroup/Backdoor.Win32.Androm\\*")
path = random.sample(paths,k=10)
print(path)
for i in range(10):
data = path[i]
with open(data) as f:
j = json.load(f)
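    # Each file appears to be a Cuckoo-style sandbox report; collect the
    # API names called by the second traced process (index 1).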
apis = [item['api'] for item in j['behavior']['processes'][1]['calls']]
    api_count.update(apis)  # accumulate counts across all sampled reports
print(api_count)
with open('api_count.pickle', mode='wb') as fo:
pickle.dump(api_count,fo)
with open('file.pickle', mode='wb') as fi:
pickle.dump(path,fi)
| import json
import glob
import os
import pickle
import random
import collections
api_list=[]
api_count = collections.Counter()
paths = glob.glob("C:/Users/Kaihara/python/familygroup/Backdoor.Win32.Androm\\*")
path = random.sample(paths,k=10)
print(path)
for i in range(10):
data = path[i]
with open(data) as f:
j = json.load(f)
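    # Each file appears to be a Cuckoo-style sandbox report; collect the
    # API names called by the second traced process (index 1).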
apis = [item['api'] for item in j['behavior']['processes'][1]['calls']]
    api_count.update(apis)  # accumulate counts across all sampled reports
print(api_count)
with open('api_count.pickle', mode='wb') as fo:
pickle.dump(api_count,fo)
with open('file.pickle', mode='wb') as fi:
pickle.dump(path,fi)
| none | 1 | 2.41907 | 2 |